filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---
config/wsgi.py
|
"""
WSGI config for Histonets project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# histonets directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'histonets'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
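# Illustrative sketch only (not part of the original project): a minimal WSGI
# middleware that wraps the Django application; the name
# `request_logging_middleware` is hypothetical.
# def request_logging_middleware(app):
#     def wrapped(environ, start_response):
#         # Inspect the request path before delegating to the Django application.
#         print(environ.get("PATH_INFO", "/"))
#         return app(environ, start_response)
#     return wrapped
# application = request_logging_middleware(application)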
| [] | [] | ["DJANGO_SETTINGS_MODULE"] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
core/chaincode/platforms/golang/hash.go
|
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package golang
import (
"archive/tar"
"bytes"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/op/go-logging"
"github.com/spf13/viper"
cutil "github.com/hyperledger/fabric/core/container/util"
"github.com/hyperledger/fabric/core/util"
pb "github.com/hyperledger/fabric/protos"
)
var logger = logging.MustGetLogger("golang/hash")
//core hash computation factored out for testing
func computeHash(contents []byte, hash []byte) []byte {
newSlice := make([]byte, len(hash)+len(contents))
//copy the contents
copy(newSlice[0:len(contents)], contents[:])
//add the previous hash
copy(newSlice[len(contents):], hash[:])
//compute new hash
hash = util.ComputeCryptoHash(newSlice)
return hash
}
//hashFilesInDir computes h=hash(h,file bytes) for each file in a directory
//Directory entries are traversed recursively. In the end a single
//hash value is returned for the entire directory structure
func hashFilesInDir(rootDir string, dir string, hash []byte, tw *tar.Writer) ([]byte, error) {
currentDir := filepath.Join(rootDir, dir)
logger.Debugf("hashFiles %s", currentDir)
//ReadDir returns sorted list of files in dir
fis, err := ioutil.ReadDir(currentDir)
if err != nil {
return hash, fmt.Errorf("ReadDir failed %s\n", err)
}
for _, fi := range fis {
name := filepath.Join(dir, fi.Name())
if fi.IsDir() {
var err error
hash, err = hashFilesInDir(rootDir, name, hash, tw)
if err != nil {
return hash, err
}
continue
}
fqp := filepath.Join(rootDir, name)
buf, err := ioutil.ReadFile(fqp)
if err != nil {
fmt.Printf("Error reading %s\n", err)
return hash, err
}
//get the new hash from file contents
hash = computeHash(buf, hash)
if tw != nil {
is := bytes.NewReader(buf)
if err = cutil.WriteStreamToPackage(is, fqp, filepath.Join("src", name), tw); err != nil {
return hash, fmt.Errorf("Error adding file to tar %s", err)
}
}
}
return hash, nil
}
func isCodeExist(tmppath string) error {
file, err := os.Open(tmppath)
if err != nil {
return fmt.Errorf("Download failed %s", err)
}
fi, err := file.Stat()
if err != nil {
return fmt.Errorf("Could not stat file %s", err)
}
if !fi.IsDir() {
return fmt.Errorf("File %s is not dir\n", file.Name())
}
return nil
}
func getCodeFromHTTP(path string) (codegopath string, err error) {
codegopath = ""
err = nil
logger.Debugf("getCodeFromHTTP %s", path)
// The following could be done with os.Getenv("GOPATH") but we need to change it later so this prepares for that next step
env := os.Environ()
var origgopath string
var gopathenvIndex int
for i, v := range env {
if strings.Index(v, "GOPATH=") == 0 {
p := strings.SplitAfter(v, "GOPATH=")
origgopath = p[1]
gopathenvIndex = i
break
}
}
if origgopath == "" {
err = fmt.Errorf("GOPATH not defined")
return
}
// Only take the first element of GOPATH
gopath := filepath.SplitList(origgopath)[0]
// Define a new gopath in which to download the code
newgopath := filepath.Join(gopath, "_usercode_")
//ignore errors.. _usercode_ might exist. TempDir will catch any other errors
os.Mkdir(newgopath, 0755)
if codegopath, err = ioutil.TempDir(newgopath, ""); err != nil {
err = fmt.Errorf("could not create tmp dir under %s(%s)", newgopath, err)
return
}
//go paths can have multiple dirs. We create a GOPATH with two source trees as follows
//
// <temporary empty folder to download chaincode source> : <local go path with OBC source>
//
//This approach has several benefits:
// . Go will pick the first path to download user code (which we will delete after processing)
// . Go will not download OBC as it is in the second path; Go will use the local OBC for generating the chaincode image
// . network savings
// . more secure
// . as we are not downloading OBC, private, password-protected OBC repos become a non-issue
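// For illustration only (hypothetical paths): after the assignment below, the
// entry looks like
//   GOPATH=/opt/gopath/_usercode_/083717649:/opt/gopath
// i.e. the temporary download tree first, then the original local GOPATH.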
env[gopathenvIndex] = "GOPATH=" + codegopath + string(os.PathListSeparator) + origgopath
// Use a 'go get' command to pull the chaincode from the given repo
logger.Debugf("go get %s", path)
cmd := exec.Command("go", "get", path)
cmd.Env = env
var out bytes.Buffer
cmd.Stdout = &out
var errBuf bytes.Buffer
cmd.Stderr = &errBuf //capture Stderr and print it on error
err = cmd.Start()
// Create a go routine that will wait for the command to finish
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
select {
case <-time.After(time.Duration(viper.GetInt("chaincode.deploytimeout")) * time.Millisecond):
// If pulling repos takes too long, we should give up
// (This can happen if a repo is private and the git clone asks for credentials)
if err = cmd.Process.Kill(); err != nil {
err = fmt.Errorf("failed to kill: %s", err)
} else {
err = errors.New("Getting chaincode took too long")
}
case err = <-done:
// If we're here, the 'go get' command must have finished
if err != nil {
err = fmt.Errorf("'go get' failed with error: \"%s\"\n%s", err, string(errBuf.Bytes()))
}
}
return
}
func getCodeFromFS(path string) (codegopath string, err error) {
logger.Debugf("getCodeFromFS %s", path)
gopath := os.Getenv("GOPATH")
if gopath == "" {
err = fmt.Errorf("GOPATH not defined")
return
}
// Only take the first element of GOPATH
codegopath = filepath.SplitList(gopath)[0]
return
}
//generateHashcode gets hashcode of the code under path. If path is a HTTP(s) url
//it downloads the code first to compute the hash.
//NOTE: for dev mode, user builds and runs chaincode manually. The name provided
//by the user is equivalent to the path. This method will treat the name
//as codebytes and compute the hash from it. ie, user cannot run the chaincode
//with the same (name, ctor, args)
func generateHashcode(spec *pb.ChaincodeSpec, tw *tar.Writer) (string, error) {
if spec == nil {
return "", fmt.Errorf("Cannot generate hashcode from nil spec")
}
chaincodeID := spec.ChaincodeID
if chaincodeID == nil || chaincodeID.Path == "" {
return "", fmt.Errorf("Cannot generate hashcode from empty chaincode path")
}
ctor := spec.CtorMsg
if ctor == nil || ctor.Function == "" {
return "", fmt.Errorf("Cannot generate hashcode from empty ctor")
}
//code root will point to the directory where the code exists
//in the case of http it will be a temporary dir that
//will have to be deleted
var codegopath string
var ishttp bool
defer func() {
if ishttp && codegopath != "" {
os.RemoveAll(codegopath)
}
}()
path := chaincodeID.Path
var err error
var actualcodepath string
if strings.HasPrefix(path, "http://") {
ishttp = true
actualcodepath = path[7:]
codegopath, err = getCodeFromHTTP(actualcodepath)
} else if strings.HasPrefix(path, "https://") {
ishttp = true
actualcodepath = path[8:]
codegopath, err = getCodeFromHTTP(actualcodepath)
} else {
actualcodepath = path
codegopath, err = getCodeFromFS(path)
}
if err != nil {
return "", fmt.Errorf("Error getting code %s", err)
}
tmppath := filepath.Join(codegopath, "src", actualcodepath)
if err = isCodeExist(tmppath); err != nil {
return "", fmt.Errorf("code does not exist %s", err)
}
hash := util.GenerateHashFromSignature(actualcodepath, ctor.Function, ctor.Args)
hash, err = hashFilesInDir(filepath.Join(codegopath, "src"), actualcodepath, hash, tw)
if err != nil {
return "", fmt.Errorf("Could not get hashcode for %s - %s\n", path, err)
}
return hex.EncodeToString(hash[:]), nil
}
| ["\"GOPATH\"", "\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
main.go
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
goi18n "github.com/nicksnyder/go-i18n/i18n"
"os"
"github.com/apache/openwhisk-cli/commands"
"github.com/apache/openwhisk-cli/wski18n"
"github.com/apache/openwhisk-client-go/whisk"
)
// CLI_BUILD_TIME holds the time of the CLI build. During gradle builds,
// this value will be overwritten via the command:
// go build -ldflags "-X main.CLI_BUILD_TIME=nnnnn" // nnnnn is the new timestamp
var CLI_BUILD_TIME string = "not set"
var cliDebug = os.Getenv("WSK_CLI_DEBUG") // Useful for tracing init() code
var T goi18n.TranslateFunc
func init() {
if len(cliDebug) > 0 {
whisk.SetDebug(true)
}
T = wski18n.T
// Rest of CLI uses the Properties struct, so set the build time there
commands.Properties.CLIVersion = CLI_BUILD_TIME
}
func main() {
defer func() {
if r := recover(); r != nil {
fmt.Println(r)
fmt.Println(T("Application exited unexpectedly"))
}
}()
if err := commands.Execute(); err != nil {
commands.ExitOnError(err)
}
return
}
| ["\"WSK_CLI_DEBUG\""] | [] | ["WSK_CLI_DEBUG"] | [] | ["WSK_CLI_DEBUG"] | go | 1 | 0 | |
parser/team03/grammarReview.py
|
from parser import *
from scanner import tokens
from parse.expressions.expressions_math import *
from parse.expressions.expressions_base import *
from parse.expressions.expressions_trig import *
from treeGraph import *
start = 'init'
precedence = (
# Arthmetic
('left', 'MAS', 'MENOS'),
('left', 'POR', 'DIAGONAL'),
('left', 'EXPONENCIANCION'),
('right', 'UMENOS'),
('right', 'UMAS'),
# Relational
('left', 'MENOR', 'MAYOR', 'IGUAL', 'MENORQ', 'MAYORQ'),
# logic
# ('left', 'OR'),
# ('left', 'AND'),
# ('right', 'NOT'),
)
def p_init(t):
''' init : statements'''
t[0] = t[1]
def p_statements(t):
''' statements : statements statement '''
t[1].append(t[2])
t[0] = t[1]
def p_statements2(t):
''' statements : statement '''
t[0] = [t[1]]
def p_statement(t):
'''statement : relExpression PUNTOCOMA
'''
t[0] = t[1]
########## Definition of optional productions, which could reduce to 'empty' (epsilon) ################
# def p_not_opt(t):
# '''not_opt : NOT
# | empty'''
########## Definition of Relational expressions ##############
def p_relExpression(t):
'''relExpression : expression MENOR expression
| expression MAYOR expression
| expression IGUAL expression
| expression MENORQ expression
| expression MAYORQ expression
| expression DIFERENTE expression
| expression NOT LIKE TEXTO
| expression LIKE TEXTO'''
token = t.slice[2]
if token.type == "MENOR":
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = RelationalExpression(t[1], t[3], OpRelational.LESS, 0, 0, graph_ref)
elif token.type == "MAYOR":
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = RelationalExpression(t[1], t[3], OpRelational.GREATER, 0, 0, graph_ref)
elif token.type == "IGUAL":
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = RelationalExpression(t[1], t[3], OpRelational.EQUALS, 0, 0, graph_ref)
elif token.type == "MENORQ":
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = RelationalExpression(t[1], t[3], OpRelational.LESS_EQUALS, 0, 0, graph_ref)
elif token.type == "MAYORQ":
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = RelationalExpression(t[1], t[3], OpRelational.GREATER_EQUALS, 0, 0, graph_ref)
elif token.type == "DIFERENTE":
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = RelationalExpression(t[1], t[3], OpRelational.NOT_EQUALS, 0, 0, graph_ref)
elif token.type == "NOT":
graph_ref = graph_node(str(t[2] + " " + t[3]), [t[1].graph_ref])
t[0] = RelationalExpression(t[1], t[4], OpRelational.NOT_LIKE, 0, 0, graph_ref)
elif token.type == "LIKE":
graph_ref = graph_node(str(t[2] + " " + t[3]), [t[1].graph_ref])
t[0] = RelationalExpression(t[1], t[3], OpRelational.LIKE, 0, 0, graph_ref)
else:
print("Missing code from: ", t.slice)
def p_relExpReducExp(t):
'''relExpression : expression'''
t[0] = t[1]
########## Definitions of productions for expression ::= ##############
def p_expression(t):
''' expression : expression MAS expression
| expression MENOS expression
| expression POR expression
| expression DIAGONAL expression
| expression PORCENTAJE expression
| expression EXPONENCIANCION expression
'''
if t[2] == '+':
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = BinaryExpression(t[1], t[3], OpArithmetic.PLUS, 0, 0, graph_ref)
elif t[2] == '-':
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = BinaryExpression(t[1], t[3], OpArithmetic.MINUS, 0, 0, graph_ref)
elif t[2] == '*':
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = BinaryExpression(t[1], t[3], OpArithmetic.TIMES, 0, 0, graph_ref)
elif t[2] == '/':
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = BinaryExpression(t[1], t[3], OpArithmetic.DIVIDE, 0, 0, graph_ref)
elif t[2] == '%':
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = BinaryExpression(t[1], t[3], OpArithmetic.MODULE, 0, 0, graph_ref)
elif t[2] == '^':
graph_ref = graph_node(str(t[2]), [t[1].graph_ref, t[3].graph_ref])
t[0] = BinaryExpression(t[1], t[3], OpArithmetic.POWER, 0, 0, graph_ref)
else:
print("You forgot wirte code for the operator: ", t[2])
def p_trigonometric(t):
''' expression : ACOS PARA expression PARC
| ACOSD PARA expression PARC
| ASIN PARA expression PARC
| ASIND PARA expression PARC
| ATAN PARA expression PARC
| ATAND PARA expression PARC
| ATAN2 PARA expression COMA expression PARC
| ATAN2D PARA expression COMA expression PARC
| COS PARA expression PARC
| COSD PARA expression PARC
| COT PARA expression PARC
| COTD PARA expression PARC
| SIN PARA expression PARC
| SIND PARA expression PARC
| TAN PARA expression PARC
| TAND PARA expression PARC
| SINH PARA expression PARC
| COSH PARA expression PARC
| TANH PARA expression PARC
| ASINH PARA expression PARC
| ACOSH PARA expression PARC
| ATANH PARA expression PARC'''
if t.slice[1].type == 'ACOS':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Acos(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ACOSD':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Acosd(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ASIN':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Asin(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ASIND':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Asind(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ATAN':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Atan(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ATAND':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Atand(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ATAN2':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = Atan2(t[3], t[5], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ATAN2D':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = Atan2d(t[3], t[5], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'COS':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Cos(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'COSD':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Cosd(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'COT':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Cot(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'COTD':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Cotd(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'SIN':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Sin(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'SIND':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Sind(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'TAN':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Tan(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'TAND':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Tand(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'SINH':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Sinh(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'COSH':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Cosh(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'TANH':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Tanh(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ASINH':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Asinh(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ACOSH':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Acosh(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif t.slice[1].type == 'ATANH':
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Atanh(t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
def p_aritmetic(t):
'''expression : ABS PARA expression PARC
| CBRT PARA expression PARC
| CEIL PARA expression PARC
| CEILING PARA expression PARC
| DEGREES PARA expression PARC
| DIV PARA expression COMA expression PARC
| EXP PARA expression PARC
| FACTORIAL PARA expression PARC
| FLOOR PARA expression PARC
| GCD PARA expression COMA expression PARC
| LCM PARA expression COMA expression PARC
| LN PARA expression PARC
| LOG PARA expression PARC
| LOG10 PARA expression PARC
| MIN_SCALE PARA expression PARC
| MOD PARA expression COMA expression PARC
| PI PARA PARC
| POWER PARA expression COMA expression PARC
| RADIANS PARA expression PARC
| ROUND PARA expression PARC
| SCALE PARA expression PARC
| SIGN PARA expression PARC
| SQRT PARA expression PARC
| TRIM_SCALE PARA expression PARC
| WIDTH_BUCKET PARA expression COMA expression PARC
| RANDOM PARA PARC
| SETSEED PARA expression PARC
'''
token = t.slice[1]
if token.type == "ABS":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Abs(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "CBRT":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Cbrt(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "CEIL" or token.type == "CEILING":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Ceil(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "DEGREES":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Degrees(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "DIV":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = Div(t[3], t[5], token.lineno, token.lexpos, graph_ref)
elif token.type == "EXP":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Exp(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "FACTORIAL":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Factorial(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "FLOOR":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Floor(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "GCD":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = Gcd(t[3], t[5], token.lineno, token.lexpos, graph_ref)
###
elif token.type == "LCM":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = Lcm(t[3], t[5], token.lineno, token.lexpos, graph_ref)
elif token.type == "LN":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Ln(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "LOG":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Log(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "LOG10":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Log10(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "MIN_SCALE":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = MinScale(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "MOD":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = Mod(t[3], t[5], token.lineno, token.lexpos, graph_ref)
elif token.type == "PI":
graph_ref = graph_node(str(t[1]))
t[0] = PI(token.lineno, token.lexpos, graph_ref)
elif token.type == "POWER":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = Power(t[3], t[5], token.lineno, token.lexpos, graph_ref)
elif token.type == "RADIANS":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Radians(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "ROUND":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Round(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "SCALE":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Scale(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "SIGN":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Sign(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "SQRT":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Sqrt(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "TRIM_SCALE":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = TrimScale(t[3], token.lineno, token.lexpos, graph_ref)
elif token.type == "WIDTH_BUCKET":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref, t[5].graph_ref])
t[0] = WithBucket(t[3], t[5], token.lineno, token.lexpos, graph_ref)
elif token.type == "RANDOM":
graph_ref = graph_node(str(t[1]))
t[0] = Random(token.lineno, token.lexpos, graph_ref)
elif token.type == "SETSEED":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = SetSeed(t[3], token.lineno, token.lexpos, graph_ref)
# | NOT expression
# '''
# | PARA logicExpression PARC'''
def p_exp_unary(t):
'''expression : MENOS expression %prec UMENOS
| MAS expression %prec UMAS '''
if t[1] == '+':
graph_ref = graph_node(str(t[1]), [t[2].graph_ref])
t[0] = BinaryExpression(Numeric(1, 0, 0, 0), t[2], OpArithmetic.TIMES, 0, 0, graph_ref)
elif t[1] == '-':
graph_ref = graph_node(str(t[1]), [t[2].graph_ref])
t[0] = BinaryExpression(NumericNegative(1, 0, 0, 0), t[2], OpArithmetic.TIMES, 0, 0, graph_ref)
else:
print("Missed code from unary expression")
def p_exp_num(t):
'''expression : numero
| col_name'''
t[0] = t[1]
def p_exp_val(t):
'''expression : TEXTO
| BOOLEAN_VALUE
| NOW PARA PARC'''
token = t.slice[1]
if token.type == "TEXTO":
graph_ref = graph_node(str(t[1]))
t[0] = Text(token.value, token.lineno, token.lexpos, graph_ref)
if token.type == "BOOLEAN_VALUE":
graph_ref = graph_node(str(t[1]))
t[0] = BoolAST(token.value, token.lineno, token.lexpos, graph_ref)
if token.type == "NOW":
graph_ref = graph_node(str(t[1]))
t[0] = Now(token.lineno, token.lexpos, graph_ref)
def p_exp_afunc1(t):
'''expression : TRUC PARA expression PARC'''
token = t.slice[1]
if token.type == "TRUC":
graph_ref = graph_node(str(t[1]), [t[3].graph_ref])
t[0] = Trunc(t[3], 0, 0, graph_ref)
# else:
# print("Missing code from: ",t[1])
# def p_empty(t):
# '''empty :'''
# pass
def p_error(p):
if not p:
print("End of file!")
return
# Read ahead looking for a closing ';'
while True:
tok = parse.token() # Get the next token
if not tok or tok.type == 'PUNTOCOMA':
print("-->Syntax Error: Ilega token \"" + str(p.type) + "\" Line: " + str(p.lineno) + "Column: " + str(
p.lexpos))
break
parse.restart()
def p_numero(t):
''' numero : ENTERO
| FLOAT'''
token = t.slice[1]
graph_ref = graph_node(str(t[1]))
t[0] = Numeric(token.value, token.lineno, token.lexpos, graph_ref)
def p_col_name(t):
''' col_name : ID PUNTO ID
| ID '''
token = t.slice[1]
if len(t) == 2:
graph_ref = graph_node(str(t[1]))
t[0] = ColumnName(None, t[1], token.lineno, token.lexpos, graph_ref)
else:
graph_ref = graph_node(str(t[1] + t[2] + t[3]))
t[0] = ColumnName(t[1], t[3], token.lineno, token.lexpos, graph_ref)
import ply.yacc as yacc
parse = yacc.yacc()
def toParse(input):
# return parse.parse(input,lexer)
parse.parse(input)
dot.view()
return parse.parse(input)
| [] | [] | [] | [] | [] | python | null | null | null |
python/craftassist/craftassist_agent.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import sys
# python/ dir, for agent.so
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import faulthandler
import itertools
import logging
import numpy as np
import random
import re
import sentry_sdk
import signal
import time
from multiprocessing import set_start_method
from agent import Agent
from agent_connection import default_agent_name
from voxel_models.subcomponent_classifier import SubComponentClassifier
from voxel_models.geoscorer import Geoscorer
import memory
import perception
import shapes
from util import to_block_pos, pos_to_np, TimingWarn, hash_user
import default_behaviors
from ttad_model_dialogue_manager import TtadModelDialogueManager
faulthandler.register(signal.SIGUSR1)
random.seed(0)
log_formatter = logging.Formatter(
"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)s() %(levelname)s]: %(message)s"
)
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger().handlers.clear()
sentry_sdk.init() # enabled if SENTRY_DSN set in env
DEFAULT_BEHAVIOUR_TIMEOUT = 20
DEFAULT_PORT = 25565
class CraftAssistAgent(Agent):
def __init__(
self,
host="localhost",
port=DEFAULT_PORT,
name=None,
ttad_prev_model_path=None,
ttad_model_dir=None,
ttad_bert_data_dir=None,
ttad_embeddings_path=None,
ttad_grammar_path=None,
semseg_model_path=None,
voxel_model_gpu_id=-1,
get_perception_interval=20,
draw_fn=None,
no_default_behavior=False,
geoscorer_model_path=None,
):
logging.info("CraftAssistAgent.__init__ started")
self.name = name or default_agent_name()
self.no_default_behavior = no_default_behavior
# files needed to set up ttad model
if ttad_prev_model_path is None:
ttad_prev_model_path = os.path.join(os.path.dirname(__file__), "models/ttad/ttad.pth")
if ttad_model_dir is None:
ttad_model_dir = os.path.join(os.path.dirname(__file__), "models/ttad_bert/model/")
if ttad_bert_data_dir is None:
ttad_bert_data_dir = os.path.join(
os.path.dirname(__file__), "models/ttad_bert/annotated_data/"
)
if ttad_embeddings_path is None:
ttad_embeddings_path = os.path.join(
os.path.dirname(__file__), "models/ttad/ttad_ft_embeds.pth"
)
if ttad_grammar_path is None:
ttad_grammar_path = os.path.join(
os.path.dirname(__file__), "models/ttad/dialogue_grammar.json"
)
# set up the SubComponentClassifier model
if semseg_model_path is not None:
self.subcomponent_classifier = SubComponentClassifier(
voxel_model_path=semseg_model_path
)
else:
self.subcomponent_classifier = None
# set up the Geoscorer model
if geoscorer_model_path is not None:
self.geoscorer = Geoscorer(merger_model_path=geoscorer_model_path)
else:
self.geoscorer = None
self.memory = memory.AgentMemory(
db_file=os.environ.get("DB_FILE", ":memory:"),
db_log_path="agent_memory.{}.log".format(self.name),
)
logging.info("Initialized AgentMemory")
self.dialogue_manager = TtadModelDialogueManager(
self,
ttad_prev_model_path,
ttad_model_dir,
ttad_bert_data_dir,
ttad_embeddings_path,
ttad_grammar_path,
)
logging.info("Initialized DialogueManager")
# Log to file
fh = logging.FileHandler("agent.{}.log".format(self.name))
fh.setFormatter(log_formatter)
fh.setLevel(logging.DEBUG)
logging.getLogger().addHandler(fh)
# Login to server
logging.info("Attempting to connect to port {}".format(port))
super().__init__(host, port, self.name)
logging.info("Logged in to server")
# Wrap C++ agent methods
self._cpp_send_chat = self.send_chat
self.send_chat = self._send_chat
self.last_chat_time = 0
self.get_perception_interval = get_perception_interval
self.uncaught_error_count = 0
self.last_task_memid = None
self.point_targets = []
def start(self):
logging.info("CraftAssistAgent.start() called")
# start the subcomponent classification model
if self.subcomponent_classifier:
self.subcomponent_classifier.start()
for self.count in itertools.count(): # count forever
try:
if self.count == 0:
logging.info("First top-level step()")
self.step()
except Exception as e:
logging.exception(
"Default handler caught exception, db_log_idx={}".format(
self.memory.get_db_log_idx()
)
)
self.send_chat("Oops! I got confused and wasn't able to complete my last task :(")
sentry_sdk.capture_exception(e)
self.memory.task_stack_clear()
self.dialogue_manager.dialogue_stack.clear()
self.uncaught_error_count += 1
if self.uncaught_error_count >= 100:
sys.exit(1)
def step(self):
self.pos = to_block_pos(pos_to_np(self.get_player().pos))
# remove old point targets
self.point_targets = [pt for pt in self.point_targets if time.time() - pt[1] < 6]
# Update memory with current world state
# Removed get_perception call due to very slow updates on non-flatworlds
with TimingWarn(2):
self.memory.update(self)
# Process incoming chats
self.dialogue_step()
# Step topmost task on stack
self.task_step()
def task_step(self, sleep_time=0.25):
# Clean finished tasks
while (
self.memory.task_stack_peek() and self.memory.task_stack_peek().task.check_finished()
):
self.memory.task_stack_pop()
# Maybe add default task
if not self.no_default_behavior:
self.maybe_run_slow_defaults()
# If nothing to do, wait a moment
if self.memory.task_stack_peek() is None:
time.sleep(sleep_time)
return
# If something to do, step the topmost task
task_mem = self.memory.task_stack_peek()
if task_mem.memid != self.last_task_memid:
logging.info("Starting task {}".format(task_mem.task))
self.last_task_memid = task_mem.memid
task_mem.task.step(self)
self.memory.task_stack_update_task(task_mem.memid, task_mem.task)
def get_time(self):
# round to 100th of a second, return as
# n hundredths of seconds since agent init
return self.memory.get_time()
def get_perception(self, force=False):
"""
Get both block objects and component objects and put them
in memory
"""
if not force and (
self.count % self.get_perception_interval != 0
or self.memory.task_stack_peek() is not None
):
return
block_objs_for_vision = []
for obj in perception.all_nearby_objects(self.get_blocks, self.pos):
memory.BlockObjectNode.create(self.memory, obj)
# If any xyz of obj has not been labeled
if any([(not self.memory.get_component_object_ids_by_xyz(xyz)) for xyz, _ in obj]):
block_objs_for_vision.append(obj)
# TODO formalize this, make a list of all perception calls to make, etc.
# note this directly adds the memories
perception.get_all_nearby_holes(self, self.pos, radius=15)
perception.get_nearby_airtouching_blocks(self, self.pos, radius=15)
if self.subcomponent_classifier is None:
return
for obj in block_objs_for_vision:
self.subcomponent_classifier.block_objs_q.put(obj)
# every time we try to retrieve as many recognition results as possible
while not self.subcomponent_classifier.loc2labels_q.empty():
loc2labels, obj = self.subcomponent_classifier.loc2labels_q.get()
loc2ids = dict(obj)
label2blocks = {}
def contaminated(blocks):
"""
Check if blocks are still consistent with the current world
"""
mx, Mx, my, My, mz, Mz = shapes.get_bounds(blocks)
yzxb = self.get_blocks(mx, Mx, my, My, mz, Mz)
for b, _ in blocks:
x, y, z = b
if loc2ids[b][0] != yzxb[y - my, z - mz, x - mx, 0]:
return True
return False
for loc, labels in loc2labels.items():
b = (loc, loc2ids[loc])
for l in labels:
if l in label2blocks:
label2blocks[l].append(b)
else:
label2blocks[l] = [b]
for l, blocks in label2blocks.items():
## if the blocks are contaminated, we just ignore them
if not contaminated(blocks):
memory.ComponentObjectNode.create(self.memory, blocks, [l])
def maybe_run_slow_defaults(self):
"""Pick a default task task to run
with a low probability"""
if self.memory.task_stack_peek() or len(self.dialogue_manager.dialogue_stack) > 0:
return
# list of (prob, default function) pairs
visible_defaults = [
(0.001, default_behaviors.build_random_shape),
(0.005, default_behaviors.come_to_player),
]
# default behaviors of the agent not visible in the game
invisible_defaults = []
defaults = (
visible_defaults + invisible_defaults
if time.time() - self.last_chat_time > DEFAULT_BEHAVIOUR_TIMEOUT
else invisible_defaults
)
defaults = [(p, f) for (p, f) in defaults if f not in self.memory.banned_default_behaviors]
def noop(*args):
pass
defaults.append((1 - sum(p for p, _ in defaults), noop)) # noop with remaining prob
# weighted random choice of functions
p, fns = zip(*defaults)
fn = np.random.choice(fns, p=p)
if fn != noop:
logging.info("Default behavior: {}".format(fn))
fn(self)
def dialogue_step(self):
"""Process incoming chats and modify task stack"""
raw_incoming_chats = self.get_incoming_chats()
if raw_incoming_chats:
# force to get objects
self.get_perception(force=True)
# logging.info("Incoming chats: {}".format(raw_incoming_chats))
incoming_chats = []
for raw_chat in raw_incoming_chats:
match = re.search("^<([^>]+)> (.*)", raw_chat)
if match is None:
logging.info("Ignoring chat: {}".format(raw_chat))
continue
speaker, chat = match.group(1), match.group(2)
speaker_hash = hash_user(speaker)
logging.info("Incoming chat: ['{}' -> {}]".format(speaker_hash, chat))
if chat.startswith("/"):
continue
incoming_chats.append((speaker, chat))
self.memory.add_chat(self.memory.get_player_by_name(speaker).memid, chat)
if len(incoming_chats) > 0:
# change this to memory.get_time() format?
self.last_chat_time = time.time()
# for now just process the first incoming chat
self.dialogue_manager.step(incoming_chats[0])
else:
self.dialogue_manager.step((None, ""))
# TODO reset all blocks in point area to what they
# were before the point action no matter what
# so e.g. player construction in pointing area during point
# is reverted
def safe_get_changed_blocks(self):
blocks = self.get_changed_blocks()
safe_blocks = []
if len(self.point_targets) > 0:
for point_target in self.point_targets:
pt = point_target[0]
for b in blocks:
x, y, z = b[0]
xok = x < pt[0] or x > pt[3]
yok = y < pt[1] or y > pt[4]
zok = z < pt[2] or z > pt[5]
if xok and yok and zok:
safe_blocks.append(b)
else:
safe_blocks = blocks
return safe_blocks
def point_at(self, target, sleep=None):
"""Bot pointing.
Args:
target: list of x1 y1 z1 x2 y2 z2, where:
x1 <= x2,
y1 <= y2,
z1 <= z2.
"""
assert len(target) == 6
self.send_chat("/point {} {} {} {} {} {}".format(*target))
self.point_targets.append((target, time.time()))
# sleep before the bot can take any actions
# otherwise there might be bugs since the object is flashing
# deal with this in the task...
if sleep:
time.sleep(sleep)
def relative_head_pitch(self, angle):
# warning: pitch is flipped!
new_pitch = self.get_player().look.pitch - angle
self.set_look(self.get_player().look.yaw, new_pitch)
def _send_chat(self, chat: str):
logging.info("Sending chat: {}".format(chat))
self.memory.add_chat(self.memory.self_memid, chat)
return self._cpp_send_chat(chat)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--semseg_model_path", type=str, help="path to semantic segmentation model"
)
parser.add_argument("--gpu_id", type=int, default=-1, help="GPU id (-1 for cpu)")
parser.add_argument("--ttad_prev_model_path", help="path to previous TTAD model")
parser.add_argument("--ttad_model_dir", help="path to current listener model dir")
parser.add_argument("--ttad_bert_data_dir", help="path to annotated data")
parser.add_argument("--geoscorer_model_path", help="path to geoscorer model")
parser.add_argument("--draw_vis", action="store_true", help="use visdom to draw agent vision")
parser.add_argument(
"--no_default_behavior",
action="store_true",
help="do not perform default behaviors when idle",
)
parser.add_argument("--name", help="Agent login name")
parser.add_argument("--verbose", "-v", action="store_true", help="Debug logging")
parser.add_argument("--port", type=int, default=25565)
opts = parser.parse_args()
# set up stdout logging
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG if opts.verbose else logging.INFO)
sh.setFormatter(log_formatter)
logging.getLogger().addHandler(sh)
logging.info("Info logging")
logging.debug("Debug logging")
draw_fn = None
if opts.draw_vis:
import train_cnn
draw_fn = train_cnn.draw_img
set_start_method("spawn", force=True)
sa = CraftAssistAgent(
ttad_prev_model_path=opts.ttad_prev_model_path,
port=opts.port,
ttad_model_dir=opts.ttad_model_dir,
ttad_bert_data_dir=opts.ttad_bert_data_dir,
semseg_model_path=opts.semseg_model_path,
voxel_model_gpu_id=opts.gpu_id,
draw_fn=draw_fn,
no_default_behavior=opts.no_default_behavior,
name=opts.name,
geoscorer_model_path=opts.geoscorer_model_path,
)
sa.start()
| [] | [] | ["DB_FILE"] | [] | ["DB_FILE"] | python | 1 | 0 | |
synthtool/gcp/gapic_bazel.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional, Union
import os
import shutil
import tempfile
from synthtool import _tracked_paths, metadata, shell
from synthtool.log import logger
from synthtool.sources import git
GOOGLEAPIS_URL: str = git.make_repo_clone_url("googleapis/googleapis")
GOOGLEAPIS_PRIVATE_URL: str = git.make_repo_clone_url("googleapis/googleapis-private")
DISCOVERY_ARTIFACT_MANAGER_URL: str = git.make_repo_clone_url(
"googleapis/discovery-artifact-manager"
)
LOCAL_GOOGLEAPIS: Optional[str] = os.environ.get("SYNTHTOOL_GOOGLEAPIS")
LOCAL_DISCOVERY_ARTIFACT_MANAGER: Optional[str] = os.environ.get(
"SYNTHTOOL_DISCOVERY_ARTIFACT_MANAGER"
)
class GAPICBazel:
"""A synthtool component that can produce libraries using bazel build.
"""
def __init__(self):
self._ensure_dependencies_installed()
self._googleapis = None
self._googleapis_private = None
self._discovery_artifact_manager = None
def py_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "python", **kwargs)
def go_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "go", **kwargs)
def node_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "nodejs", **kwargs)
def csharp_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "csharp", **kwargs)
def php_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "php", **kwargs)
def java_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(
service, version, "java", tar_strip_components=0, **kwargs
)
def ruby_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "ruby", **kwargs)
def _generate_code(
self,
service: str,
version: str,
language: str,
*,
private: bool = False,
discogapic: bool = False,
proto_path: Union[str, Path] = None,
output_dir: Union[str, Path] = None,
bazel_target: str = None,
include_protos: bool = False,
proto_output_path: Union[str, Path] = None,
tar_strip_components: int = 1,
):
# Determine which googleapis repo to use
if discogapic:
api_definitions_repo = self._clone_discovery_artifact_manager()
api_definitions_repo_name = "discovery-artifact-manager"
elif private:
api_definitions_repo = self._clone_googleapis_private()
api_definitions_repo_name = "googleapis_private"
else:
api_definitions_repo = self._clone_googleapis()
api_definitions_repo_name = "googleapis"
# Sanity check: We should have a googleapis repo; if we do not,
# something went wrong, and we should abort.
if not api_definitions_repo:
raise RuntimeError(
f"Unable to generate {service}, the sources repository repository"
"is unavailable."
)
# Calculate proto_path if necessary.
if not bazel_target or include_protos:
# If bazel_target is not specified explicitly, we will need
# proto_path to calculate it. If include_protos is True,
# we will need the proto_path to copy the protos.
if not proto_path:
if bazel_target:
# Calculate proto_path from the full bazel target, which is
# in the format "//proto_path:target_name".
proto_path = bazel_target.split(":")[0][2:]
else:
# If bazel_target is not specified, assume the protos are
# simply under google/cloud, where most of the protos
# usually are.
proto_path = f"google/cloud/{service}/{version}"
protos = Path(proto_path)
if protos.is_absolute():
protos = protos.relative_to("/")
# Determine bazel target based on per-language patterns
# Java: google-cloud-{{assembly_name}}-{{version}}-java
# Go: gapi-cloud-{{assembly_name}}-{{version}}-go
# Python: {{assembly_name}}-{{version}}-py
# PHP: google-cloud-{{assembly_name}}-{{version}}-php
# Node.js: {{assembly_name}}-{{version}}-nodejs
# Ruby: google-cloud-{{assembly_name}}-{{version}}-ruby
# C#: google-cloud-{{assembly_name}}-{{version}}-csharp
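# For example (matching the tar-file comment further below): service "language",
# version "v1", language "nodejs" resolves to the bazel target
# //google/cloud/language/v1:language-v1-nodejs.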
if not bazel_target:
# Determine where the protos we are generating actually live.
# We can sometimes (but not always) determine this from the service
# and version; in other cases, the user must provide it outright.
parts = list(protos.parts)
while len(parts) > 0 and parts[0] != "google":
parts.pop(0)
if len(parts) == 0:
raise RuntimeError(
f"Cannot determine bazel_target from proto_path {protos}."
"Please set bazel_target explicitly."
)
if language == "python":
suffix = f"{service}-{version}-py"
elif language == "nodejs":
suffix = f"{service}-{version}-nodejs"
elif language == "go":
suffix = f"gapi-{'-'.join(parts[1:])}-go"
else:
suffix = f"{'-'.join(parts)}-{language}"
bazel_target = f"//{os.path.sep.join(parts)}:{suffix}"
# Sanity check: Do we have protos where we think we should?
if not (api_definitions_repo / protos).exists():
raise FileNotFoundError(
f"Unable to find directory for protos: {(api_definitions_repo / protos)}."
)
if not tuple((api_definitions_repo / protos).glob("*.proto")):
raise FileNotFoundError(
f"Directory {(api_definitions_repo / protos)} exists, but no protos found."
)
if not (api_definitions_repo / protos / "BUILD.bazel").exists():
raise FileNotFoundError(
f"File {(api_definitions_repo / protos / 'BUILD.bazel')} does not exist."
)
# Ensure the desired output directory exists.
# If none was provided, create a temporary directory.
if not output_dir:
output_dir = tempfile.mkdtemp()
output_dir = Path(output_dir).resolve()
# Let's build some stuff now.
cwd = os.getcwd()
os.chdir(str(api_definitions_repo))
bazel_run_args = [
"bazel",
"--max_idle_secs=240",
"build",
bazel_target,
]
logger.debug(f"Generating code for: {bazel_target}.")
shell.run(bazel_run_args)
# We've got a tar file!
# its location: bazel-bin/google/cloud/language/v1/language-v1-nodejs.tar.gz
# bazel_target: //google/cloud/language/v1:language-v1-nodejs
tar_file = (
f"bazel-bin{os.path.sep}{bazel_target[2:].replace(':', os.path.sep)}.tar.gz"
)
tar_run_args = [
"tar",
"-C",
str(output_dir),
f"--strip-components={tar_strip_components}",
"-xzf",
tar_file,
]
shell.run(tar_run_args)
# Get the *.protos files and put them in a protos dir in the output
if include_protos:
proto_files = protos.glob("**/*.proto")
# By default, put the protos at the root in a folder named 'protos'.
# Specific languages can be cased here to put them in a more language
# appropriate place.
if not proto_output_path:
proto_output_path = output_dir / "protos"
if language == "python":
# place protos alongside the *_pb2.py files
proto_output_path = (
output_dir / f"google/cloud/{service}_{version}/proto"
)
else:
proto_output_path = Path(output_dir / proto_output_path)
os.makedirs(proto_output_path, exist_ok=True)
for i in proto_files:
logger.debug(f"Copy: {i} to {proto_output_path / i.name}")
shutil.copyfile(i, proto_output_path / i.name)
logger.success(f"Placed proto files into {proto_output_path}.")
os.chdir(cwd)
# Sanity check: Does the output location have code in it?
# If not, complain.
if not tuple(output_dir.iterdir()):
raise RuntimeError(
f"Code generation seemed to succeed, but {output_dir} is empty."
)
# Huzzah, it worked.
logger.success(f"Generated code into {output_dir}.")
# Record this in the synthtool metadata.
metadata.add_client_destination(
source=api_definitions_repo_name,
api_name=service,
api_version=version,
language=language,
generator="bazel",
)
_tracked_paths.add(output_dir)
return output_dir
def _clone_googleapis(self):
if self._googleapis:
return self._googleapis
if LOCAL_GOOGLEAPIS:
self._googleapis = Path(LOCAL_GOOGLEAPIS).expanduser()
logger.debug(f"Using local googleapis at {self._googleapis}")
else:
logger.debug("Cloning googleapis.")
self._googleapis = git.clone(GOOGLEAPIS_URL)
return self._googleapis
def _clone_googleapis_private(self):
if self._googleapis_private:
return self._googleapis_private
if LOCAL_GOOGLEAPIS:
self._googleapis_private = Path(LOCAL_GOOGLEAPIS).expanduser()
logger.debug(
f"Using local googleapis at {self._googleapis_private} for googleapis-private"
)
else:
logger.debug("Cloning googleapis-private.")
self._googleapis_private = git.clone(GOOGLEAPIS_PRIVATE_URL)
return self._googleapis_private
def _clone_discovery_artifact_manager(self):
if self._discovery_artifact_manager:
return self._discovery_artifact_manager
if LOCAL_DISCOVERY_ARTIFACT_MANAGER:
self._discovery_artifact_manager = Path(
LOCAL_DISCOVERY_ARTIFACT_MANAGER
).expanduser()
logger.debug(
f"Using local discovery_artifact_manager at {self._discovery_artifact_manager} for googleapis-private"
)
else:
logger.debug("Cloning discovery-artifact-manager.")
self._discovery_artifact_manager = git.clone(DISCOVERY_ARTIFACT_MANAGER_URL)
return self._discovery_artifact_manager
def _ensure_dependencies_installed(self):
logger.debug("Ensuring dependencies.")
dependencies = ["bazel", "zip", "unzip", "tar"]
failed_dependencies = []
for dependency in dependencies:
return_code = shell.run(["which", dependency], check=False).returncode
if return_code:
failed_dependencies.append(dependency)
if failed_dependencies:
raise EnvironmentError(
f"Dependencies missing: {', '.join(failed_dependencies)}"
)
| [] | [] | ["SYNTHTOOL_DISCOVERY_ARTIFACT_MANAGER\"", "SYNTHTOOL_GOOGLEAPIS"] | [] | ["SYNTHTOOL_DISCOVERY_ARTIFACT_MANAGER\"", "SYNTHTOOL_GOOGLEAPIS"] | python | 2 | 0 | |
cmd/utils_test.go
|
package cmd
import (
"os"
"path"
"gopkg.in/check.v1"
"launchpad.net/gnuflag"
)
func (s *S) TestJoinWithUserDir(c *check.C) {
expected := path.Join(os.Getenv("MEGAM_HOME"), "a", "b")
path := JoinWithUserDir("a", "b")
c.Assert(path, check.Equals, expected)
}
func (s *S) TestJoinWithUserDirHomePath(c *check.C) {
defer os.Setenv("MEGAM_HOME", os.Getenv("MEGAM_HOME"))
os.Setenv("MEGAM_HOME", "")
os.Setenv("MEGAM_HOME", "/wat")
path := JoinWithUserDir("a", "b")
c.Assert(path, check.Equals, "/wat/a/b")
}
func (s *S) TestMergeFlagSet(c *check.C) {
var x, y bool
fs1 := gnuflag.NewFlagSet("x", gnuflag.ExitOnError)
fs1.BoolVar(&x, "x", false, "Something")
fs2 := gnuflag.NewFlagSet("y", gnuflag.ExitOnError)
fs2.BoolVar(&y, "y", false, "Something")
ret := MergeFlagSet(fs1, fs2)
c.Assert(ret, check.Equals, fs1)
fs1.Parse(true, []string{"-x", "-y"})
c.Assert(x, check.Equals, true)
c.Assert(y, check.Equals, true)
}
| ["\"MEGAM_HOME\"", "\"MEGAM_HOME\""] | [] | ["MEGAM_HOME"] | [] | ["MEGAM_HOME"] | go | 1 | 0 | |
examples/main.go
|
package main
import (
"log"
"net/http"
"reflect"
"strings"
"github.com/emicklei/go-restful"
"fmt"
"github.com/go-chassis/go-restful-swagger20"
"os"
"path/filepath"
)
type Book struct {
Id string
Title string
Author string
Student []Student
}
type ID string
type Age int64
type Student struct {
Id ID `swag:"string"`
Age Age
Name string
}
func modelTypeNameHandler(st reflect.Type) (string, bool) {
key := st.String()
if len(st.Name()) == 0 {
key = strings.Replace(key, "[]", "", -1)
}
if key == "main.Age" {
return "number", true
}
return key, true
}
func main() {
ws := new(restful.WebService)
ws.Path("/book")
ws.Consumes(restful.MIME_JSON, restful.MIME_XML)
ws.Produces(restful.MIME_JSON, restful.MIME_XML)
restful.Add(ws)
ws.Route(ws.GET("/{medium}").To(getBookById).
Doc("Search a books").
Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
Param(ws.QueryParameter("language", "en,nl,de").DataType("string")).Metadata("tags", []string{"users", "desc"}).
Param(ws.HeaderParameter("If-Modified-Since", "last known timestamp").DataType("string").DataFormat("datetime")).
Returns(200, "haha", Book{}))
ws.Route(ws.PUT("/{medium}").To(modifyBookById).
Operation("modifyBookById").
Doc("modify a book").
Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
Reads(Book{Id: "2", Title: "go", Author: "lisi"}).
Do(returns200, returns500))
ws.Route(ws.POST("/add").To(addBook).
Notes("add a book").
Reads(Student{}).
Do(returns200, returns500))
ws.ApiVersion("1.0.1")
val := os.Getenv("SWAGGERFILEPATH")
fmt.Println(val)
if val == "" {
val, _ = filepath.Abs(filepath.Dir(os.Args[0]))
}
config := swagger.Config{
WebServices: restful.DefaultContainer.RegisteredWebServices(), // you control what services are visible
WebServicesUrl: "http://localhost:8080",
ApiPath: "/apidocs.json",
//FileStyle: "json",
OpenService: true,
SwaggerPath: "/apidocs/",
OutFilePath: filepath.Join(val, "api.yaml"),
ModelTypeNameHandler: modelTypeNameHandler}
config.Info.Description = "This is a sample server Book server"
config.Info.Title = "swagger Book"
swagger.RegisterSwaggerService(config, restful.DefaultContainer)
log.Print("start listening on localhost:8080")
server := &http.Server{Addr: ":8080", Handler: restful.DefaultContainer}
log.Fatal(server.ListenAndServe())
}
func getBookById(req *restful.Request, resp *restful.Response) {
book := Book{Id: "1", Title: "java", Author: "zhangsan"}
id := req.PathParameter("medium")
if id != book.Id {
resp.WriteErrorString(http.StatusNotFound, "Book could not be found.")
} else {
resp.WriteEntity(book)
}
}
func modifyBookById(req *restful.Request, resp *restful.Response) {}
func addBook(req *restful.Request, resp *restful.Response) {}
func returns200(b *restful.RouteBuilder) {
b.Returns(http.StatusOK, "OK", map[string]Book{})
}
func returns500(b *restful.RouteBuilder) {
b.Returns(http.StatusInternalServerError, "Bummer, something went wrong", nil)
}
| ["\"SWAGGERFILEPATH\""] | [] | ["SWAGGERFILEPATH"] | [] | ["SWAGGERFILEPATH"] | go | 1 | 0 | |
src/php/brats/brats_suite_test.go
|
package brats_test
import (
"flag"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/cloudfoundry/libbuildpack"
"github.com/cloudfoundry/libbuildpack/bratshelper"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func init() {
flag.StringVar(&cutlass.DefaultMemory, "memory", "256M", "default memory for pushed apps")
flag.StringVar(&cutlass.DefaultDisk, "disk", "384M", "default disk for pushed apps")
flag.Parse()
}
var _ = SynchronizedBeforeSuite(func() []byte {
// Run once
return bratshelper.InitBpData(os.Getenv("CF_STACK"), ApiHasStackAssociation()).Marshal()
}, func(data []byte) {
// Run on all nodes
bratshelper.Data.Unmarshal(data)
Expect(cutlass.CopyCfHome()).To(Succeed())
cutlass.SeedRandom()
cutlass.DefaultStdoutStderr = GinkgoWriter
})
var _ = SynchronizedAfterSuite(func() {
// Run on all nodes
}, func() {
// Run once
_ = cutlass.DeleteOrphanedRoutes()
Expect(cutlass.DeleteBuildpack(strings.Replace(bratshelper.Data.Cached, "_buildpack", "", 1))).To(Succeed())
Expect(cutlass.DeleteBuildpack(strings.Replace(bratshelper.Data.Uncached, "_buildpack", "", 1))).To(Succeed())
Expect(os.Remove(bratshelper.Data.CachedFile)).To(Succeed())
})
func TestBrats(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Brats Suite")
}
func FirstOfVersionLine(line string) string {
bpDir, err := cutlass.FindRoot()
if err != nil {
panic(err)
}
manifest, err := libbuildpack.NewManifest(bpDir, nil, time.Now())
if err != nil {
panic(err)
}
deps := manifest.AllDependencyVersions("php")
versions, err := libbuildpack.FindMatchingVersions(line, deps)
if err != nil {
panic(err)
}
return versions[0]
}
func CopyBratsWithFramework(phpVersion, webserver, webserverVersion string) *cutlass.App {
manifest, err := libbuildpack.NewManifest(bratshelper.Data.BpDir, nil, time.Now())
Expect(err).ToNot(HaveOccurred())
if phpVersion == "" {
phpVersion = "x"
}
if strings.Contains(phpVersion, "x") {
deps := manifest.AllDependencyVersions("php")
phpVersion, err = libbuildpack.FindMatchingVersion(phpVersion, deps)
Expect(err).ToNot(HaveOccurred())
}
if webserver == "" {
webserver = "httpd"
}
if webserverVersion == "" {
webserverVersion = "x"
}
if strings.Contains(webserverVersion, "x") {
deps := manifest.AllDependencyVersions(webserver)
webserverVersion, err = libbuildpack.FindMatchingVersion(webserverVersion, deps)
Expect(err).ToNot(HaveOccurred())
}
dir, err := cutlass.CopyFixture(filepath.Join(bratshelper.Data.BpDir, "fixtures", "brats"))
Expect(err).ToNot(HaveOccurred())
options := make(map[string]interface{})
options["PHP_VM"] = "php"
options["PHP_VERSION"] = phpVersion
options["WEB_SERVER"] = webserver
options[strings.ToUpper(webserver)+"_VERSION"] = webserverVersion
options["PHP_EXTENSIONS"] = phpExtensions(phpVersion)
options["ZEND_EXTENSIONS"] = zendExtensions(phpVersion)
Expect(libbuildpack.NewJSON().Write(filepath.Join(dir, ".bp-config", "options.json"), options)).To(Succeed())
return cutlass.New(dir)
}
func CopyBrats(version string) *cutlass.App {
return CopyBratsWithFramework(version, "", "")
}
func PushApp(app *cutlass.App) {
Expect(app.Push()).To(Succeed())
Eventually(app.InstanceStates, 20*time.Second).Should(Equal([]string{"RUNNING"}))
}
func modulesForPHPVersion(version string) []string {
manifest := struct {
ManifestEntries []struct {
Dependency struct {
Name string `yaml:"name"`
Version string `yaml:"version"`
Modules []string `yaml:"modules"`
} `yaml:",inline"`
} `yaml:"dependencies"`
}{}
manifestPath := filepath.Join(bratshelper.Data.BpDir, "manifest.yml")
err := libbuildpack.NewYAML().Load(manifestPath, &manifest)
Expect(err).ToNot(HaveOccurred())
for _, entry := range manifest.ManifestEntries {
if entry.Dependency.Name == "php" {
if entry.Dependency.Version == version {
return entry.Dependency.Modules
}
}
}
return []string{}
}
func zendExtensions(phpVersion string) (extensions []string) {
for _, module := range modulesForPHPVersion(phpVersion) {
if isZendExtension(module) {
extensions = append(extensions, module)
}
}
return
}
func phpExtensions(phpVersion string) (extensions []string) {
for _, module := range modulesForPHPVersion(phpVersion) {
if !isZendExtension(module) {
extensions = append(extensions, module)
}
}
return
}
func isZendExtension(moduleName string) bool {
return moduleName == "ioncube" || moduleName == "opcache" || moduleName == "xdebug"
}
func ApiHasStackAssociation() bool {
supported, err := cutlass.ApiGreaterThan("2.113.0")
Expect(err).NotTo(HaveOccurred())
return supported
}
| ["\"CF_STACK\""] | [] | ["CF_STACK"] | [] | ["CF_STACK"] | go | 1 | 0 | |
api/server/server.go
|
package server
import (
"crypto/tls"
"net"
"net/http"
"os"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/build"
"github.com/docker/docker/api/server/router/container"
"github.com/docker/docker/api/server/router/local"
"github.com/docker/docker/api/server/router/network"
"github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
"github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/utils"
"github.com/docker/go-connections/sockets"
"github.com/gorilla/mux"
"golang.org/x/net/context"
)
// versionMatcher defines a variable matcher to be parsed by the router
// when a request is about to be served.
const versionMatcher = "/v{version:[0-9.]+}"
// Config provides the configuration for the API server
type Config struct {
Logging bool
EnableCors bool
CorsHeaders string
AuthorizationPluginNames []string
Version string
SocketGroup string
TLSConfig *tls.Config
Addrs []Addr
}
// Server contains instance details for the server
type Server struct {
cfg *Config
servers []*HTTPServer
routers []router.Router
authZPlugins []authorization.Plugin
}
// Addr contains string representation of address and its protocol (tcp, unix...).
type Addr struct {
Proto string
Addr string
}
// New returns a new instance of the server based on the specified configuration.
// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
func New(cfg *Config) (*Server, error) {
s := &Server{
cfg: cfg,
}
for _, addr := range cfg.Addrs {
srv, err := s.newServer(addr.Proto, addr.Addr)
if err != nil {
return nil, err
}
logrus.Debugf("Server created for HTTP on %s (%s)", addr.Proto, addr.Addr)
s.servers = append(s.servers, srv...)
}
return s, nil
}
// Close closes servers and thus stop receiving requests
func (s *Server) Close() {
for _, srv := range s.servers {
if err := srv.Close(); err != nil {
logrus.Error(err)
}
}
}
// ServeAPI loops through all initialized servers and spawns a goroutine
// with the Serve method for each. It sets CreateMux() as Handler also.
func (s *Server) ServeAPI() error {
var chErrors = make(chan error, len(s.servers))
for _, srv := range s.servers {
srv.srv.Handler = s.CreateMux()
go func(srv *HTTPServer) {
var err error
logrus.Infof("API listen on %s", srv.l.Addr())
if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
err = nil
}
chErrors <- err
}(srv)
}
for i := 0; i < len(s.servers); i++ {
err := <-chErrors
if err != nil {
return err
}
}
return nil
}
// HTTPServer contains an instance of http server and the listener.
// srv *http.Server, contains configuration to create a http server and a mux router with all api end points.
// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router.
type HTTPServer struct {
srv *http.Server
l net.Listener
}
// Serve starts listening for inbound requests.
func (s *HTTPServer) Serve() error {
return s.srv.Serve(s.l)
}
// Close closes the HTTPServer from listening for the inbound requests.
func (s *HTTPServer) Close() error {
return s.l.Close()
}
func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) {
logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
}
func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) {
if s.cfg.TLSConfig == nil || s.cfg.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert {
logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
if l, err = sockets.NewTCPSocket(addr, s.cfg.TLSConfig); err != nil {
return nil, err
}
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
return
}
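// makeHTTPHandler adapts an APIFunc to an http.HandlerFunc, wrapping it with the
// global middlewares, a background context and the mux path variables.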
func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// log the handler call
logrus.Debugf("Calling %s %s", r.Method, r.URL.Path)
// Define the context that we'll pass around to share info
// like the docker-request-id.
//
// The 'context' will be used for global data that should
// apply to all requests. Data that is specific to the
// immediate function being called should still be passed
// as 'args' on the function call.
ctx := context.Background()
handlerFunc := s.handleWithGlobalMiddlewares(handler)
vars := mux.Vars(r)
if vars == nil {
vars = make(map[string]string)
}
if err := handlerFunc(ctx, w, r, vars); err != nil {
logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.URL.Path, utils.GetErrorMessage(err))
httputils.WriteError(w, err)
}
}
}
// InitRouters initializes a list of routers for the server.
func (s *Server) InitRouters(d *daemon.Daemon) {
s.addRouter(container.NewRouter(d))
s.addRouter(local.NewRouter(d))
s.addRouter(network.NewRouter(d))
s.addRouter(system.NewRouter(d))
s.addRouter(volume.NewRouter(d))
s.addRouter(build.NewRouter(d))
}
// addRouter adds a new router to the server.
func (s *Server) addRouter(r router.Router) {
s.routers = append(s.routers, r)
}
// CreateMux initializes the main router the server uses.
// we keep enableCors just for legacy usage, need to be removed in the future
func (s *Server) CreateMux() *mux.Router {
m := mux.NewRouter()
if os.Getenv("DEBUG") != "" {
profilerSetup(m, "/debug/")
}
logrus.Debugf("Registering routers")
for _, apiRouter := range s.routers {
for _, r := range apiRouter.Routes() {
f := s.makeHTTPHandler(r.Handler())
logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
m.Path(r.Path()).Methods(r.Method()).Handler(f)
}
}
return m
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 | |
src/python/supply/supply.go
|
package supply
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/cloudfoundry/python-buildpack/src/python/conda"
"github.com/cloudfoundry/python-buildpack/src/python/pipfile"
"os/exec"
"github.com/cloudfoundry/libbuildpack"
"github.com/cloudfoundry/libbuildpack/snapshot"
"github.com/kr/text"
)
type Stager interface {
BuildDir() string
CacheDir() string
DepDir() string
DepsIdx() string
LinkDirectoryInDepDir(string, string) error
WriteEnvFile(string, string) error
WriteProfileD(string, string) error
}
type Manifest interface {
AllDependencyVersions(string) []string
DefaultVersion(string) (libbuildpack.Dependency, error)
IsCached() bool
}
type Installer interface {
InstallDependency(libbuildpack.Dependency, string) error
InstallOnlyVersion(string, string) error
}
type Command interface {
Execute(string, io.Writer, io.Writer, string, ...string) error
Output(dir string, program string, args ...string) (string, error)
RunWithOutput(*exec.Cmd) ([]byte, error)
}
type Supplier struct {
PythonVersion string
Manifest Manifest
Installer Installer
Stager Stager
Command Command
Log *libbuildpack.Logger
Logfile *os.File
HasNltkData bool
removeRequirementsText bool
}
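// Run dispatches to the conda supplier when the app ships an environment.yml,
// and otherwise runs the regular pip-based supply flow in RunPython.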
func Run(s *Supplier) error {
if exists, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "environment.yml")); err != nil {
s.Log.Error("Error checking existence of environment.yml: %v", err)
return err
} else if exists {
return conda.Run(conda.New(s.Installer, s.Stager, s.Command, s.Log))
} else {
return RunPython(s)
}
}
func RunPython(s *Supplier) error {
s.Log.BeginStep("Supplying Python")
dirSnapshot := snapshot.Dir(s.Stager.BuildDir(), s.Log)
if err := s.SetupCacheDir(); err != nil {
s.Log.Error("Error setting up cache: %v", err)
return err
}
if err := s.CopyRuntimeTxt(); err != nil {
s.Log.Error("Error copying runtime.txt to deps dir: %v", err)
return err
}
if err := s.HandlePipfile(); err != nil {
s.Log.Error("Error checking for Pipfile.lock: %v", err)
return err
}
if err := s.InstallPython(); err != nil {
s.Log.Error("Could not install python: %v", err)
return err
}
if err := s.InstallPipPop(); err != nil {
s.Log.Error("Could not install pip pop: %v", err)
return err
}
if err := s.InstallPipEnv(); err != nil {
s.Log.Error("Could not install pipenv: %v", err)
return err
}
if err := s.HandleRequirementstxt(); err != nil {
s.Log.Error("Error checking requirements.txt: %v", err)
return err
}
if err := s.HandlePylibmc(); err != nil {
s.Log.Error("Error checking Pylibmc: %v", err)
return err
}
if err := s.HandleFfi(); err != nil {
s.Log.Error("Error checking ffi: %v", err)
return err
}
if err := s.HandleMercurial(); err != nil {
s.Log.Error("Could not handle pip mercurial dependencies: %v", err)
return err
}
if err := s.UninstallUnusedDependencies(); err != nil {
s.Log.Error("Error uninstalling unused dependencies: %v", err)
return err
}
vendored, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "vendor"))
if err != nil {
return fmt.Errorf("could not check vendor existence: %v", err)
}
if vendored {
if err := s.RunPipVendored(); err != nil {
s.Log.Error("Could not install vendored pip packages: %v", err)
return err
}
} else {
if err := s.RunPipUnvendored(); err != nil {
s.Log.Error("Could not install pip packages: %v", err)
return err
}
}
if err := s.DownloadNLTKCorpora(); err != nil {
s.Log.Error("Could not download NLTK Corpora: %v", err)
return err
}
if err := s.RewriteShebangs(); err != nil {
s.Log.Error("Unable to rewrite she-bangs: %s", err.Error())
return err
}
if err := s.CreateDefaultEnv(); err != nil {
s.Log.Error("Unable to setup default environment: %s", err.Error())
return err
}
if cacheDirSize, err := s.Command.Output(os.Getenv("XDG_CACHE_HOME"), "du", "--summarize", os.Getenv("XDG_CACHE_HOME")); err == nil {
s.Log.Debug("Size of pip cache dir: %s", cacheDirSize)
}
if s.removeRequirementsText {
if err := os.Remove(filepath.Join(s.Stager.BuildDir(), "requirements.txt")); err != nil {
s.Log.Error("Unable to clean up app directory: %s", err.Error())
return err
}
}
dirSnapshot.Diff()
return nil
}
func (s *Supplier) CopyRuntimeTxt() error {
if exists, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "runtime.txt")); err != nil {
return err
} else if exists {
if err = libbuildpack.CopyFile(filepath.Join(s.Stager.BuildDir(), "runtime.txt"), filepath.Join(s.Stager.DepDir(), "runtime.txt")); err != nil {
return err
}
}
return nil
}
func (s *Supplier) HandleMercurial() error {
if err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "grep", "-Fiq", "hg+", "requirements.txt"); err != nil {
return nil
}
if s.Manifest.IsCached() {
s.Log.Warning("Cloud Foundry does not support Pip Mercurial dependencies while in offline-mode. Vendor your dependencies if they do not work.")
}
if err := s.Command.Execute(s.Stager.BuildDir(), indentWriter(os.Stdout), indentWriter(os.Stderr), "python", "-m", "pip", "install", "mercurial"); err != nil {
return err
}
if err := s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "python", "bin"), "bin"); err != nil {
return err
}
return nil
}
func (s *Supplier) HandlePipfile() error {
var pipfileExists, runtimeExists bool
var pipfileJson pipfile.Lock
var err error
if pipfileExists, err = libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "Pipfile.lock")); err != nil {
return err
}
if runtimeExists, err = libbuildpack.FileExists(filepath.Join(s.Stager.DepDir(), "runtime.txt")); err != nil {
return err
}
if pipfileExists && !runtimeExists {
if err = libbuildpack.NewJSON().Load(filepath.Join(s.Stager.BuildDir(), "Pipfile.lock"), &pipfileJson); err != nil {
return err
}
formattedVersion := s.formatVersion(pipfileJson.Meta.Requires.Version)
if err := ioutil.WriteFile(filepath.Join(s.Stager.DepDir(), "runtime.txt"), []byte(formattedVersion), 0644); err != nil {
return err
}
}
return nil
}
func (s *Supplier) InstallPython() error {
var dep libbuildpack.Dependency
runtimetxtExists, err := libbuildpack.FileExists(filepath.Join(s.Stager.DepDir(), "runtime.txt"))
if err != nil {
return err
}
if runtimetxtExists {
userDefinedVersion, err := ioutil.ReadFile(filepath.Join(s.Stager.DepDir(), "runtime.txt"))
if err != nil {
return err
}
		s.PythonVersion = strings.TrimSpace(strings.NewReplacer("\r", "", "\n", "").Replace(string(userDefinedVersion)))
s.Log.Debug("***Version info: (%s)", s.PythonVersion)
}
if s.PythonVersion != "" {
versions := s.Manifest.AllDependencyVersions("python")
shortPythonVersion := strings.TrimLeft(s.PythonVersion, "python-")
s.Log.Debug("***Version info: (%s) (%s)", s.PythonVersion, shortPythonVersion)
ver, err := libbuildpack.FindMatchingVersion(shortPythonVersion, versions)
if err != nil {
return err
}
dep.Name = "python"
dep.Version = ver
s.Log.Debug("***Version info: %s, %s, %s", dep.Name, s.PythonVersion, dep.Version)
} else {
var err error
dep, err = s.Manifest.DefaultVersion("python")
if err != nil {
return err
}
}
pythonInstallDir := filepath.Join(s.Stager.DepDir(), "python")
if err := s.Installer.InstallDependency(dep, pythonInstallDir); err != nil {
return err
}
if err := s.Stager.LinkDirectoryInDepDir(filepath.Join(pythonInstallDir, "bin"), "bin"); err != nil {
return err
}
if found, err := libbuildpack.FileExists(filepath.Join(pythonInstallDir, "usr", "lib", "x86_64-linux-gnu")); err != nil {
return err
} else if found {
if err := s.Stager.LinkDirectoryInDepDir(filepath.Join(pythonInstallDir, "usr", "lib", "x86_64-linux-gnu"), "lib"); err != nil {
return err
}
}
if err := s.Stager.LinkDirectoryInDepDir(filepath.Join(pythonInstallDir, "lib"), "lib"); err != nil {
return err
}
if err := os.Setenv("PATH", fmt.Sprintf("%s:%s", filepath.Join(s.Stager.DepDir(), "bin"), os.Getenv("PATH"))); err != nil {
return err
}
if err := os.Setenv("PYTHONPATH", filepath.Join(s.Stager.DepDir())); err != nil {
return err
}
return nil
}
func (s *Supplier) RewriteShebangs() error {
files, err := filepath.Glob(filepath.Join(s.Stager.DepDir(), "bin", "*"))
if err != nil {
return err
}
for _, file := range files {
if fileInfo, err := os.Stat(file); err != nil {
return err
} else if fileInfo.IsDir() {
continue
}
fileContents, err := ioutil.ReadFile(file)
if err != nil {
return err
}
shebangRegex := regexp.MustCompile(`^#!/.*/python.*`)
fileContents = shebangRegex.ReplaceAll(fileContents, []byte("#!/usr/bin/env python"))
if err := ioutil.WriteFile(file, fileContents, 0755); err != nil {
return err
}
}
return nil
}
func (s *Supplier) InstallPipPop() error {
tempPath := filepath.Join("/tmp", "pip-pop")
if err := s.Installer.InstallOnlyVersion("pip-pop", tempPath); err != nil {
return err
}
if err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "python", "-m", "pip", "install", "pip-pop", "--exists-action=w", "--no-index", fmt.Sprintf("--find-links=%s", tempPath)); err != nil {
s.Log.Debug("******Path val: %s", os.Getenv("PATH"))
return err
}
if err := s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "python", "bin"), "bin"); err != nil {
return err
}
return nil
}
func (s *Supplier) InstallPipEnv() error {
requirementstxtExists, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "requirements.txt"))
if err != nil {
return err
} else if requirementstxtExists {
return nil
}
pipfileExists, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "Pipfile"))
if err != nil {
return err
} else if !pipfileExists {
return nil
}
hasLockFile, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "Pipfile.lock"))
if err != nil {
return fmt.Errorf("could not check Pipfile.lock existence: %v", err)
} else if hasLockFile {
s.Log.Info("Generating 'requirements.txt' from Pipfile.lock")
requirementsContents, err := pipfileToRequirements(filepath.Join(s.Stager.BuildDir(), "Pipfile.lock"))
if err != nil {
return fmt.Errorf("failed to write `requirement.txt` from Pipfile.lock: %s", err.Error())
}
return s.writeTempRequirementsTxt(requirementsContents)
}
s.Log.Info("Installing pipenv")
if err := s.Installer.InstallOnlyVersion("pipenv", filepath.Join("/tmp", "pipenv")); err != nil {
return err
}
if err := s.installFfi(); err != nil {
return err
}
for _, dep := range []string{"setuptools_scm", "pytest-runner", "parver", "invoke", "pipenv", "wheel"} {
s.Log.Info("Installing %s", dep)
out := &bytes.Buffer{}
stderr := &bytes.Buffer{}
if err := s.Command.Execute(s.Stager.BuildDir(), out, stderr, "python", "-m", "pip", "install", dep, "--exists-action=w", "--no-index", fmt.Sprintf("--find-links=%s", filepath.Join("/tmp", "pipenv"))); err != nil {
return fmt.Errorf("Failed to install %s: %v.\nStdout: %v\nStderr: %v", dep, err, out, stderr)
}
}
s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "python", "bin"), "bin")
s.Log.Info("Generating 'requirements.txt' with pipenv")
cmd := exec.Command("pipenv", "lock", "--requirements")
cmd.Dir = s.Stager.BuildDir()
cmd.Env = append(os.Environ(), "VIRTUALENV_NEVER_DOWNLOAD=true")
output, err := s.Command.RunWithOutput(cmd)
if err != nil {
return err
}
outputString := string(output)
// Remove output due to virtualenv
if strings.HasPrefix(outputString, "Using ") {
reqs := strings.SplitN(outputString, "\n", 2)
if len(reqs) > 0 {
outputString = reqs[1]
}
}
return s.writeTempRequirementsTxt(outputString)
}
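// pipfileToRequirements converts a Pipfile.lock into requirements.txt-style content:
// one "-i <url>" line per source plus one pinned line per default package.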
func pipfileToRequirements(lockFilePath string) (string, error) {
var lockFile struct {
Meta struct {
Sources []struct {
URL string
}
} `json:"_meta"`
Default map[string]struct {
Version string
}
}
lockContents, err := ioutil.ReadFile(lockFilePath)
if err != nil {
return "", err
}
err = json.Unmarshal(lockContents, &lockFile)
if err != nil {
return "", err
}
buf := &bytes.Buffer{}
for _, source := range lockFile.Meta.Sources {
fmt.Fprintf(buf, "-i %s\n", source.URL)
}
for pkg, obj := range lockFile.Default {
fmt.Fprintf(buf, "%s%s\n", pkg, obj.Version)
}
return buf.String(), nil
}
func (s *Supplier) HandlePylibmc() error {
memcachedDir := filepath.Join(s.Stager.DepDir(), "libmemcache")
if err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "pip-grep", "-s", "requirements.txt", "pylibmc"); err == nil {
s.Log.BeginStep("Noticed pylibmc. Bootstrapping libmemcached.")
if err := s.Installer.InstallOnlyVersion("libmemcache", memcachedDir); err != nil {
return err
}
os.Setenv("LIBMEMCACHED", memcachedDir)
s.Stager.WriteEnvFile("LIBMEMCACHED", memcachedDir)
s.Stager.LinkDirectoryInDepDir(filepath.Join(memcachedDir, "lib"), "lib")
s.Stager.LinkDirectoryInDepDir(filepath.Join(memcachedDir, "lib", "sasl2"), "lib")
s.Stager.LinkDirectoryInDepDir(filepath.Join(memcachedDir, "lib", "pkgconfig"), "pkgconfig")
s.Stager.LinkDirectoryInDepDir(filepath.Join(memcachedDir, "include"), "include")
}
return nil
}
func (s *Supplier) HandleRequirementstxt() error {
if exists, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "requirements.txt")); err != nil {
return err
} else if exists {
return nil
}
if exists, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "setup.py")); err != nil {
return err
} else if !exists {
return nil
}
return s.writeTempRequirementsTxt("-e .")
}
func (s *Supplier) installFfi() error {
ffiDir := filepath.Join(s.Stager.DepDir(), "libffi")
// Only install libffi if we haven't done so already
// This could be installed twice because pipenv installs it, but
// we later run HandleFfi, which installs it if a dependency
// from requirements.txt needs libffi.
if os.Getenv("LIBFFI") != ffiDir {
s.Log.BeginStep("Noticed dependency requiring libffi. Bootstrapping libffi.")
if err := s.Installer.InstallOnlyVersion("libffi", ffiDir); err != nil {
return err
}
versions := s.Manifest.AllDependencyVersions("libffi")
os.Setenv("LIBFFI", ffiDir)
s.Stager.WriteEnvFile("LIBFFI", ffiDir)
s.Stager.LinkDirectoryInDepDir(filepath.Join(ffiDir, "lib"), "lib")
s.Stager.LinkDirectoryInDepDir(filepath.Join(ffiDir, "lib", "pkgconfig"), "pkgconfig")
s.Stager.LinkDirectoryInDepDir(filepath.Join(ffiDir, "lib", "libffi-"+versions[0], "include"), "include")
}
return nil
}
func (s *Supplier) HandleFfi() error {
if err := s.Command.Execute(s.Stager.BuildDir(), ioutil.Discard, ioutil.Discard, "pip-grep", "-s", "requirements.txt", "pymysql", "argon2-cffi", "bcrypt", "cffi", "cryptography", "django[argon2]", "Django[argon2]", "django[bcrypt]", "Django[bcrypt]", "PyNaCl", "pyOpenSSL", "PyOpenSSL", "requests[security]", "misaka"); err == nil {
return s.installFfi()
}
return nil
}
func (s *Supplier) UninstallUnusedDependencies() error {
requirementsDeclaredExists, err := libbuildpack.FileExists(filepath.Join(s.Stager.DepDir(), "python", "requirements-declared.txt"))
if err != nil {
return err
}
if requirementsDeclaredExists {
fileContents, _ := ioutil.ReadFile(filepath.Join(s.Stager.DepDir(), "python", "requirements-declared.txt"))
s.Log.Info("requirements-declared: %s", string(fileContents))
staleContents, err := s.Command.Output(
s.Stager.BuildDir(),
"pip-diff",
"--stale",
filepath.Join(s.Stager.DepDir(), "python", "requirements-declared.txt"),
filepath.Join(s.Stager.BuildDir(), "requirements.txt"),
"--exclude",
"setuptools",
"pip",
"wheel",
)
if err != nil {
return err
}
if staleContents == "" {
return nil
}
if err := ioutil.WriteFile(filepath.Join(s.Stager.DepDir(), "python", "requirements-stale.txt"), []byte(staleContents), 0644); err != nil {
return err
}
s.Log.BeginStep("Uninstalling stale dependencies")
if err := s.Command.Execute(
s.Stager.BuildDir(),
indentWriter(os.Stdout),
indentWriter(os.Stderr),
"python",
"-m",
"pip",
"uninstall",
"-r",
filepath.Join(s.Stager.DepDir(), "python", "requirements-stale.txt", "-y", "--exists-action=w"),
); err != nil {
return err
}
}
return nil
}
func (s *Supplier) RunPipUnvendored() error {
shouldContinue, requirementsPath, err := s.shouldRunPip()
if err != nil {
return err
} else if !shouldContinue {
return nil
}
// Search lines from requirements.txt that begin with -i, -index-url, or -extra-index-url
// and add them to the pydistutils file. We do this so that easy_install will use
// the same indexes as pip. This may not actually be necessary because it's possible that
// easy_install has been fixed upstream, but it has no ill side-effects.
reqs, err := ioutil.ReadFile(requirementsPath)
if err != nil {
return fmt.Errorf("could not read requirements.txt: %v", err)
}
distUtils := map[string][]string{}
re := regexp.MustCompile(`(?m)^\s*(-i|-index-url)\s+(.*)$`)
match := re.FindStringSubmatch(string(reqs))
if len(match) > 0 {
distUtils["index_url"] = []string{match[len(match)-1]}
}
re = regexp.MustCompile(`(?m)^\s*-extra-index-url\s+(.*)$`)
matches := re.FindAllStringSubmatch(string(reqs), -1)
for _, m := range matches {
distUtils["find_links"] = append(distUtils["find_links"], m[len(m)-1])
}
if err := writePyDistUtils(distUtils); err != nil {
return err
}
installArgs := []string{"-m", "pip", "install", "-r", requirementsPath, "--ignore-installed", "--exists-action=w", "--src=" + filepath.Join(s.Stager.DepDir(), "src")}
if err := s.Command.Execute(s.Stager.BuildDir(), indentWriter(os.Stdout), indentWriter(os.Stderr), "python", installArgs...); err != nil {
return fmt.Errorf("could not run pip: %v", err)
}
return s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "python", "bin"), "bin")
}
func (s *Supplier) RunPipVendored() error {
shouldContinue, requirementsPath, err := s.shouldRunPip()
if err != nil {
return err
} else if !shouldContinue {
return nil
}
distUtils := map[string][]string{
"allows_hosts": {""},
"find_links": {filepath.Join(s.Stager.BuildDir(), "vendor")},
}
if err := writePyDistUtils(distUtils); err != nil {
return err
}
installArgs := []string{
"-m",
"pip",
"install",
"-r",
requirementsPath,
"--ignore-installed",
"--exists-action=w",
"--src=" + filepath.Join(s.Stager.DepDir(), "src"),
"--no-index",
"--no-build-isolation",
"--find-links=file://" + filepath.Join(s.Stager.BuildDir(), "vendor"),
}
// Remove lines from requirements.txt that begin with -i
// because specifying index links here makes pip always want internet access,
// and pipenv generates requirements.txt with -i.
originalReqs, err := ioutil.ReadFile(requirementsPath)
if err != nil {
return fmt.Errorf("could not read requirements.txt: %v", err)
}
re := regexp.MustCompile(`(?m)^\s*-i.*$`)
modifiedReqs := re.ReplaceAll(originalReqs, []byte{})
err = ioutil.WriteFile(requirementsPath, modifiedReqs, 0644)
if err != nil {
return fmt.Errorf("could not overwrite requirements file: %v", err)
}
if err := s.Command.Execute(s.Stager.BuildDir(), indentWriter(os.Stdout), indentWriter(os.Stderr), "python", installArgs...); err != nil {
s.Log.Info("Running pip install without indexes failed. Not all dependencies were vendored. Trying again with indexes.")
if err := ioutil.WriteFile(requirementsPath, originalReqs, 0644); err != nil {
return fmt.Errorf("could not overwrite modified requirements file: %v", err)
}
if err := s.RunPipUnvendored(); err != nil {
s.Log.Info("Running pip install failed. You need to include all dependencies in the vendor directory.")
return err
}
}
return s.Stager.LinkDirectoryInDepDir(filepath.Join(s.Stager.DepDir(), "python", "bin"), "bin")
}
func (s *Supplier) CreateDefaultEnv() error {
var environmentVars = map[string]string{
"PYTHONPATH": s.Stager.DepDir(),
"LIBRARY_PATH": filepath.Join(s.Stager.DepDir(), "lib"),
"PYTHONHOME": filepath.Join(s.Stager.DepDir(), "python"),
"PYTHONUNBUFFERED": "1",
"PYTHONHASHSEED": "random",
"LANG": "en_US.UTF-8",
}
scriptContents := fmt.Sprintf(`export LANG=${LANG:-en_US.UTF-8}
export PYTHONHASHSEED=${PYTHONHASHSEED:-random}
export PYTHONPATH=$DEPS_DIR/%s
export PYTHONHOME=$DEPS_DIR/%s/python
export PYTHONUNBUFFERED=1
export FORWARDED_ALLOW_IPS='*'
export GUNICORN_CMD_ARGS=${GUNICORN_CMD_ARGS:-'--access-logfile -'}
`, s.Stager.DepsIdx(), s.Stager.DepsIdx())
if s.HasNltkData {
scriptContents += fmt.Sprintf(`export NLTK_DATA=$DEPS_DIR/%s/python/nltk_data`, s.Stager.DepsIdx())
environmentVars["NLTK_DATA"] = filepath.Join(s.Stager.DepDir(), "python", "nltk_data")
}
for envVar, envValue := range environmentVars {
if err := s.Stager.WriteEnvFile(envVar, envValue); err != nil {
return err
}
}
return s.Stager.WriteProfileD("python.sh", scriptContents)
}
func (s *Supplier) DownloadNLTKCorpora() error {
if err := s.Command.Execute("/", ioutil.Discard, ioutil.Discard, "python", "-m", "nltk.downloader", "-h"); err != nil {
return nil
}
s.Log.BeginStep("Downloading NLTK corpora...")
if exists, err := libbuildpack.FileExists(filepath.Join(s.Stager.BuildDir(), "nltk.txt")); err != nil {
return fmt.Errorf("Couldn't check nltk.txt existence: %v", err)
} else if !exists {
s.Log.Info("nltk.txt not found, not downloading any corpora")
return nil
}
bPackages, err := ioutil.ReadFile(filepath.Join(s.Stager.BuildDir(), "nltk.txt"))
if err != nil {
return err
}
sPackages := strings.TrimSpace(strings.NewReplacer("\r", " ", "\n", " ").Replace(string(bPackages)))
args := []string{"-m", "nltk.downloader", "-d", filepath.Join(s.Stager.DepDir(), "python", "nltk_data")}
args = append(args, strings.Split(sPackages, " ")...)
s.Log.BeginStep("Downloading NLTK packages: %s", sPackages)
if err := s.Command.Execute("/", indentWriter(os.Stdout), indentWriter(os.Stderr), "python", args...); err != nil {
return err
}
s.HasNltkData = true
return nil
}
func (s *Supplier) SetupCacheDir() error {
if err := os.Setenv("XDG_CACHE_HOME", filepath.Join(s.Stager.CacheDir(), "pip_cache")); err != nil {
return err
}
if err := s.Stager.WriteEnvFile("XDG_CACHE_HOME", filepath.Join(s.Stager.CacheDir(), "pip_cache")); err != nil {
return err
}
return nil
}
func writePyDistUtils(distUtils map[string][]string) error {
pyDistUtilsPath := filepath.Join(os.Getenv("HOME"), ".pydistutils.cfg")
b := strings.Builder{}
b.WriteString("[easy_install]\n")
for k, v := range distUtils {
b.WriteString(fmt.Sprintf("%s = %s\n", k, strings.Join(v, "\n\t")))
}
if err := ioutil.WriteFile(pyDistUtilsPath, []byte(b.String()), os.ModePerm); err != nil {
return err
}
return nil
}
func (s *Supplier) shouldRunPip() (bool, string, error) {
s.Log.BeginStep("Running Pip Install")
if os.Getenv("PIP_CERT") == "" {
os.Setenv("PIP_CERT", "/etc/ssl/certs/ca-certificates.crt")
}
requirementsPath := filepath.Join(s.Stager.BuildDir(), "requirements.txt")
if exists, err := libbuildpack.FileExists(requirementsPath); err != nil {
return false, "", fmt.Errorf("could not determine existence of requirements.txt: %v", err)
} else if !exists {
s.Log.Debug("Skipping 'pip install' since requirements.txt does not exist")
return false, "", nil
}
return true, requirementsPath, nil
}
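// formatVersion turns a bare Pipfile.lock version such as "3.6" into the runtime.txt
// form "python-3.6.x"; fully qualified versions become "python-<version>".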
func (s *Supplier) formatVersion(version string) string {
verSlice := strings.Split(version, ".")
if len(verSlice) < 3 {
return fmt.Sprintf("python-%s.x", version)
}
return fmt.Sprintf("python-%s", version)
}
func (s *Supplier) writeTempRequirementsTxt(content string) error {
s.removeRequirementsText = true
return ioutil.WriteFile(filepath.Join(s.Stager.BuildDir(), "requirements.txt"), []byte(content), 0644)
}
func indentWriter(writer io.Writer) io.Writer {
return text.NewIndentWriter(writer, []byte(" "))
}
| ["\"XDG_CACHE_HOME\"", "\"XDG_CACHE_HOME\"", "\"PATH\"", "\"PATH\"", "\"LIBFFI\"", "\"HOME\"", "\"PIP_CERT\""] | [] | ["XDG_CACHE_HOME", "PIP_CERT", "HOME", "PATH", "LIBFFI"] | [] | ["XDG_CACHE_HOME", "PIP_CERT", "HOME", "PATH", "LIBFFI"] | go | 5 | 0 | |
klaverjas/__init__.py
|
import logging
from logging.handlers import RotatingFileHandler
import os
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
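# Core configuration: a random per-process secret key and a SQLAlchemy URI taken
# from DATABASE_URL, falling back to a local SQLite database.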
app.config['SECRET_KEY'] = os.urandom(24).hex()
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',
'sqlite:///app.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['STRICT_SLASHES'] = False
bootstrap = Bootstrap(app)
login = LoginManager(app)
login.login_view = 'login'
socketio = SocketIO(app)
db = SQLAlchemy(app)
from klaverjas import errors, models, routes, sockets
if not app.debug:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/app.log', maxBytes=10485760,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('App startup')
| [] | [] | ["DATABASE_URL"] | [] | ["DATABASE_URL"] | python | 1 | 0 | |
train.py
|
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from logger import TBLogger
from pathlib import Path
from torch.utils.data import DataLoader
from core.raft_v2_0 import RAFT
import core.datasets as datasets
from core.utils.flow_viz import flow_to_image
from core.utils.utils import dump_args_to_text
# exclude extremly large displacements
MAX_FLOW = 1000
SUM_FREQ = 100
CHKPT_FREQ = 5000
EVAL_FREQ = 1000
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def admm_loss(flow_preds, aux_vars, flow_gt, valid, fidelity_func = 'l1', rho = 0.0, params_dict = {}):
""" ADMM dervied Loss function defined over F,Q,C,beta of all iterations."""
n_predictions = len(flow_preds)
fidelity_loss = 0.0
reg_loss = 0.0
# extract admm auxiliary vars
q,c,betas = aux_vars
    # exclude invalid pixels and extremely large displacements
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
for i in range(n_predictions):
i_weight = 0.8**(n_predictions - i - 1)
if fidelity_func == 'l1':
i_loss = (flow_preds[i] - flow_gt).abs()
elif fidelity_func == 'l2':
i_loss = (flow_preds[i] - flow_gt)**2
if rho > 0.0:
i_reg = 0.5 * rho * (q[i] - c[i] + betas[i])**2
else:
i_reg = 0.0
fidelity_loss += (valid[:, None] * i_weight * i_loss).mean()
reg_loss += i_reg.mean()
flow_loss = fidelity_loss + reg_loss
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
tv = total_variation(flow_preds[-1]).sum(dim=1)
epe = epe.view(-1)[valid.view(-1)]
tv = tv.view(-1)[valid.view(-1)]
metrics = {
'loss': flow_loss.item(),
'fid': fidelity_loss.item(),
'reg': reg_loss.item(),
'epe': epe.mean().item(),
'tv': tv.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, {**metrics,**params_dict}
def triplet_sequence_loss(flow_preds, q_preds, flow_gt, valid, fidelity_func = 'l1', q_weight = 0.0):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds)
flow_loss = 0.0
    # exclude invalid pixels and extremely large displacements
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
for i in range(n_predictions):
i_weight = 0.8**(n_predictions - i - 1)
if fidelity_func == 'l1':
i_loss = (flow_preds[i] - flow_gt).abs()
elif fidelity_func == 'l2':
i_loss = (flow_preds[i] - flow_gt)**2
if q_weight > 0.0:
i_reg = q_weight * (flow_preds[i] - q_preds[i])**2
else:
i_reg = 0.0
flow_loss += i_weight * (valid[:, None] * (i_loss + i_reg)).mean()
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
reg = torch.sum((flow_preds[-1] - q_preds[-1])**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)]
reg = reg.view(-1)[valid.view(-1)]
metrics = {
'loss': flow_loss.item(),
'epe': epe.mean().item(),
'reg': reg.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, metrics
def sequence_loss(flow_preds, flow_gt, valid, sup_loss = 'l1', tv_weight = 0.0):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds)
flow_loss = 0.0
    # exclude invalid pixels and extremely large displacements
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
for i in range(n_predictions):
i_weight = 0.8**(n_predictions - i - 1)
if sup_loss == 'l1':
i_loss = (flow_preds[i] - flow_gt).abs()
elif sup_loss == 'l2':
i_loss = (flow_preds[i] - flow_gt)**2
if tv_weight > 0.0:
i_tv = tv_weight * total_variation(flow_preds[i])
else:
i_tv = 0.0
flow_loss += i_weight * (valid[:, None] * (i_loss + i_tv)).mean()
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)]
metrics = {
'loss': flow_loss.item(),
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, metrics
def total_variation(flow):
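    # Dx/Dy are horizontal/vertical Sobel kernels; the u and v flow channels are
    # convolved separately and the per-direction responses are combined into a
    # (B, 2, H, W) gradient map used as a smoothness measure.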
Dx = torch.tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype = torch.float, requires_grad = False).view(1,1,3,3).cuda()
Dy = torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype = torch.float, requires_grad = False).view(1,1,3,3).cuda()
D = torch.cat((Dx, Dy), dim = 0)
u,v = torch.split(flow, 1, dim = 1)
Du = F.conv2d(u, D, padding = 1)
Dv = F.conv2d(v, D, padding = 1)
return torch.cat((Du.abs().sum(dim = 1, keepdim = True), Dv.sum(dim = 1, keepdim = True)), dim = 1)
def fetch_dataloader(args):
""" Create the data loader for the corresponding trainign set """
if args.dataset == 'chairs':
train_dataset = datasets.FlyingChairs(args, root=args.data_dir, image_size=args.curr_image_size)
elif args.dataset == 'things':
clean_dataset = datasets.SceneFlow(args, root=args.data_dir, image_size=args.curr_image_size, dstype='frames_cleanpass')
final_dataset = datasets.SceneFlow(args, root=args.data_dir, image_size=args.curr_image_size, dstype='frames_finalpass')
train_dataset = clean_dataset + final_dataset
elif args.dataset == 'sintel':
clean_dataset = datasets.MpiSintel(args, image_size=args.curr_image_size, dstype='clean')
final_dataset = datasets.MpiSintel(args, image_size=args.curr_image_size, dstype='final')
train_dataset = clean_dataset + final_dataset
elif args.dataset == 'kitti':
train_dataset = datasets.KITTI(args, image_size=args.curr_image_size, is_val=False)
gpuargs = {'num_workers': 4, 'drop_last' : True}
train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
pin_memory=True, shuffle=True, **gpuargs)
if args.run_eval:
if args.eval_dataset == 'sintel':
valid_dataset = datasets.MpiSintel(args, image_size=args.curr_image_size, dstype='clean', root=args.eval_dir)
valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size,
pin_memory=True, shuffle=True, **gpuargs)
else:
valid_dataset = None
valid_loader = None
print('Training with %d image pairs' % len(train_dataset))
if args.run_eval:
print('Validating with %d image pairs' % len(valid_dataset))
return train_loader, valid_loader
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps,
pct_start=args.pct_start, cycle_momentum=False, anneal_strategy='linear', final_div_factor=1.0)
return optimizer, scheduler
class Logger:
def __init__(self, initial_step, model, scheduler, name):
self.model = model
self.scheduler = scheduler
self.name = name
self.total_steps = initial_step
self.running_loss = {}
def _print_training_status(self):
metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_lr()[0])
metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
name_str = self.name + " : "
# print the training status
print(name_str + training_str + metrics_str)
#for key in self.running_loss:
# self.running_loss[key] = 0.0
def push(self, metrics):
self.total_steps += 1
if self.total_steps % SUM_FREQ == 0:
self.running_loss = {}
for key in metrics:
if key not in self.running_loss:
self.running_loss[key] = 0.0
self.running_loss[key] += metrics[key]
if self.total_steps % SUM_FREQ == SUM_FREQ-1:
self._print_training_status()
def validate(args,model,valid_loader,tb_logger,step):
print('Evaluating...')
model.eval()
epe_list = []
with torch.no_grad():
for i_batch, data_blob in tqdm(enumerate(valid_loader)):
image1, image2, flow_gt, valid = [x.cuda() for x in data_blob]
flow_preds,_,_ = model(image1, image2, iters=args.eval_iters)
# measure epe in batch
valid = (valid >= 0.5) & (flow_gt.abs().sum(dim=1) < MAX_FLOW)
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)].mean().item()
epe_list.append(epe)
# Save and print eval results
print('Eval Summary - dataset: {} | step: {} | av. epe: {}'.format(args.eval_dataset, step, np.mean(epe_list)))
tb_logger.scalar_summary('Eval EPE', np.mean(epe_list), step)
B = args.batch_size
# Eval Images vs. Pred vs. GT
gt_list = [np.array(x) for x in np.array(flow_gt.detach().cpu()).transpose(0,2,3,1).tolist()]
pr_list = [np.array(x) for x in np.array(flow_preds[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_list = list(map(flow_to_image, gt_list))
pr_list = list(map(flow_to_image, pr_list))
tb_logger.image_summary('Eval - src & tgt, pred, gt',
[np.concatenate([np.concatenate([i.squeeze(0), j.squeeze(0)], axis = 1), np.concatenate([k, l], axis = 1)], axis=0)
for i,j,k,l in zip( np.split(np.array(image1.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
np.split(np.array(image2.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
gt_list,
pr_list)
],
step)
# Eval Error
pred_batch = [np.array(x) for x in np.array(flow_preds[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_batch = [np.array(x) for x in np.array(flow_gt.detach().cpu()).transpose(0,2,3,1).tolist()]
err_batch = [(np.sum(np.abs(pr - gt)**2, axis=2,keepdims=True)**0.5).astype(np.uint8) for pr,gt in zip(pred_batch, gt_batch)]
err_vis = [np.concatenate([gt, pr, np.tile(err,(1,1,3))], axis=0) for gt, pr, err in zip(gt_list, pr_list,err_batch )]
tb_logger.image_summary(f'Eval - Error', err_vis, step)
return
def train(args):
model = RAFT(args)
model = nn.DataParallel(model)
print("Parameter Count: %d" % count_parameters(model))
if args.restore_ckpt is not None:
model.load_state_dict(torch.load(args.restore_ckpt))
if args.image_size != args.curr_image_size:
model.module.admm_init.update_matrices_for_eval(shape=[sh // 8 for sh in args.curr_image_size])
model.module.admm_block.u_solver.update_matrices_for_eval(shape=[sh // 8 for sh in args.curr_image_size])
model.module.admm_block.v_solver.update_matrices_for_eval(shape=[sh // 8 for sh in args.curr_image_size])
print('Updated D matrices. Train image size is {}, Eval image size is {}'.format(args.image_size, args.curr_image_size))
model.cuda()
if 'chairs' not in args.dataset:
model.module.freeze_bn()
train_loader, valid_loader = fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = args.initial_step
logger = Logger(args.initial_step, model, scheduler, args.name)
tb_logger = TBLogger(args.log_dir)
should_keep_training = True
while should_keep_training:
for i_batch, data_blob in enumerate(train_loader):
image1, image2, flow, valid = [x.cuda() for x in data_blob]
model.train()
optimizer.zero_grad()
# forward
flow_predictions, aux_vars, _ = model(image1, image2, iters=args.iters)
            # keep track of specific admm params (disabled here; keep an empty dict so
            # the 'admm' loss branch below still receives a params_dict)
            #admm_params_dict = {'lamb': model.module.admm_block.SoftThresh.lamb.item(),
            #                    'eta': model.module.admm_block.UpdateMul.eta.item()}
            admm_params_dict = {}
# loss function
if args.loss_func == 'sequence':
loss, metrics = sequence_loss(flow_predictions, flow, valid, sup_loss=args.sup_loss, tv_weight = args.tv_weight)
elif args.loss_func == 'triplet':
loss, metrics = triplet_sequence_loss(flow_predictions, aux_vars, flow, valid, fidelity_func=args.sup_loss, q_weight = args.q_weight)
elif args.loss_func == 'admm':
loss, metrics = admm_loss(flow_predictions, aux_vars, flow, valid, fidelity_func=args.sup_loss, rho=args.admm_rho, params_dict=admm_params_dict)
# backward
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
scheduler.step()
total_steps += 1
logger.push(metrics)
if total_steps % SUM_FREQ == SUM_FREQ-1:
# Scalar Summaries
# ============================================================
tb_logger.scalar_summary('lr', optimizer.param_groups[0]['lr'], total_steps)
for key, value in logger.running_loss.items():
tb_logger.scalar_summary(key, value/SUM_FREQ, total_steps)
# Image Summaries
# ============================================================
if not args.run_eval:
B = args.batch_size
# Images vs. Pred vs. GT
gt_list = [np.array(x) for x in np.array(flow.detach().cpu()).transpose(0,2,3,1).tolist()]
pr_list = [np.array(x) for x in np.array(flow_predictions[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_list = list(map(flow_to_image, gt_list))
pr_list = list(map(flow_to_image, pr_list))
tb_logger.image_summary('src & tgt, pred, gt',
[np.concatenate([np.concatenate([i.squeeze(0), j.squeeze(0)], axis = 1), np.concatenate([k, l], axis = 1)], axis=0)
for i,j,k,l in zip( np.split(np.array(image1.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
np.split(np.array(image2.data.cpu()).astype(np.uint8).transpose(0,2,3,1), B, axis = 0),
gt_list,
pr_list)
],
total_steps)
# Error
pred_batch = [np.array(x) for x in np.array(flow_predictions[-1].detach().cpu()).transpose(0,2,3,1).tolist()]
gt_batch = [np.array(x) for x in np.array(flow.detach().cpu()).transpose(0,2,3,1).tolist()]
err_batch = [(np.sum(np.abs(pr - gt)**2, axis=2,keepdims=True)**0.5).astype(np.uint8) for pr,gt in zip(pred_batch, gt_batch)]
err_vis = [np.concatenate([gt, pr, np.tile(err,(1,1,3))], axis=0) for gt, pr, err in zip(gt_list, pr_list,err_batch )]
tb_logger.image_summary(f'Error', err_vis, total_steps)
# Masks
Mx, My = aux_vars[1]
masks = [(255*np.concatenate([mx,my],axis=2)).astype(np.uint8).squeeze() for mx,my in zip(np.array(Mx.detach().cpu()).tolist(), np.array(My.detach().cpu()).tolist())]
tb_logger.image_summary(f'Masks', masks, total_steps)
if total_steps % EVAL_FREQ == EVAL_FREQ-1 and args.run_eval:
validate(args,model,valid_loader,tb_logger,total_steps)
            if total_steps % CHKPT_FREQ == CHKPT_FREQ-1 and args.save_checkpoints:
PATH = args.log_dir + '/%d_%s.pth' % (total_steps+1, args.name)
torch.save(model.state_dict(), PATH)
if total_steps == args.num_steps:
should_keep_training = False
break
PATH = args.log_dir +'/%s.pth' % args.name
torch.save(model.state_dict(), PATH)
return PATH
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cuda_devices', default="0,1", help="choose which GPUs are available")
parser.add_argument('--name', default='bla', help="name your experiment")
parser.add_argument('--dataset', help="which dataset to use for training")
parser.add_argument('--data_dir', help='path to dataset')
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--save_checkpoints', action='store_true', help='save checkpoints during training')
parser.add_argument('--log_dir', default = os.path.join(os.getcwd(), 'checkpoints', datetime.now().strftime('%Y%m%d-%H%M%S')))
parser.add_argument('--run_eval', action='store_true')
parser.add_argument('--eval_dataset', default='sintel', help='which dataset to use for eval')
parser.add_argument('--eval_dir', help='path to eval dataset')
parser.add_argument('--eval_iters',type=int, default=12)
parser.add_argument('--lr', type=float, default=0.00002)
parser.add_argument('--pct_start', type=float, default=0.2)
parser.add_argument('--final_div_factor', type=float, default=1.0)
parser.add_argument('--sup_loss', help='supervised loss term', default='l1')
parser.add_argument('--loss_func', default='sequence')
parser.add_argument('--q_weight', type=float, help='total variation term weight', default=0.4)
parser.add_argument('--tv_weight', type=float, help='total variation term weight', default=0.0)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--initial_step', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--curr_image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--admm_solver', action='store_true', help='apply admm block')
parser.add_argument('--admm_iters',type=int,default=1)
parser.add_argument('--admm_mask', action='store_true', help='apply mask within admm block')
parser.add_argument('--admm_lamb', type=float, default=0.4)
parser.add_argument('--learn_lamb', action='store_true')
parser.add_argument('--admm_rho', type=float, default=0.01)
parser.add_argument('--admm_eta', type=float, default=0.01)
parser.add_argument('--learn_eta', action='store_true')
parser.add_argument('--iters', type=int, default=12)
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0)
args = parser.parse_args()
#torch.manual_seed(1234)
#np.random.seed(1234)
# scale learning rate and batch size by number of GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices
num_gpus = torch.cuda.device_count()
args.batch_size = args.batch_size * num_gpus
args.lr = args.lr * num_gpus
args.num_gpus = num_gpus
    if not os.path.isdir(args.log_dir) and args.save_checkpoints:
os.mkdir(args.log_dir)
print("Checkpoints will be saved to " + args.log_dir)
dump_args_to_text(args, args.log_dir)
train(args)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
examples/src/main/java/com/convertapi/examples/ConvertWordToPdfAndPng.java
|
package com.convertapi.examples;
import com.convertapi.client.Config;
import com.convertapi.client.ConversionResult;
import com.convertapi.client.ConvertApi;
import com.convertapi.client.Param;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import static java.lang.System.getenv;
/**
* Example of saving Word docx to PDF and to PNG
* Conversion is made by using same file parameter and processing two conversions simultaneously
* https://www.convertapi.com/docx-to-pdf
* https://www.convertapi.com/docx-to-png
*/
public class ConvertWordToPdfAndPng {
public static void main(String[] args) throws IOException, ExecutionException, InterruptedException {
Config.setDefaultSecret(getenv("CONVERTAPI_SECRET")); //Get your secret at https://www.convertapi.com/a
Path tempDir = Paths.get(System.getProperty("java.io.tmpdir"));
System.out.println("Converting DOCX to PDF and JPG in parallel");
Param docxFileParam = new Param("file", new File(AlternativeConverter.class.getClassLoader().getResource("test.docx").getFile()).toPath());
CompletableFuture<ConversionResult> pdfResult = ConvertApi.convert("docx", "pdf", docxFileParam);
CompletableFuture<ConversionResult> jpgResult = ConvertApi.convert("docx", "jpg", docxFileParam);
System.out.println("PDF file saved to: " + pdfResult.get().saveFile(tempDir).get());
List<CompletableFuture<Path>> jpgPaths = jpgResult.get().saveFiles(tempDir);
for (CompletableFuture<Path> path: jpgPaths) {
System.out.println("JPG file saved to: " + path.get().toString());
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
cmd/update.go
|
// DBDeployer - The MySQL Sandbox
// Copyright © 2006-2019 Giuseppe Maxia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"os"
"path"
"regexp"
"runtime"
"strings"
"github.com/dustin/go-humanize"
"github.com/spf13/cobra"
"github.com/datacharmer/dbdeployer/common"
"github.com/datacharmer/dbdeployer/globals"
"github.com/datacharmer/dbdeployer/rest"
"github.com/datacharmer/dbdeployer/unpack"
)
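// updateDbDeployer downloads the requested dbdeployer release tarball, verifies its
// SHA256 checksum when one is published, and replaces the current executable in place.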
func updateDbDeployer(cmd *cobra.Command, args []string) {
version := ""
if len(args) > 0 {
version = args[0]
}
flags := cmd.Flags()
OS, _ := flags.GetString(globals.OSLabel)
dryRun, _ := flags.GetBool(globals.DryRunLabel)
verbose, _ := flags.GetBool(globals.VerboseLabel)
newPath, _ := flags.GetString(globals.NewPathLabel)
getDocs, _ := flags.GetBool(globals.DocsLabel)
forceOldVersion, _ := flags.GetBool(globals.ForceOldVersionLabel)
currentOS := strings.ToLower(runtime.GOOS)
if OS == "" {
OS = runtime.GOOS
}
OS = strings.ToLower(OS)
if OS == "macos" || OS == "darwin" {
OS = "osx"
}
if currentOS == "macos" || currentOS == "darwin" {
currentOS = "osx"
}
if dryRun {
verbose = true
}
release, err := rest.GetLatestRelease(version)
common.ErrCheckExitf(err, 1, "error getting releases %s", err)
targetDirectory := newPath
programName := common.BaseName(os.Args[0])
fullProgramName := common.Which(programName)
fileInfo, err := os.Stat(fullProgramName)
if err != nil {
common.Exitf(1, "error retrieving file info for %s", fullProgramName)
}
filePermissions := fileInfo.Mode().Perm()
dbdeployerPath := common.DirName(common.Which(programName))
if targetDirectory == "" {
targetDirectory = dbdeployerPath
}
if !common.DirExists(targetDirectory) {
common.Exitf(1, globals.ErrDirectoryNotFound, targetDirectory)
}
tag := release.TagName
reV := regexp.MustCompile(`^v`)
tag = reV.ReplaceAllString(tag, "")
tagList, err := common.VersionToList(tag)
common.ErrCheckExitf(err, 1, "error converting tag %s to version list", tag)
if tag == common.VersionDef && !forceOldVersion {
common.Exit(0,
fmt.Sprintf("download version (%s) is the same as the current version ", tag),
fmt.Sprintf("Option --%s was not used\n", globals.ForceOldVersionLabel),
"Download canceled",
)
}
foundOldVersion, err := common.GreaterOrEqualVersion(common.VersionDef, tagList)
common.ErrCheckExitf(err, 1, "error comparing remote tag %s to dbdeployer version %s", tag, common.VersionDef)
if foundOldVersion && !forceOldVersion {
common.Exit(0,
fmt.Sprintf("download version (%s) is older than current version (%s) ", tag, common.VersionDef),
fmt.Sprintf("Option --%s was not used\n", globals.ForceOldVersionLabel),
"Download canceled",
)
}
docsLabel := ""
if getDocs {
docsLabel = "-docs"
}
fileName := fmt.Sprintf("dbdeployer-%s%s.%s", tag, docsLabel, OS)
tarballName := fileName + ".tar.gz"
signatureName := tarballName + ".sha256"
fileUrl := ""
signatureUrl := ""
if verbose {
fmt.Printf("Remote version: %s\n", tag)
fmt.Printf("Remote file: %s\n", tarballName)
fmt.Printf("OS: %s\n", OS)
fmt.Printf("dbdeployer location: %s\n", dbdeployerPath)
fmt.Println()
fmt.Printf("%s\n", globals.DashLine)
fmt.Printf("Release : %s\n", release.Name)
fmt.Printf("Date : %s\n", release.PublishedAt)
fmt.Printf("%s\n", release.Body)
fmt.Printf("%s\n", globals.DashLine)
}
for _, asset := range release.Assets {
chosenLabel := ""
if signatureName == asset.Name {
signatureUrl = asset.BrowserDownloadURL
if verbose {
fmt.Printf("\t%s (%s) [CHOSEN CRC]\n", asset.Name, humanize.Bytes(uint64(asset.Size)))
}
}
if tarballName == asset.Name {
fileUrl = asset.BrowserDownloadURL
chosenLabel = " [CHOSEN]"
}
if verbose {
fmt.Printf("\t%s (%s)%s\n", asset.Name, humanize.Bytes(uint64(asset.Size)), chosenLabel)
}
}
if fileUrl == "" {
common.Exitf(1, "file %s not found in release", tarballName)
}
if dryRun {
fmt.Printf("Download %s\n", fileUrl)
if currentOS == OS {
fmt.Printf("save to %s/%s\n", targetDirectory, programName)
}
return
}
if common.FileExists(tarballName) {
err = os.Remove(tarballName)
common.ErrCheckExitf(err, 1, "error removing old copy of %s", tarballName)
}
err = rest.DownloadFile(tarballName, fileUrl, true, globals.MB)
common.ErrCheckExitf(err, 1, "error downloading %s", tarballName)
if !common.FileExists(tarballName) {
common.Exitf(1, "tarball %s not found after download", tarballName)
}
if common.FileExists(fileName) {
err = os.Remove(fileName)
common.ErrCheckExitf(err, 1, "error removing old copy of %s", fileName)
}
err = unpack.UnpackTar(tarballName, os.Getenv("PWD"), unpack.VERBOSE)
common.ErrCheckExitf(err, 1, "error unpacking %s", tarballName)
if !common.FileExists(fileName) {
common.Exitf(1, globals.ErrFileNotFound, fileName)
}
if verbose {
fmt.Printf("File %s extracted from %s\n", fileName, tarballName)
}
if signatureUrl == "" {
fmt.Printf("*** WARNING *** No SHA256 checksum found for %s\n", tarballName)
} else {
err = rest.DownloadFile(signatureName, signatureUrl, true, globals.MB)
common.ErrCheckExitf(err, 1, "error downloading %s", signatureName)
signature, err := common.SlurpAsBytes(signatureName)
common.ErrCheckExitf(err, 1, "error reading from %s", signatureName)
reSignature := regexp.MustCompile(`^(\S+)\s+(\S+)`)
signatureList := reSignature.FindAllSubmatch(signature, -1)
if len(signatureList) == 0 || len(signatureList[0]) == 0 {
common.Exitf(1, "signature not found in %s", signatureName)
}
checksum := signatureList[0][1]
checksumFileName := signatureList[0][2]
if string(checksumFileName) != tarballName {
common.Exitf(1, "wanted signature for %s but got %s", tarballName, checksumFileName)
}
calculatedChecksum, err := common.GetFileChecksum(tarballName, "sha256")
common.ErrCheckExitf(err, 1, "error calculating checksum for %s: %s", tarballName, err)
if string(checksum) != calculatedChecksum {
common.Exitf(1, "wanted checksum for %s: %s but got %s", tarballName, checksum, calculatedChecksum)
}
fmt.Printf("checksum for %s matches\n", tarballName)
_ = os.Remove(signatureName)
}
_ = os.Remove(tarballName)
if verbose {
fmt.Printf("File %s removed\n", tarballName)
}
// Give the new file the same attributes as the existing dbdeployer executable
err = os.Chmod(fileName, filePermissions)
common.ErrCheckExitf(err, 1, "error changing attributes of %s", fileName)
if currentOS != OS && targetDirectory == dbdeployerPath {
fmt.Printf("OS of the remote file (%s) different from current OS (%s)\n", OS, currentOS)
fmt.Printf("Won't overwrite current dbdeployer executable.\n")
fmt.Printf("The downloaded file is %s\n", fileName)
return
}
out, err := common.RunCmdCtrlWithArgs("mv", []string{fileName, path.Join(targetDirectory, programName)}, false)
if err != nil {
fmt.Printf("%s\n", out)
common.Exitf(1, "error moving %s to %s/%s", fileName, targetDirectory, programName)
}
if verbose {
fmt.Printf("File %s moved to %s\n", programName, targetDirectory)
}
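// Resolve "." to an absolute path so the final version check can locate the new binary.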
if targetDirectory == "." {
currentDir, err := os.Getwd()
if err != nil {
currentDir = os.Getenv("PWD")
}
if currentDir == "" {
common.Exitf(1, "error getting current working directory")
}
targetDirectory = currentDir
}
_, err = common.RunCmdCtrlWithArgs(path.Join(targetDirectory, programName), []string{"--version"}, false)
if err != nil {
common.Exitf(1, "error running %s/%s :%s", targetDirectory, programName, err)
}
}
var updateCmd = &cobra.Command{
Use: "update [version]",
Short: "Gets dbdeployer's newest version",
Long: `Updates dbdeployer in place using the latest version (or one of your choice)`,
Example: `
$ dbdeployer update
# gets the latest release, overwrites current dbdeployer binaries
$ dbdeployer update --dry-run
# shows what it will do, but does not do it
$ dbdeployer update --new-path=$PWD
# downloads the latest executable into the current directory
$ dbdeployer update v1.34.0 --force-old-version
# downloads dbdeployer 1.34.0 and replace the current one
# (WARNING: a version older than 1.36.0 won't support updating)
`,
Run: updateDbDeployer,
}
func init() {
rootCmd.AddCommand(updateCmd)
setPflag(updateCmd, globals.NewPathLabel, "", "", "", "Download updated dbdeployer into a different path", false)
setPflag(updateCmd, globals.OSLabel, "", "", "", "Gets the executable for this operating system", false)
updateCmd.Flags().BoolP(globals.DryRunLabel, "", false, "Show what would happen, but don't execute it")
updateCmd.Flags().BoolP(globals.VerboseLabel, "", false, "Gives more info")
updateCmd.Flags().BoolP(globals.ForceOldVersionLabel, "", false, "Force download of older version")
updateCmd.Flags().BoolP(globals.DocsLabel, "", false, "Gets the docs version of the executable")
}
|
[
"\"PWD\"",
"\"PWD\""
] |
[] |
[
"PWD"
] |
[]
|
["PWD"]
|
go
| 1 | 0 | |
internal/config/environment.go
|
package config
import (
"fmt"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strings"
"github.com/google/uuid"
"github.com/infracost/infracost/internal/version"
)
type EnvironmentSpec struct {
Version string `json:"version"`
FullVersion string `json:"fullVersion"`
Flags []string `json:"flags"`
OutputFormat string `json:"outputFormat"`
IsTest bool `json:"isTest"`
IsDev bool `json:"isDev"`
OS string `json:"os"`
CIPlatform string `json:"ciPlatform,omitempty"`
TerraformBinary string `json:"terraformBinary"`
TerraformFullVersion string `json:"terraformFullVersion"`
TerraformVersion string `json:"terraformVersion"`
TerraformRemoteExecutionModeEnabled bool `json:"terraformRemoteExecutionModeEnabled"`
TerraformInfracostProviderEnabled bool `json:"terraformInfracostProviderEnabled"`
}
var Environment *EnvironmentSpec
func init() {
Environment = loadEnvironment()
}
func loadEnvironment() *EnvironmentSpec {
return &EnvironmentSpec{
Version: baseVersion(version.Version),
FullVersion: version.Version,
Flags: []string{},
OutputFormat: "",
IsTest: isTest(),
IsDev: isDev(),
OS: runtime.GOOS,
CIPlatform: ciPlatform(),
TerraformBinary: filepath.Base(terraformBinary()),
TerraformFullVersion: terraformFullVersion(),
TerraformVersion: terraformVersion(),
TerraformRemoteExecutionModeEnabled: false,
TerraformInfracostProviderEnabled: false,
}
}
func userAgent() string {
userAgent := "infracost"
if version.Version != "" {
userAgent += fmt.Sprintf("-%s", version.Version)
}
return userAgent
}
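// baseVersion strips any build metadata (the part after "+") from the version string.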
func baseVersion(v string) string {
return strings.SplitN(v, "+", 2)[0]
}
func terraformBinary() string {
terraformBinary := os.Getenv("TERRAFORM_BINARY")
if terraformBinary == "" {
terraformBinary = "terraform"
}
return terraformBinary
}
func terraformFullVersion() string {
exe := terraformBinary()
out, _ := exec.Command(exe, "-version").Output()
return strings.SplitN(string(out), "\n", 2)[0]
}
func terraformVersion() string {
v := terraformFullVersion()
p := strings.Split(v, " ")
if len(p) > 1 {
return p[len(p)-1]
}
return ""
}
func ciPlatform() string {
if IsTruthy(os.Getenv("GITHUB_ACTIONS")) {
return "github_actions"
} else if IsTruthy(os.Getenv("GITLAB_CI")) {
return "gitlab_ci"
} else if IsTruthy(os.Getenv("CIRCLECI")) {
return "circleci"
} else {
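// Fall back to scanning environment variable names for prefixes used by other CI systems.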
envKeys := os.Environ()
sort.Strings(envKeys)
for _, k := range envKeys {
if strings.HasPrefix(k, "ATLANTIS_") {
return "atlantis"
} else if strings.HasPrefix(k, "BITBUCKET_") {
return "bitbucket"
} else if strings.HasPrefix(k, "JENKINS_") {
return "jenkins"
} else if strings.HasPrefix(k, "CONCOURSE_") {
return "concourse"
}
}
if IsTruthy(os.Getenv("CI")) {
return "ci"
}
}
return ""
}
func isTest() bool {
return os.Getenv("INFRACOST_ENV") == "test" || strings.HasSuffix(os.Args[0], ".test")
}
func isDev() bool {
return os.Getenv("INFRACOST_ENV") == "dev"
}
func TraceID() string {
return uuid.New().String()
}
func AddNoAuthHeaders(req *http.Request) {
req.Header.Set("content-type", "application/json")
req.Header.Set("User-Agent", userAgent())
}
func AddAuthHeaders(req *http.Request) {
AddNoAuthHeaders(req)
req.Header.Set("X-Api-Key", Config.APIKey)
req.Header.Set("X-Trace-Id", TraceID())
}
|
[
"\"TERRAFORM_BINARY\"",
"\"GITHUB_ACTIONS\"",
"\"GITLAB_CI\"",
"\"CIRCLECI\"",
"\"CI\"",
"\"INFRACOST_ENV\"",
"\"INFRACOST_ENV\""
] |
[] |
[
"TERRAFORM_BINARY",
"GITLAB_CI",
"CIRCLECI",
"CI",
"GITHUB_ACTIONS",
"INFRACOST_ENV"
] |
[]
|
["TERRAFORM_BINARY", "GITLAB_CI", "CIRCLECI", "CI", "GITHUB_ACTIONS", "INFRACOST_ENV"]
|
go
| 6 | 0 | |
cmd/werf/common/synchronization.go
|
package common
import (
"context"
"fmt"
"os"
"strings"
"github.com/werf/werf/pkg/werf/global_warnings"
"github.com/werf/werf/pkg/werf/locker_with_retry"
"github.com/werf/lockgate/pkg/distributed_locker"
"github.com/werf/logboek"
"github.com/spf13/cobra"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"github.com/werf/kubedog/pkg/kube"
"github.com/werf/werf/pkg/storage/synchronization_server"
"github.com/werf/werf/pkg/storage"
"github.com/werf/werf/pkg/werf"
)
func SetupSynchronization(cmdData *CmdData, cmd *cobra.Command) {
cmdData.Synchronization = new(string)
defaultValue := os.Getenv("WERF_SYNCHRONIZATION")
cmd.Flags().StringVarP(cmdData.Synchronization, "synchronization", "S", defaultValue, fmt.Sprintf(`Address of synchronizer for multiple werf processes to work with a single repo.
Default:
- $WERF_SYNCHRONIZATION, or
- :local if --repo is not specified, or
- %s if --repo has been specified.
The same address should be specified for all werf processes that work with a single repo. :local address allows execution of werf processes from a single host only`, storage.DefaultHttpSynchronizationServer))
}
type SynchronizationType string
const (
LocalSynchronization SynchronizationType = "LocalSynchronization"
KubernetesSynchronization SynchronizationType = "KubernetesSynchronization"
HttpSynchronization SynchronizationType = "HttpSynchronization"
)
type SynchronizationParams struct {
Address string
SynchronizationType SynchronizationType
KubeParams *storage.KubernetesSynchronizationParams
}
func checkSynchronizationKubernetesParamsForWarnings(cmdData *CmdData) {
if *cmdData.Synchronization != "" {
return
}
ctx := BackgroundContext()
doPrintWarning := false
if *cmdData.KubeConfigBase64 != "" {
doPrintWarning = true
global_warnings.GlobalWarningLn(ctx, `###`)
global_warnings.GlobalWarningLn(ctx, `## Required --synchronization param (or WERF_SYNCHRONIZATION env var) to be specified explicitly,`)
global_warnings.GlobalWarningLn(ctx, fmt.Sprintf(`## because --kube-config-base64=%s (or WERF_KUBE_CONFIG_BASE64, or WERF_KUBECONFIG_BASE64, or $KUBECONFIG_BASE64 env var) has been specified explicitly.`, *cmdData.KubeConfigBase64))
} else if kubeConfigEnv := os.Getenv("KUBECONFIG"); kubeConfigEnv != "" {
doPrintWarning = true
global_warnings.GlobalWarningLn(ctx, `###`)
global_warnings.GlobalWarningLn(ctx, `## Required --synchronization param (or WERF_SYNCHRONIZATION env var) to be specified explicitly,`)
global_warnings.GlobalWarningLn(ctx, fmt.Sprintf(`## because KUBECONFIG=%s env var has been specified explicitly.`, kubeConfigEnv))
} else if *cmdData.KubeConfig != "" {
doPrintWarning = true
global_warnings.GlobalWarningLn(ctx, `###`)
global_warnings.GlobalWarningLn(ctx, `## Required --synchronization param (or WERF_SYNCHRONIZATION env var) to be specified explicitly,`)
global_warnings.GlobalWarningLn(ctx, fmt.Sprintf(`## because --kube-config=%s (or WERF_KUBE_CONFIG, or WERF_KUBECONFIG, or KUBECONFIG env var) has been specified explicitly.`, *cmdData.KubeConfig))
} else if *cmdData.KubeContext != "" {
doPrintWarning = true
global_warnings.GlobalWarningLn(ctx, `###`)
global_warnings.GlobalWarningLn(ctx, `## Required --synchronization param (or WERF_SYNCHRONIZATION env var) to be specified explicitly,`)
global_warnings.GlobalWarningLn(ctx, fmt.Sprintf(`## because --kube-context=%s (or WERF_KUBE_CONTEXT env var) has been specified explicitly.`, *cmdData.KubeContext))
}
if doPrintWarning {
global_warnings.GlobalWarningLn(ctx, `## `)
global_warnings.GlobalWarningLn(ctx, `## IMPORTANT: all invocations of the werf for any single project should use the same`)
global_warnings.GlobalWarningLn(ctx, `## --synchronization param (or WERF_SYNCHRONIZATION env var) value`)
global_warnings.GlobalWarningLn(ctx, `## to prevent inconsistency of the werf setup for this project.`)
global_warnings.GlobalWarningLn(ctx, `## `)
global_warnings.GlobalWarningLn(ctx, `## Format of the synchronization param: kubernetes://NAMESPACE[:CONTEXT][@(base64:BASE64_CONFIG_DATA)|CONFIG_PATH]`)
global_warnings.GlobalWarningLn(ctx, `## `)
global_warnings.GlobalWarningLn(ctx, `## By default werf stores synchronization data using --synchronization=kubernetes://werf-synchronization namespace`)
global_warnings.GlobalWarningLn(ctx, `## with default kube-config and kube-context.`)
global_warnings.GlobalWarningLn(ctx, `## `)
global_warnings.GlobalWarningLn(ctx, `## For example, configure werf synchronization with the following settings:`)
global_warnings.GlobalWarningLn(ctx, `## `)
global_warnings.GlobalWarningLn(ctx, `## export WERF_SYNCHRONIZATION=kubernetes://werf-synchronization:mycontext@/root/.kube/custom-config`)
global_warnings.GlobalWarningLn(ctx, `## `)
global_warnings.GlobalWarningLn(ctx, `## — these same settings are required in every werf invocation for your project.`)
global_warnings.GlobalWarningLn(ctx, `## `)
global_warnings.GlobalWarningLn(ctx, `## More info about synchronization: https://werf.io/documentation/advanced/synchronization.html`)
global_warnings.GlobalWarningLn(ctx, `###`)
}
}
func GetSynchronization(ctx context.Context, cmdData *CmdData, projectName string, stagesStorage storage.StagesStorage) (*SynchronizationParams, error) {
getKubeParamsFunc := func(address string, commonKubeInitializer *OndemandKubeInitializer) (*SynchronizationParams, error) {
res := &SynchronizationParams{}
res.SynchronizationType = KubernetesSynchronization
res.Address = address
if params, err := storage.ParseKubernetesSynchronization(res.Address); err != nil {
return nil, fmt.Errorf("unable to parse synchronization address %s: %s", res.Address, err)
} else {
res.KubeParams = params
}
if res.KubeParams.ConfigPath == "" {
res.KubeParams.ConfigPath = commonKubeInitializer.KubeConfig
}
if res.KubeParams.ConfigContext == "" {
res.KubeParams.ConfigContext = commonKubeInitializer.KubeContext
}
if res.KubeParams.ConfigDataBase64 == "" {
res.KubeParams.ConfigDataBase64 = commonKubeInitializer.KubeConfigBase64
}
if res.KubeParams.ConfigPathMergeList == nil {
res.KubeParams.ConfigPathMergeList = commonKubeInitializer.KubeConfigPathMergeList
}
return res, nil
}
getHttpParamsFunc := func(synchronization string, stagesStorage storage.StagesStorage) (*SynchronizationParams, error) {
var address string
if err := logboek.Default().LogProcess("Getting client id for the http synchronization server").
DoError(func() error {
if clientID, err := synchronization_server.GetOrCreateClientID(ctx, projectName, synchronization_server.NewSynchronizationClient(synchronization), stagesStorage); err != nil {
return fmt.Errorf("unable to get synchronization client id: %s", err)
} else {
address = fmt.Sprintf("%s/%s", synchronization, clientID)
logboek.Default().LogF("Using clientID %q for http synchronization server at address %s\n", clientID, address)
return nil
}
}); err != nil {
return nil, err
}
return &SynchronizationParams{Address: address, SynchronizationType: HttpSynchronization}, nil
}
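// Pick the synchronization backend: local host locks, a Kubernetes namespace, or an HTTP synchronization server.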
if *cmdData.Synchronization == "" {
if stagesStorage.Address() == storage.LocalStorageAddress {
return &SynchronizationParams{SynchronizationType: LocalSynchronization, Address: storage.LocalStorageAddress}, nil
} else {
return getHttpParamsFunc(storage.DefaultHttpSynchronizationServer, stagesStorage)
}
} else if *cmdData.Synchronization == storage.LocalStorageAddress {
return &SynchronizationParams{Address: *cmdData.Synchronization, SynchronizationType: LocalSynchronization}, nil
} else if strings.HasPrefix(*cmdData.Synchronization, "kubernetes://") {
checkSynchronizationKubernetesParamsForWarnings(cmdData)
return getKubeParamsFunc(*cmdData.Synchronization, GetOndemandKubeInitializer())
} else if strings.HasPrefix(*cmdData.Synchronization, "http://") || strings.HasPrefix(*cmdData.Synchronization, "https://") {
return getHttpParamsFunc(*cmdData.Synchronization, stagesStorage)
} else {
return nil, fmt.Errorf("only --synchronization=%s or --synchronization=kubernetes://NAMESPACE or --synchronization=http[s]://HOST:PORT/CLIENT_ID is supported, got %q", storage.LocalStorageAddress, *cmdData.Synchronization)
}
}
func GetStagesStorageCache(synchronization *SynchronizationParams) (storage.StagesStorageCache, error) {
switch synchronization.SynchronizationType {
case LocalSynchronization:
return storage.NewFileStagesStorageCache(werf.GetStagesStorageCacheDir()), nil
case KubernetesSynchronization:
if config, err := kube.GetKubeConfig(kube.KubeConfigOptions{
ConfigPath: synchronization.KubeParams.ConfigPath,
ConfigDataBase64: synchronization.KubeParams.ConfigDataBase64,
ConfigPathMergeList: synchronization.KubeParams.ConfigPathMergeList,
Context: synchronization.KubeParams.ConfigContext,
}); err != nil {
return nil, fmt.Errorf("unable to load synchronization kube config (context %q): %s", synchronization.KubeParams.ConfigContext, err)
} else if client, err := kubernetes.NewForConfig(config.Config); err != nil {
return nil, fmt.Errorf("unable to create synchronization kubernetes client: %s", err)
} else {
return storage.NewKubernetesStagesStorageCache(synchronization.KubeParams.Namespace, client, func(projectName string) string {
return fmt.Sprintf("werf-%s", projectName)
}), nil
}
case HttpSynchronization:
return synchronization_server.NewStagesStorageCacheHttpClient(fmt.Sprintf("%s/stages-storage-cache", synchronization.Address)), nil
default:
panic(fmt.Sprintf("unsupported synchronization address %q", synchronization.Address))
}
}
func GetStorageLockManager(ctx context.Context, synchronization *SynchronizationParams) (storage.LockManager, error) {
switch synchronization.SynchronizationType {
case LocalSynchronization:
return storage.NewGenericLockManager(werf.GetHostLocker()), nil
case KubernetesSynchronization:
if config, err := kube.GetKubeConfig(kube.KubeConfigOptions{
ConfigPath: synchronization.KubeParams.ConfigPath,
ConfigDataBase64: synchronization.KubeParams.ConfigDataBase64,
ConfigPathMergeList: synchronization.KubeParams.ConfigPathMergeList,
Context: synchronization.KubeParams.ConfigContext,
}); err != nil {
return nil, fmt.Errorf("unable to load synchronization kube config %q (context %q): %s", synchronization.KubeParams.ConfigPath, synchronization.KubeParams.ConfigContext, err)
} else if dynamicClient, err := dynamic.NewForConfig(config.Config); err != nil {
return nil, fmt.Errorf("unable to create synchronization kubernetes dynamic client: %s", err)
} else if client, err := kubernetes.NewForConfig(config.Config); err != nil {
return nil, fmt.Errorf("unable to create synchronization kubernetes client: %s", err)
} else {
return storage.NewKubernetesLockManager(synchronization.KubeParams.Namespace, client, dynamicClient, func(projectName string) string {
return fmt.Sprintf("werf-%s", projectName)
}), nil
}
case HttpSynchronization:
locker := distributed_locker.NewHttpLocker(fmt.Sprintf("%s/locker", synchronization.Address))
lockerWithRetry := locker_with_retry.NewLockerWithRetry(ctx, locker, locker_with_retry.LockerWithRetryOptions{MaxAcquireAttempts: 10, MaxReleaseAttempts: 10})
return storage.NewGenericLockManager(lockerWithRetry), nil
default:
panic(fmt.Sprintf("unsupported synchronization address %q", synchronization.Address))
}
}
|
[
"\"WERF_SYNCHRONIZATION\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG",
"WERF_SYNCHRONIZATION"
] |
[]
|
["KUBECONFIG", "WERF_SYNCHRONIZATION"]
|
go
| 2 | 0 | |
controllers/observabilityendpoint/ocp_resource.go
|
// Copyright (c) 2021 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project.
package observabilityendpoint
import (
"context"
"os"
"reflect"
ocinfrav1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
clusterRoleBindingName = "metrics-collector-view"
caConfigmapName = "metrics-collector-serving-certs-ca-bundle"
)
var (
serviceAccountName = os.Getenv("SERVICE_ACCOUNT")
)
func deleteMonitoringClusterRoleBinding(ctx context.Context, client client.Client) error {
rb := &rbacv1.ClusterRoleBinding{}
err := client.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName,
Namespace: ""}, rb)
if err != nil {
if errors.IsNotFound(err) {
log.Info("clusterrolebinding already deleted")
return nil
}
log.Error(err, "Failed to check the clusterrolebinding")
return err
}
err = client.Delete(ctx, rb)
if err != nil {
log.Error(err, "Error deleting clusterrolebinding")
return err
}
log.Info("clusterrolebinding deleted")
return nil
}
func createMonitoringClusterRoleBinding(ctx context.Context, client client.Client) error {
rb := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleBindingName,
Annotations: map[string]string{
ownerLabelKey: ownerLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: "cluster-monitoring-view",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
},
},
}
found := &rbacv1.ClusterRoleBinding{}
err := client.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName,
Namespace: ""}, found)
if err != nil {
if errors.IsNotFound(err) {
err = client.Create(ctx, rb)
if err == nil {
log.Info("clusterrolebinding created")
} else {
log.Error(err, "Failed to create the clusterrolebinding")
}
return err
}
log.Error(err, "Failed to check the clusterrolebinding")
return err
}
if reflect.DeepEqual(rb.RoleRef, found.RoleRef) && reflect.DeepEqual(rb.Subjects, found.Subjects) {
log.Info("The clusterrolebinding already existed")
} else {
rb.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion
err = client.Update(ctx, rb)
if err != nil {
log.Error(err, "Failed to update the clusterrolebinding")
}
}
return nil
}
func deleteCAConfigmap(ctx context.Context, client client.Client) error {
cm := &corev1.ConfigMap{}
err := client.Get(ctx, types.NamespacedName{Name: caConfigmapName,
Namespace: namespace}, cm)
if err != nil {
if errors.IsNotFound(err) {
log.Info("configmap already deleted")
return nil
}
log.Error(err, "Failed to check the configmap")
return err
}
err = client.Delete(ctx, cm)
if err != nil {
log.Error(err, "Error deleting configmap")
return err
}
log.Info("configmap deleted")
return nil
}
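// createCAConfigmap ensures the CA bundle configmap exists; the OpenShift service CA operator populates it via the inject-cabundle annotation.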
func createCAConfigmap(ctx context.Context, client client.Client) error {
cm := &corev1.ConfigMap{}
err := client.Get(ctx, types.NamespacedName{Name: caConfigmapName,
Namespace: namespace}, cm)
if err != nil {
if errors.IsNotFound(err) {
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: caConfigmapName,
Namespace: namespace,
Annotations: map[string]string{
ownerLabelKey: ownerLabelValue,
"service.alpha.openshift.io/inject-cabundle": "true",
},
},
Data: map[string]string{"service-ca.crt": ""},
}
err = client.Create(ctx, cm)
if err == nil {
log.Info("Configmap created")
} else {
log.Error(err, "Failed to create the configmap")
}
return err
} else {
log.Error(err, "Failed to check the configmap")
return err
}
} else {
log.Info("The configmap already existed")
}
return nil
}
// getClusterID is used to get the cluster uid
func getClusterID(ctx context.Context, c client.Client) (string, error) {
clusterVersion := &ocinfrav1.ClusterVersion{}
if err := c.Get(ctx, types.NamespacedName{Name: "version"}, clusterVersion); err != nil {
log.Error(err, "Failed to get clusterVersion")
return "", err
}
return string(clusterVersion.Spec.ClusterID), nil
}
func isSNO(ctx context.Context, c client.Client) (bool, error) {
infraConfig := &ocinfrav1.Infrastructure{}
if err := c.Get(ctx, types.NamespacedName{Name: "cluster"}, infraConfig); err != nil {
log.Info("No OCP infrastructure found, determine SNO by checking master size")
return isSingleMaster(ctx, c)
}
if infraConfig.Status.ControlPlaneTopology == ocinfrav1.SingleReplicaTopologyMode {
return true, nil
}
return false, nil
}
func isSingleMaster(ctx context.Context, c client.Client) (bool, error) {
nodes := &corev1.NodeList{}
opts := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{"node-role.kubernetes.io/master": ""}),
}
err := c.List(ctx, nodes, opts)
if err != nil {
log.Error(err, "Failed to get node list")
return false, err
}
if len(nodes.Items) == 1 {
return true, nil
}
return false, nil
}
|
[
"\"SERVICE_ACCOUNT\""
] |
[] |
[
"SERVICE_ACCOUNT"
] |
[]
|
["SERVICE_ACCOUNT"]
|
go
| 1 | 0 | |
kubetest/aksengine.go
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"k8s.io/test-infra/kubetest/e2e"
"k8s.io/test-infra/kubetest/process"
"k8s.io/test-infra/kubetest/util"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/azure"
uuid "github.com/satori/go.uuid"
)
const (
winZipTemplate = "win-zip-%s.zip"
k8sNodeTarballTemplate = "kubernetes-node-linux-amd64-%s.tar.gz"
azureBlobContainerURLTemplate = "https://%s.blob.core.windows.net/%s"
azureKubemarkTestPrefix = "[for azure kubemark test]"
)
var (
aksResourceName = flag.String("aksengine-resource-name", "", "Azure Resource Name")
aksResourceGroupName = flag.String("aksengine-resourcegroup-name", "", "Azure Resource Group Name")
aksLocation = flag.String("aksengine-location", "", "Azure AKS location")
aksMasterVMSize = flag.String("aksengine-mastervmsize", "", "Azure Master VM size")
aksAgentVMSize = flag.String("aksengine-agentvmsize", "", "Azure Agent VM size")
aksAdminUsername = flag.String("aksengine-admin-username", "", "Admin username")
aksAdminPassword = flag.String("aksengine-admin-password", "", "Admin password")
aksAgentPoolCount = flag.Int("aksengine-agentpoolcount", 0, "Azure Agent Pool Count")
aksTemplateURL = flag.String("aksengine-template-url", "", "Azure Template URL.")
aksDNSPrefix = flag.String("aksengine-dnsprefix", "", "Azure K8s Master DNS Prefix")
aksEngineURL = flag.String("aksengine-download-url", "", "Download URL for AKS engine")
aksEngineMD5 = flag.String("aksengine-md5-sum", "", "Checksum for aks engine download")
aksSSHPublicKeyPath = flag.String("aksengine-public-key", "", "Path to SSH Public Key")
aksSSHPrivateKeyPath = flag.String("aksengine-private-key", "", "Path to SSH Private Key")
aksWinBinaries = flag.Bool("aksengine-win-binaries", false, "Set to True if you want kubetest to build a custom zip with windows binaries for aks-engine")
aksCcm = flag.Bool("aksengine-ccm", false, "Set to True if you want kubetest to build a custom cloud controller manager for aks-engine")
aksCnm = flag.Bool("aksengine-cnm", false, "Set to True if you want kubetest to build a custom cloud node manager for aks-engine. Require --aksengine-ccm to be true")
aksCredentialsFile = flag.String("aksengine-creds", "", "Path to credential file for Azure")
aksOrchestratorRelease = flag.String("aksengine-orchestratorRelease", "", "Orchestrator Profile for aks-engine")
aksWinZipBuildScript = flag.String("aksengine-winZipBuildScript", "https://raw.githubusercontent.com/Azure/aks-engine/master/scripts/build-windows-k8s.sh", "Build script to create custom zip containing win binaries for aks-engine")
aksNetworkPlugin = flag.String("aksengine-networkPlugin", "azure", "Network plugin to use with aks-engine")
aksAzureEnv = flag.String("aksengine-azure-env", "AzurePublicCloud", "The target Azure cloud")
aksIdentitySystem = flag.String("aksengine-identity-system", "azure_ad", "identity system (default:`azure_ad`, `adfs`)")
aksCustomCloudURL = flag.String("aksengine-custom-cloud-url", "", "management portal URL to use in custom Azure cloud (i.e Azure Stack etc)")
aksDeployCustomK8s = flag.Bool("aksengine-deploy-custom-k8s", false, "Set to True if you want to deploy custom-built k8s via aks-engine")
aksCheckParams = flag.Bool("aksengine-check-params", true, "Set to True if you want to validate your input parameters")
aksDumpClusterLogs = flag.Bool("aksengine-dump-cluster-logs", true, "Set to True if you want to dump cluster logs")
aksNodeProblemDetector = flag.Bool("aksengine-node-problem-detector", false, "Set to True if you want to enable node problem detector addon")
testCcm = flag.Bool("test-ccm", false, "Set to True if you want kubetest to run e2e tests for ccm")
testAzureFileCSIDriver = flag.Bool("test-azure-file-csi-driver", false, "Set to True if you want kubetest to run e2e tests for Azure File CSI driver")
testAzureDiskCSIDriver = flag.Bool("test-azure-disk-csi-driver", false, "Set to True if you want kubetest to run e2e tests for Azure Disk CSI driver")
testBlobfuseCSIDriver = flag.Bool("test-blobfuse-csi-driver", false, "Set to True if you want kubetest to run e2e tests for Blobfuse CSI driver")
testSecretStoreCSIDriver = flag.Bool("test-secrets-store-csi-driver", false, "Set to True if you want kubetest to run e2e tests for Secrets Store CSI driver")
// Commonly used variables
k8sVersion = getImageVersion(util.K8s("kubernetes"))
cloudProviderAzureVersion = getImageVersion(util.K8sSigs("cloud-provider-azure"))
imageRegistry = os.Getenv("REGISTRY")
k8sNodeTarballDir = util.K8s("kubernetes", "_output", "release-tars") // contains custom-built kubelet and kubectl
// kubemark scale tests
buildWithKubemark = flag.Bool("build-with-kubemark", false, fmt.Sprintf("%s Enable building clusters with kubemark", azureKubemarkTestPrefix))
kubemarkBuildScriptURL = flag.String("kubemark-build-script-url", "", fmt.Sprintf("%s URL to the building script of kubemark and kubemark-external cluster", azureKubemarkTestPrefix))
kubemarkClusterTemplateURL = flag.String("kubemark-cluster-template-url", "", fmt.Sprintf("%s URL to the aks-engine template of kubemark cluster", azureKubemarkTestPrefix))
externalClusterTemplateURL = flag.String("external-cluster-template-url", "", fmt.Sprintf("%s URL to the aks-engine template of kubemark external cluster", azureKubemarkTestPrefix))
hollowNodesDeploymentURL = flag.String("hollow-nodes-deployment-url", "", fmt.Sprintf("%s URL to the deployment configuration file of hollow nodes", azureKubemarkTestPrefix))
clusterLoader2BinURL = flag.String("clusterloader2-bin-url", "", fmt.Sprintf("%s URL to the binary of clusterloader2", azureKubemarkTestPrefix))
kubemarkLocation = flag.String("kubemark-location", "southcentralus", fmt.Sprintf("%s The location where the kubemark and external clusters run", azureKubemarkTestPrefix))
kubemarkSize = flag.String("kubemark-size", "100", fmt.Sprintf("%s The number of hollow nodes in kubemark cluster", azureKubemarkTestPrefix))
)
const (
// AzureStackCloud is a const string reference identifier for Azure Stack cloud
AzureStackCloud = "AzureStackCloud"
// ADFSIdentitySystem is a const for ADFS identifier on Azure Stack cloud
ADFSIdentitySystem = "adfs"
)
const (
ccmImageName = "azure-cloud-controller-manager"
cnmImageName = "azure-cloud-node-manager"
cnmAddonName = "cloud-node-manager"
nodeProblemDetectorAddonName = "node-problem-detector"
hyperkubeImageName = "hyperkube-amd64"
kubeAPIServerImageName = "kube-apiserver-amd64"
kubeControllerManagerImageName = "kube-controller-manager-amd64"
kubeSchedulerImageName = "kube-scheduler-amd64"
kubeProxyImageName = "kube-proxy-amd64"
)
const (
vmTypeVMSS = "vmss"
vmTypeStandard = "standard"
availabilityProfileVMSS = "VirtualMachineScaleSets"
)
type aksDeploymentMethod int
const (
// https://github.com/Azure/aks-engine/blob/master/docs/topics/kubernetes-developers.md#kubernetes-116-or-earlier
customHyperkube aksDeploymentMethod = iota
// https://github.com/Azure/aks-engine/blob/master/docs/topics/kubernetes-developers.md#kubernetes-117
customK8sComponents
noop
)
type Creds struct {
ClientID string
ClientSecret string
TenantID string
SubscriptionID string
StorageAccountName string
StorageAccountKey string
}
type Config struct {
Creds Creds
}
type aksEngineDeployer struct {
ctx context.Context
credentials *Creds
location string
resourceGroup string
name string
apiModelPath string
dnsPrefix string
templateJSON map[string]interface{}
parametersJSON map[string]interface{}
outputDir string
sshPublicKey string
sshPrivateKeyPath string
adminUsername string
adminPassword string
masterVMSize string
agentVMSize string
customHyperkubeImage string
aksCustomWinBinariesURL string
aksEngineBinaryPath string
customCcmImage string // custom cloud controller manager (ccm) image
customCnmImage string // custom cloud node manager (cnm) image
customKubeAPIServerImage string
customKubeControllerManagerImage string
customKubeProxyImage string
customKubeSchedulerImage string
customKubeBinaryURL string
azureEnvironment string
azureIdentitySystem string
azureCustomCloudURL string
agentPoolCount int
k8sVersion string
networkPlugin string
azureClient *AzureClient
aksDeploymentMethod aksDeploymentMethod
useManagedIdentity bool
identityName string
azureBlobContainerURL string
}
// isAzureStackCloud returns true if the cloud is AzureStack
func (c *aksEngineDeployer) isAzureStackCloud() bool {
return c.azureCustomCloudURL != "" && strings.EqualFold(c.azureEnvironment, AzureStackCloud)
}
// SetCustomCloudProfileEnvironment retrieves the endpoints from Azure Stack metadata endpoint and sets the values for azure.Environment
func (c *aksEngineDeployer) SetCustomCloudProfileEnvironment() error {
var environmentJSON string
if c.isAzureStackCloud() {
env := azure.Environment{}
env.Name = c.azureEnvironment
azsFQDNSuffix := strings.Replace(c.azureCustomCloudURL, fmt.Sprintf("https://portal.%s.", c.location), "", -1)
azsFQDNSuffix = strings.TrimSuffix(azsFQDNSuffix, "/")
env.ResourceManagerEndpoint = fmt.Sprintf("https://management.%s.%s/", c.location, azsFQDNSuffix)
metadataURL := fmt.Sprintf("%s/metadata/endpoints?api-version=1.0", strings.TrimSuffix(env.ResourceManagerEndpoint, "/"))
// Retrieve the metadata
httpClient := &http.Client{
Timeout: 30 * time.Second,
}
endpointsresp, err := httpClient.Get(metadataURL)
if err != nil || endpointsresp.StatusCode != 200 {
return fmt.Errorf("%s . apimodel invalid: failed to retrieve Azure Stack endpoints from %s", err, metadataURL)
}
body, err := ioutil.ReadAll(endpointsresp.Body)
if err != nil {
return fmt.Errorf("%s . apimodel invalid: failed to read the response from %s", err, metadataURL)
}
endpoints := AzureStackMetadataEndpoints{}
err = json.Unmarshal(body, &endpoints)
if err != nil {
return fmt.Errorf("%s . apimodel invalid: failed to parse the response from %s", err, metadataURL)
}
if endpoints.GraphEndpoint == "" || endpoints.Authentication == nil || endpoints.Authentication.LoginEndpoint == "" || len(endpoints.Authentication.Audiences) == 0 || endpoints.Authentication.Audiences[0] == "" {
return fmt.Errorf("%s . apimodel invalid: invalid response from %s", err, metadataURL)
}
env.GraphEndpoint = endpoints.GraphEndpoint
env.ServiceManagementEndpoint = endpoints.Authentication.Audiences[0]
env.GalleryEndpoint = endpoints.GalleryEndpoint
env.ActiveDirectoryEndpoint = endpoints.Authentication.LoginEndpoint
if strings.EqualFold(c.azureIdentitySystem, ADFSIdentitySystem) {
env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, "/")
env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, ADFSIdentitySystem)
}
env.ManagementPortalURL = endpoints.PortalEndpoint
env.ResourceManagerVMDNSSuffix = fmt.Sprintf("cloudapp.%s", azsFQDNSuffix)
env.StorageEndpointSuffix = fmt.Sprintf("%s.%s", c.location, azsFQDNSuffix)
env.KeyVaultDNSSuffix = fmt.Sprintf("vault.%s.%s", c.location, azsFQDNSuffix)
bytes, err := json.Marshal(env)
if err != nil {
return fmt.Errorf("Could not serialize Environment object - %s", err.Error())
}
environmentJSON = string(bytes)
// Create and update the file.
tmpFile, err := ioutil.TempFile("", "azurestackcloud.json")
tmpFileName := tmpFile.Name()
if err != nil {
return err
}
// Build content for the file
if err = ioutil.WriteFile(tmpFileName, []byte(environmentJSON), os.ModeAppend); err != nil {
return err
}
os.Setenv("AZURE_ENVIRONMENT_FILEPATH", tmpFileName)
}
return nil
}
func validateAzureStackCloudProfile() error {
if *aksLocation == "" {
return fmt.Errorf("no location specified for Azure Stack")
}
if *aksCustomCloudURL == "" {
return fmt.Errorf("no custom cloud portal URL specified for Azure Stack")
}
if !strings.HasPrefix(*aksCustomCloudURL, fmt.Sprintf("https://portal.%s.", *aksLocation)) {
return fmt.Errorf("custom cloud portal URL needs to start with https://portal.%s. ", *aksLocation)
}
return nil
}
func randomAKSEngineLocation() string {
var AzureLocations = []string{
"westeurope",
"westus2",
"eastus2",
"southcentralus",
}
return AzureLocations[rand.Intn(len(AzureLocations))]
}
func checkParams() error {
if !*aksCheckParams {
log.Print("Skipping checkParams")
return nil
}
// Validate flags
if strings.EqualFold(*aksAzureEnv, AzureStackCloud) {
if err := validateAzureStackCloudProfile(); err != nil {
return err
}
} else if *aksLocation == "" {
*aksLocation = randomAKSEngineLocation()
}
if *aksCredentialsFile == "" {
return fmt.Errorf("no credentials file path specified")
}
if *aksResourceName == "" {
*aksResourceName = "kubetest-" + uuid.NewV1().String()
}
if *aksResourceGroupName == "" {
*aksResourceGroupName = *aksResourceName
}
if *aksDNSPrefix == "" {
*aksDNSPrefix = *aksResourceName
}
if *aksSSHPublicKeyPath == "" {
*aksSSHPublicKeyPath = os.Getenv("HOME") + "/.ssh/id_rsa.pub"
}
if *aksSSHPrivateKeyPath == "" {
*aksSSHPrivateKeyPath = os.Getenv("HOME") + "/.ssh/id_rsa"
}
if !*buildWithKubemark && *aksTemplateURL == "" {
return fmt.Errorf("no ApiModel URL specified, *buildWithKubemark=%v\n", *buildWithKubemark)
}
if *aksCnm && !*aksCcm {
return fmt.Errorf("--aksengine-cnm cannot be true without --aksengine-ccm also being true")
}
return nil
}
func newAKSEngine() (*aksEngineDeployer, error) {
if err := checkParams(); err != nil {
return nil, fmt.Errorf("error creating Azure K8S cluster: %v", err)
}
sshKey, err := ioutil.ReadFile(*aksSSHPublicKeyPath)
if err != nil {
if os.IsNotExist(err) {
sshKey = []byte{}
} else {
return nil, fmt.Errorf("error reading SSH Key %v %v", *aksSSHPublicKeyPath, err)
}
}
outputDir, err := ioutil.TempDir(os.Getenv("HOME"), "tmp")
if err != nil {
return nil, fmt.Errorf("error creating tempdir: %v", err)
}
c := aksEngineDeployer{
ctx: context.Background(),
apiModelPath: *aksTemplateURL,
name: *aksResourceName,
dnsPrefix: *aksDNSPrefix,
location: *aksLocation,
resourceGroup: *aksResourceGroupName,
outputDir: outputDir,
sshPublicKey: fmt.Sprintf("%s", sshKey),
sshPrivateKeyPath: *aksSSHPrivateKeyPath,
credentials: &Creds{},
masterVMSize: *aksMasterVMSize,
agentVMSize: *aksAgentVMSize,
adminUsername: *aksAdminUsername,
adminPassword: *aksAdminPassword,
agentPoolCount: *aksAgentPoolCount,
k8sVersion: *aksOrchestratorRelease,
networkPlugin: *aksNetworkPlugin,
azureEnvironment: *aksAzureEnv,
azureIdentitySystem: *aksIdentitySystem,
azureCustomCloudURL: *aksCustomCloudURL,
customHyperkubeImage: getDockerImage(hyperkubeImageName),
customCcmImage: getDockerImage(ccmImageName),
customCnmImage: getDockerImage(cnmImageName),
customKubeAPIServerImage: getDockerImage(kubeAPIServerImageName),
customKubeControllerManagerImage: getDockerImage(kubeControllerManagerImageName),
customKubeProxyImage: getDockerImage(kubeProxyImageName),
customKubeSchedulerImage: getDockerImage(kubeSchedulerImageName),
aksEngineBinaryPath: "aks-engine", // use the one in path by default
aksDeploymentMethod: getAKSDeploymentMethod(*aksOrchestratorRelease),
useManagedIdentity: false,
identityName: "",
}
creds, err := getAzCredentials()
if err != nil {
return nil, fmt.Errorf("failed to get azure credentials: %v", err)
}
c.credentials = creds
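// Build the blob container URL and the download URLs for the custom-built node tarball and Windows zip.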
c.azureBlobContainerURL = fmt.Sprintf(azureBlobContainerURLTemplate, c.credentials.StorageAccountName, os.Getenv("AZ_STORAGE_CONTAINER_NAME"))
c.customKubeBinaryURL = fmt.Sprintf("%s/%s", c.azureBlobContainerURL, fmt.Sprintf(k8sNodeTarballTemplate, k8sVersion))
c.aksCustomWinBinariesURL = fmt.Sprintf("%s/%s", c.azureBlobContainerURL, fmt.Sprintf(winZipTemplate, k8sVersion))
err = c.SetCustomCloudProfileEnvironment()
if err != nil {
return nil, fmt.Errorf("failed to create custom cloud profile file: %v", err)
}
err = c.getAzureClient(c.ctx)
if err != nil {
return nil, fmt.Errorf("failed to generate ARM client: %v", err)
}
// like kops and gke set KUBERNETES_CONFORMANCE_TEST so the auth is picked up
// from kubectl instead of bash inference.
if err := os.Setenv("KUBERNETES_CONFORMANCE_TEST", "yes"); err != nil {
return nil, err
}
if err := c.dockerLogin(); err != nil {
return nil, err
}
return &c, nil
}
func getAKSDeploymentMethod(k8sRelease string) aksDeploymentMethod {
if !*aksDeployCustomK8s {
return noop
}
// k8sRelease should be in the format of X.XX
s := strings.Split(k8sRelease, ".")
if len(s) != 2 {
return noop
}
minor, err := strconv.Atoi(s[1])
if err != nil {
return noop
}
// Deploy custom-built individual k8s components because
// there is no hyperkube support in aks-engine for 1.17+
if minor >= 17 {
return customK8sComponents
}
return customHyperkube
}
func (c *aksEngineDeployer) populateAPIModelTemplate() error {
var err error
v := AKSEngineAPIModel{}
if c.apiModelPath != "" {
// template already exists, read it
template, err := ioutil.ReadFile(path.Join(c.outputDir, "kubernetes.json"))
if err != nil {
return fmt.Errorf("error reading ApiModel template file: %v.", err)
}
dec := json.NewDecoder(bytes.NewReader(template))
// Enforce strict JSON
dec.DisallowUnknownFields()
if err := dec.Decode(&v); err != nil {
return fmt.Errorf("error unmarshaling ApiModel template file: %v", err)
}
} else {
return fmt.Errorf("No template file specified %v", err)
}
// set default distro so we do not use prebuilt os image
if v.Properties.MasterProfile.Distro == "" {
v.Properties.MasterProfile.Distro = "ubuntu"
}
for _, agentPool := range v.Properties.AgentPoolProfiles {
if agentPool.Distro == "" {
agentPool.Distro = "ubuntu"
}
}
// replace APIModel template properties from flags
if c.location != "" {
v.Location = c.location
}
if c.name != "" {
v.Name = c.name
}
if c.k8sVersion != "" {
v.Properties.OrchestratorProfile.OrchestratorRelease = c.k8sVersion
}
if v.Properties.OrchestratorProfile.KubernetesConfig == nil {
v.Properties.OrchestratorProfile.KubernetesConfig = &KubernetesConfig{}
}
// to support aks-engine validation logic `networkPolicy 'none' is not supported with networkPlugin 'azure'`
if v.Properties.OrchestratorProfile.KubernetesConfig.NetworkPolicy != "none" && v.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin == "" {
// default NetworkPlugin to Azure if not provided
v.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = c.networkPlugin
}
if c.dnsPrefix != "" {
v.Properties.MasterProfile.DNSPrefix = c.dnsPrefix
}
if c.masterVMSize != "" {
v.Properties.MasterProfile.VMSize = c.masterVMSize
}
if c.agentVMSize != "" {
for _, agentPool := range v.Properties.AgentPoolProfiles {
agentPool.VMSize = c.agentVMSize
}
}
if c.agentPoolCount != 0 {
for _, agentPool := range v.Properties.AgentPoolProfiles {
agentPool.Count = c.agentPoolCount
}
}
if c.adminUsername != "" {
v.Properties.LinuxProfile.AdminUsername = c.adminUsername
if v.Properties.WindowsProfile != nil {
v.Properties.WindowsProfile.AdminUsername = c.adminUsername
}
}
if c.adminPassword != "" {
if v.Properties.WindowsProfile != nil {
v.Properties.WindowsProfile.AdminPassword = c.adminPassword
}
}
v.Properties.LinuxProfile.SSHKeys.PublicKeys = []PublicKey{{
KeyData: c.sshPublicKey,
}}
if !toBool(v.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) {
v.Properties.ServicePrincipalProfile.ClientID = c.credentials.ClientID
v.Properties.ServicePrincipalProfile.Secret = c.credentials.ClientSecret
} else {
c.useManagedIdentity = true
if v.Properties.OrchestratorProfile.KubernetesConfig.UserAssignedID != "" {
c.identityName = v.Properties.OrchestratorProfile.KubernetesConfig.UserAssignedID
} else {
c.identityName = c.resourceGroup + "-id"
v.Properties.OrchestratorProfile.KubernetesConfig.UserAssignedID = c.identityName
}
}
if *aksWinBinaries {
v.Properties.OrchestratorProfile.KubernetesConfig.CustomWindowsPackageURL = c.aksCustomWinBinariesURL
}
if *aksCcm {
useCloudControllerManager := true
v.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = &useCloudControllerManager
v.Properties.OrchestratorProfile.KubernetesConfig.CustomCcmImage = c.customCcmImage
}
if *aksCnm {
cnmAddon := KubernetesAddon{
Name: cnmAddonName,
Enabled: boolPointer(true),
Containers: []KubernetesContainerSpec{
{
Name: cnmAddonName,
Image: c.customCnmImage,
},
},
}
appendAddonToAPIModel(&v, cnmAddon)
}
if *aksNodeProblemDetector {
nodeProblemDetectorAddon := KubernetesAddon{
Name: nodeProblemDetectorAddonName,
Enabled: boolPointer(true),
}
appendAddonToAPIModel(&v, nodeProblemDetectorAddon)
}
// Populate PrivateAzureRegistryServer field if we are using ACR and custom-built k8s components
if strings.Contains(imageRegistry, "azurecr") && c.aksDeploymentMethod != noop {
v.Properties.OrchestratorProfile.KubernetesConfig.PrivateAzureRegistryServer = imageRegistry
}
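// Wire in either a single custom hyperkube image (k8s <= 1.16) or individual custom component images (k8s 1.17+).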
switch c.aksDeploymentMethod {
case customHyperkube:
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeAPIServerImage = ""
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeControllerManagerImage = ""
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeProxyImage = ""
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeSchedulerImage = ""
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeBinaryURL = ""
v.Properties.OrchestratorProfile.KubernetesConfig.CustomHyperkubeImage = c.customHyperkubeImage
case customK8sComponents:
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeAPIServerImage = c.customKubeAPIServerImage
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeControllerManagerImage = c.customKubeControllerManagerImage
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeProxyImage = c.customKubeProxyImage
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeSchedulerImage = c.customKubeSchedulerImage
v.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeBinaryURL = c.customKubeBinaryURL
v.Properties.OrchestratorProfile.KubernetesConfig.CustomHyperkubeImage = ""
}
if c.isAzureStackCloud() {
v.Properties.CustomCloudProfile.PortalURL = c.azureCustomCloudURL
}
if len(v.Properties.AgentPoolProfiles) > 0 {
// Default to VirtualMachineScaleSets if AvailabilityProfile is empty
isVMSS := v.Properties.AgentPoolProfiles[0].AvailabilityProfile == "" || v.Properties.AgentPoolProfiles[0].AvailabilityProfile == availabilityProfileVMSS
if err := populateAzureCloudConfig(isVMSS, *c.credentials, c.azureEnvironment, c.resourceGroup, c.location, c.outputDir); err != nil {
return err
}
}
apiModel, _ := json.MarshalIndent(v, "", " ")
c.apiModelPath = path.Join(c.outputDir, "kubernetes.json")
err = ioutil.WriteFile(c.apiModelPath, apiModel, 0644)
if err != nil {
return fmt.Errorf("cannot write apimodel to file: %v", err)
}
return nil
}
func appendAddonToAPIModel(v *AKSEngineAPIModel, addon KubernetesAddon) {
// Update the addon if it already exists in the API model
for i := range v.Properties.OrchestratorProfile.KubernetesConfig.Addons {
a := &v.Properties.OrchestratorProfile.KubernetesConfig.Addons[i]
if a.Name == addon.Name {
*a = addon
return
}
}
v.Properties.OrchestratorProfile.KubernetesConfig.Addons = append(v.Properties.OrchestratorProfile.KubernetesConfig.Addons, addon)
}
func (c *aksEngineDeployer) getAKSEngine(retry int) error {
downloadPath := path.Join(os.Getenv("HOME"), "aks-engine.tar.gz")
f, err := os.Create(downloadPath)
if err != nil {
return err
}
defer f.Close()
for i := 0; i < retry; i++ {
log.Printf("downloading %v from %v.", downloadPath, *aksEngineURL)
if err := httpRead(*aksEngineURL, f); err == nil {
break
}
err = fmt.Errorf("url=%s failed get %v: %v.", *aksEngineURL, downloadPath, err)
if i == retry-1 {
return err
}
log.Println(err)
sleep(time.Duration(i) * time.Second)
}
f.Close()
if *aksEngineMD5 != "" {
o, err := control.Output(exec.Command("md5sum", f.Name()))
if err != nil {
return err
}
if strings.Split(string(o), " ")[0] != *aksEngineMD5 {
return fmt.Errorf("wrong md5 sum for aks-engine.")
}
}
// Adding the aks-engine binary to the kubernetes dir would dirty the tree,
// which makes it harder to diff the CI signal, so move the binary
// to the temp folder instead.
wd := os.TempDir()
log.Printf("Extracting tar file %v into directory %v .", f.Name(), wd)
if err = control.FinishRunning(exec.Command("tar", "-xzf", f.Name(), "--strip", "1", "-C", wd)); err != nil {
return err
}
c.aksEngineBinaryPath = path.Join(wd, "aks-engine")
return nil
}
func (c *aksEngineDeployer) generateARMTemplates() error {
cmd := exec.Command(c.aksEngineBinaryPath, "generate", c.apiModelPath, "--output-directory", c.outputDir)
cmd.Dir = os.TempDir()
if err := control.FinishRunning(cmd); err != nil {
return fmt.Errorf("failed to generate ARM templates: %v.", err)
}
return nil
}
func (c *aksEngineDeployer) loadARMTemplates() error {
var err error
template, err := ioutil.ReadFile(path.Join(c.outputDir, "azuredeploy.json"))
if err != nil {
return fmt.Errorf("error reading ARM template file: %v.", err)
}
c.templateJSON = make(map[string]interface{})
err = json.Unmarshal(template, &c.templateJSON)
if err != nil {
return fmt.Errorf("error unmarshall template %v", err.Error())
}
parameters, err := ioutil.ReadFile(path.Join(c.outputDir, "azuredeploy.parameters.json"))
if err != nil {
return fmt.Errorf("error reading ARM parameters file: %v", err)
}
c.parametersJSON = make(map[string]interface{})
err = json.Unmarshal(parameters, &c.parametersJSON)
if err != nil {
return fmt.Errorf("error unmarshall parameters %v", err.Error())
}
c.parametersJSON = c.parametersJSON["parameters"].(map[string]interface{})
return nil
}
func (c *aksEngineDeployer) getAzureClient(ctx context.Context) error {
// instantiate Azure Resource Manager Client
env, err := azure.EnvironmentFromName(c.azureEnvironment)
var client *AzureClient
if c.isAzureStackCloud() && strings.EqualFold(c.azureIdentitySystem, ADFSIdentitySystem) {
if client, err = getAzureClient(env,
c.credentials.SubscriptionID,
c.credentials.ClientID,
c.azureIdentitySystem,
c.credentials.ClientSecret); err != nil {
return fmt.Errorf("error trying to get ADFS Azure Client: %v", err)
}
} else {
if client, err = getAzureClient(env,
c.credentials.SubscriptionID,
c.credentials.ClientID,
c.credentials.TenantID,
c.credentials.ClientSecret); err != nil {
return fmt.Errorf("error trying to get Azure Client: %v", err)
}
}
c.azureClient = client
return nil
}
func (c *aksEngineDeployer) createCluster() error {
var err error
kubecfgDir, _ := ioutil.ReadDir(path.Join(c.outputDir, "kubeconfig"))
kubecfg := path.Join(c.outputDir, "kubeconfig", kubecfgDir[0].Name())
log.Printf("Setting kubeconfig env variable: kubeconfig path: %v.", kubecfg)
os.Setenv("KUBECONFIG", kubecfg)
log.Printf("Creating resource group: %v.", c.resourceGroup)
log.Printf("Creating Azure resource group: %v for cluster deployment.", c.resourceGroup)
_, err = c.azureClient.EnsureResourceGroup(c.ctx, c.resourceGroup, c.location, nil)
if err != nil {
return fmt.Errorf("could not ensure resource group: %v", err)
}
log.Printf("Validating deployment ARM templates.")
if _, err := c.azureClient.ValidateDeployment(
c.ctx, c.resourceGroup, c.name, &c.templateJSON, &c.parametersJSON,
); err != nil {
return fmt.Errorf("ARM template invalid: %v", err)
}
log.Printf("Deploying cluster %v in resource group %v.", c.name, c.resourceGroup)
if _, err := c.azureClient.DeployTemplate(
c.ctx, c.resourceGroup, c.name, &c.templateJSON, &c.parametersJSON,
); err != nil {
return fmt.Errorf("cannot deploy: %v", err)
}
if c.useManagedIdentity && c.identityName != "" {
log.Printf("Assigning 'Owner' role to %s in %s", c.identityName, c.resourceGroup)
if err := c.azureClient.AssignOwnerRoleToIdentity(c.ctx, c.resourceGroup, c.identityName); err != nil {
return err
}
}
return nil
}
func (c *aksEngineDeployer) dockerLogin() error {
cwd, _ := os.Getwd()
log.Printf("CWD %v", cwd)
cmd := &exec.Cmd{}
username := ""
pwd := ""
server := ""
var err error
if !strings.Contains(imageRegistry, "azurecr.io") {
// if REGISTRY is not ACR, then use docker cred
log.Println("Attempting Docker login with docker cred.")
username = os.Getenv("DOCKER_USERNAME")
passwordFile := os.Getenv("DOCKER_PASSWORD_FILE")
password, err := ioutil.ReadFile(passwordFile)
if err != nil {
return fmt.Errorf("error reading docker password file %v: %v", passwordFile, err)
}
pwd = strings.TrimSuffix(string(password), "\n")
} else {
// if REGISTRY is ACR, then use azure credential
log.Println("Attempting Docker login with azure cred.")
username = c.credentials.ClientID
pwd = c.credentials.ClientSecret
server = imageRegistry
}
cmd = exec.Command("docker", "login", fmt.Sprintf("--username=%s", username), fmt.Sprintf("--password=%s", pwd), server)
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed Docker login with error: %v", err)
}
log.Println("Docker login success.")
return nil
}
func dockerPush(images ...string) error {
for _, image := range images {
log.Printf("Pushing docker image %s", image)
cmd := exec.Command("docker", "push", image)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to push %s: %v", image, err)
}
}
return nil
}
func getDockerImage(imageName string) string {
imageVersion := k8sVersion
switch imageName {
case ccmImageName, cnmImageName:
imageVersion = cloudProviderAzureVersion
}
return fmt.Sprintf("%s/%s:%s", imageRegistry, imageName, imageVersion)
}
func areAllDockerImagesExist(images ...string) bool {
for _, image := range images {
cmd := exec.Command("docker", "pull", image)
if err := cmd.Run(); err != nil {
log.Printf("%s does not exist", image)
return false
}
log.Printf("Reusing %s", image)
}
return true
}
func isURLExist(url string) bool {
cmd := exec.Command("curl", "-s", "-o", "/dev/null", "-s", "-f", url)
if err := cmd.Run(); err != nil {
log.Printf("%s does not exist", url)
return false
}
log.Printf("Reusing %s", url)
return true
}
// getImageVersion returns the image version based on the project's latest git commit
func getImageVersion(projectPath string) string {
cmd := exec.Command("git", "describe", "--tags", "--always", "--dirty")
cmd.Dir = projectPath
output, err := cmd.Output()
if err != nil {
return ""
}
return strings.TrimSpace(string(output))
}
func (c *aksEngineDeployer) buildAzureCloudComponents() error {
log.Println("Building cloud controller manager and cloud node manager.")
// Set environment variables for building cloud components' images
if err := os.Setenv("IMAGE_REGISTRY", imageRegistry); err != nil {
return err
}
if err := os.Setenv("IMAGE_TAG", cloudProviderAzureVersion); err != nil {
return err
}
cmd := exec.Command("make", "-C", util.K8sSigs("cloud-provider-azure"), "image", "push")
cmd.Stdout = ioutil.Discard
if err := control.FinishRunning(cmd); err != nil {
return err
}
log.Printf("Custom cloud controller manager image: %s", c.customCcmImage)
if *aksCnm {
c.customCnmImage = getDockerImage(cnmImageName)
log.Printf("Custom cloud node manager image: %s", c.customCnmImage)
}
return nil
}
func (c *aksEngineDeployer) buildHyperkube() error {
var pushCmd *exec.Cmd
os.Setenv("VERSION", k8sVersion)
log.Println("Building hyperkube.")
if _, err := os.Stat(util.K8s("kubernetes", "cmd", "hyperkube")); err == nil {
// cmd/hyperkube binary still exists in repo
cmd := exec.Command("make", "-C", util.K8s("kubernetes"), "WHAT=cmd/hyperkube")
cmd.Stdout = ioutil.Discard
if err := control.FinishRunning(cmd); err != nil {
return err
}
hyperkubeBin := util.K8s("kubernetes", "_output", "bin", "hyperkube")
pushCmd = exec.Command("make", "-C", util.K8s("kubernetes", "cluster", "images", "hyperkube"), "push", fmt.Sprintf("HYPERKUBE_BIN=%s", hyperkubeBin))
} else if os.IsNotExist(err) {
pushCmd = exec.Command("make", "-C", util.K8s("kubernetes", "cluster", "images", "hyperkube"), "push")
}
log.Println("Pushing hyperkube.")
pushCmd.Stdout = ioutil.Discard
if err := control.FinishRunning(pushCmd); err != nil {
return err
}
log.Printf("Custom hyperkube image: %s", c.customHyperkubeImage)
return nil
}
func (c *aksEngineDeployer) uploadToAzureStorage(filePath string) (string, error) {
credential, err := azblob.NewSharedKeyCredential(c.credentials.StorageAccountName, c.credentials.StorageAccountKey)
if err != nil {
return "", fmt.Errorf("new shared key credential: %v", err)
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
URL, _ := url.Parse(c.azureBlobContainerURL)
containerURL := azblob.NewContainerURL(*URL, p)
file, err := os.Open(filePath)
if err != nil {
return "", fmt.Errorf("failed to open file %v . Error %v", filePath, err)
}
defer file.Close()
blobURL := containerURL.NewBlockBlobURL(filepath.Base(file.Name()))
blobURLString := blobURL.URL()
if _, err = azblob.UploadFileToBlockBlob(context.Background(), file, blobURL, azblob.UploadToBlockBlobOptions{}); err != nil {
// 'BlobHasBeenModified' conflict happens when two concurrent jobs are trying to upload files with the same name
// Simply ignore the error and return the blob URL since at least one job will successfully upload the file to Azure storage
if strings.Contains(err.Error(), "BlobHasBeenModified") {
return blobURLString.String(), nil
}
return "", err
}
log.Printf("Uploaded %s to %s", filePath, blobURLString.String())
return blobURLString.String(), nil
}
func getZipBuildScript(buildScriptURL string, retry int) (string, error) {
downloadPath := path.Join(os.Getenv("HOME"), "build-win-zip.sh")
f, err := os.Create(downloadPath)
if err != nil {
return "", err
}
defer f.Close()
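	// Retry the download up to `retry` times, sleeping i seconds between attempts
	// and returning the last error if every attempt fails.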
for i := 0; i < retry; i++ {
log.Printf("downloading %v from %v.", downloadPath, buildScriptURL)
if err := httpRead(buildScriptURL, f); err == nil {
break
}
err = fmt.Errorf("url=%s failed get %v: %v.", buildScriptURL, downloadPath, err)
if i == retry-1 {
return "", err
}
log.Println(err)
sleep(time.Duration(i) * time.Second)
}
f.Chmod(0744)
return downloadPath, nil
}
func (c *aksEngineDeployer) buildWinZip() error {
zipName := fmt.Sprintf(winZipTemplate, k8sVersion)
buildFolder := path.Join(os.Getenv("HOME"), "winbuild")
zipPath := path.Join(os.Getenv("HOME"), zipName)
log.Printf("Building %s", zipName)
buildScriptPath, err := getZipBuildScript(*aksWinZipBuildScript, 2)
if err != nil {
return err
}
	// The build script for the Windows binaries produces a lot of output; discard it here.
cmd := exec.Command(buildScriptPath, "-u", zipName, "-z", buildFolder)
cmd.Stdout = ioutil.Discard
if err := control.FinishRunning(cmd); err != nil {
return err
}
log.Printf("Uploading %s", zipPath)
if c.aksCustomWinBinariesURL, err = c.uploadToAzureStorage(zipPath); err != nil {
return err
}
return nil
}
func (c *aksEngineDeployer) Up() error {
if *buildWithKubemark {
if err := c.setCred(); err != nil {
log.Printf("error during setting up azure credentials: %v", err)
return err
}
cmd := exec.Command("curl", "-o", "build-kubemark.sh", *kubemarkBuildScriptURL)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to get build-kubemark.sh from %v: %v", *kubemarkBuildScriptURL, err)
}
cmd = exec.Command("bash", "build-kubemark.sh",
"--kubemark-cluster-template-url", *kubemarkClusterTemplateURL,
"--external-cluster-template-url", *externalClusterTemplateURL,
"--hollow-nodes-deployment-url", *hollowNodesDeploymentURL,
"--clusterloader2-bin-url", *clusterLoader2BinURL,
"--kubemark-size", *kubemarkSize,
"--location", *kubemarkLocation)
if err := control.FinishRunning(cmd); err != nil {
return fmt.Errorf("failed to build up kubemark environment: %v", err)
}
log.Println("kubemark test finished")
return nil
}
var err error
if c.apiModelPath != "" {
templateFile, err := downloadFromURL(c.apiModelPath, path.Join(c.outputDir, "kubernetes.json"), 2)
if err != nil {
return fmt.Errorf("error downloading ApiModel template: %v with error %v", c.apiModelPath, err)
}
c.apiModelPath = templateFile
}
err = c.populateAPIModelTemplate()
if err != nil {
return fmt.Errorf("failed to populate aks-engine apimodel template: %v", err)
}
if *aksEngineURL != "" {
err = c.getAKSEngine(2)
if err != nil {
return fmt.Errorf("failed to get AKS Engine binary: %v", err)
}
}
err = c.generateARMTemplates()
if err != nil {
return fmt.Errorf("failed to generate ARM templates: %v", err)
}
err = c.loadARMTemplates()
if err != nil {
return fmt.Errorf("error loading ARM templates: %v", err)
}
err = c.createCluster()
if err != nil {
return fmt.Errorf("error creating cluster: %v", err)
}
return nil
}
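// Build builds only what is still missing for the chosen deployment method:
// a custom hyperkube image, custom k8s component images plus a node tarball
// uploaded to Azure storage, or just the upstream e2e test binaries.
// Cloud-provider images and the Windows zip are handled afterwards.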
func (c *aksEngineDeployer) Build(b buildStrategy) error {
if c.aksDeploymentMethod == customHyperkube && !areAllDockerImagesExist(c.customHyperkubeImage) {
// Build k8s without any special environment variables
if err := b.Build(); err != nil {
return err
}
if err := c.buildHyperkube(); err != nil {
return fmt.Errorf("error building hyperkube %v", err)
}
} else if c.aksDeploymentMethod == customK8sComponents &&
(!areAllDockerImagesExist(c.customKubeAPIServerImage,
c.customKubeControllerManagerImage,
c.customKubeProxyImage,
c.customKubeSchedulerImage) || !isURLExist(c.customKubeBinaryURL)) {
// Environment variables for creating custom k8s images
if err := os.Setenv("KUBE_DOCKER_REGISTRY", imageRegistry); err != nil {
return err
}
if err := os.Setenv("KUBE_DOCKER_IMAGE_TAG", k8sVersion); err != nil {
return err
}
if err := b.Build(); err != nil {
return err
}
if err := dockerPush(
c.customKubeAPIServerImage,
c.customKubeControllerManagerImage,
c.customKubeProxyImage,
c.customKubeSchedulerImage,
); err != nil {
return err
}
oldK8sNodeTarball := filepath.Join(k8sNodeTarballDir, "kubernetes-node-linux-amd64.tar.gz")
if _, err := os.Stat(oldK8sNodeTarball); os.IsNotExist(err) {
return fmt.Errorf("%s does not exist", oldK8sNodeTarball)
}
// Rename the tarball so that uploaded tarball won't get overwritten by other jobs
newK8sNodeTarball := filepath.Join(k8sNodeTarballDir, fmt.Sprintf(k8sNodeTarballTemplate, k8sVersion))
log.Printf("Renaming %s to %s", oldK8sNodeTarball, newK8sNodeTarball)
if err := os.Rename(oldK8sNodeTarball, newK8sNodeTarball); err != nil {
return fmt.Errorf("error renaming %s to %s: %v", oldK8sNodeTarball, newK8sNodeTarball, err)
}
var err error
if c.customKubeBinaryURL, err = c.uploadToAzureStorage(newK8sNodeTarball); err != nil {
return err
}
} else if !*testCcm && !*testAzureDiskCSIDriver && !*testAzureFileCSIDriver && !*testBlobfuseCSIDriver && !*testSecretStoreCSIDriver && !strings.EqualFold(string(b), "none") {
// Only build the required components to run upstream e2e tests
for _, component := range []string{"WHAT='test/e2e/e2e.test'", "WHAT=cmd/kubectl", "ginkgo"} {
cmd := exec.Command("make", component)
cmd.Dir = util.K8s("kubernetes")
err := control.FinishRunning(cmd)
if err != nil {
return err
}
}
}
if *aksCcm && !areAllDockerImagesExist(c.customCcmImage, c.customCnmImage) {
if err := c.buildAzureCloudComponents(); err != nil {
return fmt.Errorf("error building Azure cloud components: %v", err)
}
}
if *aksWinBinaries && !isURLExist(c.aksCustomWinBinariesURL) {
if err := c.buildWinZip(); err != nil {
return fmt.Errorf("error building windowsZipFile %v", err)
}
}
return nil
}
func (c *aksEngineDeployer) Down() error {
log.Printf("Deleting resource group: %v.", c.resourceGroup)
return c.azureClient.DeleteResourceGroup(c.ctx, c.resourceGroup)
}
func (c *aksEngineDeployer) DumpClusterLogs(localPath, gcsPath string) error {
if !*aksDumpClusterLogs {
log.Print("Skipping DumpClusterLogs")
return nil
}
if err := os.Setenv("ARTIFACTS", localPath); err != nil {
return err
}
logDumper := func() error {
// Extract log dump script and manifest from cloud-provider-azure repo
const logDumpURLPrefix string = "https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/hack/log-dump/"
logDumpScript, err := downloadFromURL(logDumpURLPrefix+"log-dump.sh", path.Join(c.outputDir, "log-dump.sh"), 2)
if err != nil {
return fmt.Errorf("error downloading log dump script: %v", err)
}
if err := control.FinishRunning(exec.Command("chmod", "+x", logDumpScript)); err != nil {
return fmt.Errorf("error changing access permission for %s: %v", logDumpScript, err)
}
if _, err := downloadFromURL(logDumpURLPrefix+"log-dump-daemonset.yaml", path.Join(c.outputDir, "log-dump-daemonset.yaml"), 2); err != nil {
return fmt.Errorf("error downloading log dump manifest: %v", err)
}
if err := control.FinishRunning(exec.Command("bash", "-c", logDumpScript)); err != nil {
return fmt.Errorf("error running log collection script %s: %v", logDumpScript, err)
}
return nil
}
logDumperWindows := func() error {
const winLogDumpScriptUrl string = "https://raw.githubusercontent.com/kubernetes-sigs/windows-testing/master/scripts/win-ci-logs-collector.sh"
winLogDumpScript, err := downloadFromURL(winLogDumpScriptUrl, path.Join(c.outputDir, "win-ci-logs-collector.sh"), 2)
masterFQDN := fmt.Sprintf("%s.%s.cloudapp.azure.com", c.dnsPrefix, c.location)
if err != nil {
return fmt.Errorf("error downloading windows logs dump script: %v", err)
}
if err := control.FinishRunning(exec.Command("chmod", "+x", winLogDumpScript)); err != nil {
return fmt.Errorf("error changing permission for script %s: %v", winLogDumpScript, err)
}
if err := control.FinishRunning(exec.Command("bash", "-c", fmt.Sprintf("%s %s %s %s", winLogDumpScript, masterFQDN, c.outputDir, c.sshPrivateKeyPath))); err != nil {
return fmt.Errorf("error while running Windows log collector script: %v", err)
}
return nil
}
var errors []string
if err := logDumper(); err != nil {
errors = append(errors, err.Error())
}
if err := logDumperWindows(); err != nil {
		// don't record the error since logDumperWindows is expected to fail on non-Windows clusters
//errors = append(errors, err.Error())
}
if len(errors) != 0 {
		return fmt.Errorf("%s", strings.Join(errors, "\n"))
}
return nil
}
func (c *aksEngineDeployer) GetClusterCreated(clusterName string) (time.Time, error) {
return time.Time{}, errors.New("not implemented")
}
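// setCred exports the Azure service principal credentials and location as
// K8S_AZURE_* environment variables, which the CCM e2e tests and the kubemark
// build script read.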
func (c *aksEngineDeployer) setCred() error {
if err := os.Setenv("K8S_AZURE_TENANTID", c.credentials.TenantID); err != nil {
return err
}
if err := os.Setenv("K8S_AZURE_SUBSID", c.credentials.SubscriptionID); err != nil {
return err
}
if err := os.Setenv("K8S_AZURE_SPID", c.credentials.ClientID); err != nil {
return err
}
if err := os.Setenv("K8S_AZURE_SPSEC", c.credentials.ClientSecret); err != nil {
return err
}
if err := os.Setenv("K8S_AZURE_LOCATION", c.location); err != nil {
return err
}
return nil
}
func (c *aksEngineDeployer) TestSetup() error {
// set env vars required by the ccm e2e tests
if *testCcm {
if err := c.setCred(); err != nil {
log.Printf("error during setting up azure credentials: %v", err)
return err
}
} else if *testAzureFileCSIDriver || *testAzureDiskCSIDriver || *testBlobfuseCSIDriver {
// Set env vars required by CSI driver e2e jobs.
// tenantId, subscriptionId, aadClientId, and aadClientSecret will be obtained from AZURE_CREDENTIAL
if err := os.Setenv("RESOURCE_GROUP", c.resourceGroup); err != nil {
return err
}
if err := os.Setenv("LOCATION", c.location); err != nil {
return err
}
}
// Download repo-list that defines repositories for Windows test images.
downloadUrl, ok := os.LookupEnv("KUBE_TEST_REPO_LIST_DOWNLOAD_LOCATION")
if !ok {
// Env value for downloadUrl is not set, nothing to do
log.Printf("KUBE_TEST_REPO_LIST_DOWNLOAD_LOCATION not set. Using default test image repos.")
return nil
}
downloadPath := path.Join(os.Getenv("HOME"), "repo-list")
f, err := os.Create(downloadPath)
if err != nil {
return err
}
defer f.Close()
log.Printf("downloading %v from %v.", downloadPath, downloadUrl)
err = httpRead(downloadUrl, f)
if err != nil {
return fmt.Errorf("url=%s failed get %v: %v.", downloadUrl, downloadPath, err)
}
f.Chmod(0744)
if err := os.Setenv("KUBE_TEST_REPO_LIST", downloadPath); err != nil {
return err
}
return nil
}
func (c *aksEngineDeployer) IsUp() error {
return isUp(c)
}
func (c *aksEngineDeployer) KubectlCommand() (*exec.Cmd, error) {
return exec.Command("kubectl"), nil
}
// BuildTester returns a standard ginkgo-script tester or a custom one if testCcm is enabled
func (c *aksEngineDeployer) BuildTester(o *e2e.BuildTesterOptions) (e2e.Tester, error) {
if *testCcm {
return &GinkgoCCMTester{}, nil
}
var csiDriverName string
if *testAzureDiskCSIDriver {
csiDriverName = "azuredisk-csi-driver"
} else if *testAzureFileCSIDriver {
csiDriverName = "azurefile-csi-driver"
} else if *testBlobfuseCSIDriver {
csiDriverName = "blobfuse-csi-driver"
} else if *testSecretStoreCSIDriver {
csiDriverName = "secrets-store-csi-driver"
}
if csiDriverName != "" {
return &GinkgoCSIDriverTester{
driverName: csiDriverName,
}, nil
}
// Run e2e tests from upstream k8s repo
return &GinkgoScriptTester{}, nil
}
// GinkgoCCMTester implements Tester by running E2E tests for Azure CCM
type GinkgoCCMTester struct {
}
// Run executes custom ginkgo script
func (t *GinkgoCCMTester) Run(control *process.Control, testArgs []string) error {
artifactsDir, ok := os.LookupEnv("ARTIFACTS")
if !ok {
artifactsDir = filepath.Join(os.Getenv("WORKSPACE"), "_artifacts")
}
log.Printf("artifactsDir %v", artifactsDir)
// set CCM_JUNIT_REPORT_DIR for ccm e2e test to use the same dir
if err := os.Setenv("CCM_JUNIT_REPORT_DIR", artifactsDir); err != nil {
return err
}
cmd := exec.Command("make", "test-ccm-e2e")
projectPath := util.K8sSigs("cloud-provider-azure")
cmd.Dir = projectPath
testErr := control.FinishRunning(cmd)
return testErr
}
// GinkgoCSIDriverTester implements Tester by running E2E tests for Azure-related CSI drivers
type GinkgoCSIDriverTester struct {
driverName string
}
// Run executes custom ginkgo script
func (t *GinkgoCSIDriverTester) Run(control *process.Control, testArgs []string) error {
cmd := exec.Command("make", "e2e-test")
cmd.Dir = util.K8sSigs(t.driverName)
return control.FinishRunning(cmd)
}
|
[
"\"REGISTRY\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"AZ_STORAGE_CONTAINER_NAME\"",
"\"HOME\"",
"\"DOCKER_USERNAME\"",
"\"DOCKER_PASSWORD_FILE\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"WORKSPACE\""
] |
[] |
[
"DOCKER_PASSWORD_FILE",
"DOCKER_USERNAME",
"AZ_STORAGE_CONTAINER_NAME",
"WORKSPACE",
"REGISTRY",
"HOME"
] |
[]
|
["DOCKER_PASSWORD_FILE", "DOCKER_USERNAME", "AZ_STORAGE_CONTAINER_NAME", "WORKSPACE", "REGISTRY", "HOME"]
|
go
| 6 | 0 | |
step_1/demo_user.py
|
import time
import json
import asyncio
import websockets
import numpy as np
from fire import Fire
from Feynman.cloud import Google_drive
from Feynman.serialize import Pickle_serializer
from Feynman.etc.util import Config, get_logger
class Demo_user():
def __init__(self):
self.logger = get_logger()
self._opt = Config(open('config/demo.json').read())
self._url = self._opt.demo_user.url
self._gd = Google_drive('token.pickle')
self._ps = Pickle_serializer()
def _make_user_list(self):
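    # Sample the number of active users for this tick from a Poisson distribution
    # (daily traffic scaled down to one sleep interval), then draw user ids
    # according to the per-user popularity distribution.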
user_num = self._traffic/24/60/60*self._opt.demo_user.sleep_t
user_num = np.random.poisson(user_num)
u_idxs = np.random.choice(range(self._user_count), user_num, p=self._p_user[0])
return {'user_id': list(map(int, u_idxs))}
def _make_user_choice(self, u_list):
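    # For each user, draw a latent cluster from that user's cluster distribution,
    # then draw one item from the chosen cluster's (normalized) item distribution.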
u_idxs = u_list['user_id']
u_ks = [np.random.choice(range(self._cluster), p=self._p_cluster_user[u_idx]) for u_idx in u_idxs]
i_idxs = [np.random.choice(range(self._item_count), p=self._p_item_cluster[u_k]/sum(self._p_item_cluster[u_k])) for u_k in u_ks]
return dict(zip(u_idxs, i_idxs))
def _make_user_interest(self, u_list):
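    # Rank items by the user's expected affinity (dot product of the user's
    # cluster weights with the item-cluster matrix) and keep the top
    # max_interest item ids per user.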
u_idxs = u_list['user_id']
u_max_interest = self._opt.demo_user.max_interest
return {u_idx: list(np.argsort(-np.dot(self._p_cluster_user[u_idx], self._p_item_cluster))[:u_max_interest]) for u_idx in u_idxs}
def _pack_dic_msg(self, val, msg_type):
dic_msg = {}
dic_msg['type'] = msg_type
dic_msg['value'] = val
dic_msg['timestamp'] = time.time()
dic_msg['servive'] = 'demo_personal_reco_system'
return dic_msg
async def _producer(self):
self.logger.info('Start producer...')
while True:
begin_t = time.time()
# to do
try:
self._opt = Config(open('config/demo.json').read())
u_list = self._make_user_list()
self._u_choice = self._make_user_choice(u_list)
self._u_interest = self._make_user_interest(u_list)
self.logger.info('demo user {} generate... '.format(len(u_list['user_id'])))
dic_msg = self._pack_dic_msg(val=u_list, msg_type='user_list')
await self.ws.send(json.dumps(dic_msg))
except Exception as e:
        self.logger.warning('Something is wrong: {}'.format(e))
break
# finishing
sleep_t = max(0, self._opt.demo_user.sleep_t - int(time.time() - begin_t))
self.logger.info('Sleep {} secs before next start'.format(sleep_t))
await asyncio.sleep(sleep_t)
def _make_user_react(self, message):
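    # Simulate feedback for each recommended user: randomly pass, pick the
    # pre-sampled "choice" item, or click a recommended item that overlaps
    # with the user's interest set.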
result = []
reco_user_list = message['value']
pss, choice, click, unclick = 0, 0, 0, 0
for user_id in reco_user_list.keys():
stat = np.random.choice(['pass', 'choice', 'click'], p=[0.4, 0.3, 0.3])
if stat == 'pass':
pss += 1
continue
elif stat == 'choice' and int(user_id) in self._u_choice:
tmp = {'user_id': user_id,
'item_id': str(self._u_choice[int(user_id)]),
'bucket': reco_user_list[user_id]['bucket'],
'stat': 'choice'}
choice += 1
result.append(tmp)
elif stat == 'click' and int(user_id) in self._u_interest:
reco_item = set(list(zip(*reco_user_list[user_id]['list']))[0])
interest_item = set(self._u_interest[int(user_id)])
candidate_item = list(reco_item.intersection(interest_item))
if candidate_item:
tmp = {'user_id': user_id,
'item_id': str(np.random.choice(candidate_item)),
'bucket': reco_user_list[user_id]['bucket'],
'stat': 'click'}
click += 1
result.append(tmp)
else:
unclick += 1
self.logger.info('Make user feedback -> pass: {}, choice: {}, click: {}, unclick: {}'
.format(pss, choice, click, unclick))
return result
async def _consumer(self):
self.logger.info('Start consumer...')
while True:
message = json.loads(await self.ws.recv())
# to do
try:
if message['type'] == 'reco_user_list':
u_feedback = self._make_user_react(message)
dic_msg = self._pack_dic_msg(val=u_feedback, msg_type='user_feedback')
await self.ws.send(json.dumps(dic_msg))
except Exception as e:
        self.logger.warning('Something is wrong: {}'.format(e))
break
# finishing
def _data_load(self):
self._gd.download(folder=self._opt.demo_user.google_drive.folder,
path=self._opt.demo_user.google_drive.root_path)
demo_user = self._ps.load(self._opt.demo_user.google_drive.data_path)
self._traffic = demo_user['traffic']
self._user_count = demo_user['user_count']
self._item_count = demo_user['item_count']
self._cluster = demo_user['cluster']
self._p_user = demo_user['p_user']
self._p_cluster_user = demo_user['p_cluster_user']
self._p_item_cluster = demo_user['p_item_cluster']
self._user_idx = demo_user['user_idx']
self._item_idx = demo_user['item_idx']
self._u_choice = {}
self._u_interest = {}
async def _main(self):
self.logger.info('Start...')
while True:
try:
self._data_load()
self.ws = await websockets.connect(self._url)
await asyncio.gather(self._producer(),
self._consumer())
except Exception as e:
self.logger.warning('Restart... after {} secs -> {}'.format(60, e))
await asyncio.sleep(60)
continue
def run(self):
asyncio.run(self._main())
if __name__ == '__main__':
Fire(Demo_user)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
main_test.go
|
package redisync_test
import (
"os"
"testing"
"github.com/gomodule/redigo/redis"
)
var (
pool *redis.Pool
)
func TestMain(m *testing.M) {
pool = &redis.Pool{
Dial: func() (redis.Conn, error) { return redis.DialURL(os.Getenv("REDIS_URL")) },
MaxIdle: 100,
MaxActive: 100,
Wait: true,
}
defer pool.Close()
code := m.Run()
defer os.Exit(code)
}
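// cleanupTestRedis flushes all keys so each test starts against an empty Redis instance.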
func cleanupTestRedis() {
conn := pool.Get()
defer conn.Close()
_, err := conn.Do("FLUSHALL")
if err != nil {
panic(err)
}
}
|
[
"\"REDIS_URL\""
] |
[] |
[
"REDIS_URL"
] |
[]
|
["REDIS_URL"]
|
go
| 1 | 0 | |
cmd/influxWriteData_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type influxWriteDataOptions struct {
ServerURL string `json:"serverUrl,omitempty"`
AuthToken string `json:"authToken,omitempty"`
Bucket string `json:"bucket,omitempty"`
Organization string `json:"organization,omitempty"`
DataMap string `json:"dataMap,omitempty"`
DataMapTags string `json:"dataMapTags,omitempty"`
}
// InfluxWriteDataCommand Writes metrics to influxdb
func InfluxWriteDataCommand() *cobra.Command {
const STEP_NAME = "influxWriteData"
metadata := influxWriteDataMetadata()
var stepConfig influxWriteDataOptions
var startTime time.Time
var logCollector *log.CollectorHook
var createInfluxWriteDataCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Writes metrics to influxdb",
Long: `In this step, the metrics are written to the timeseries database [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/)`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.AuthToken)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
influxWriteData(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addInfluxWriteDataFlags(createInfluxWriteDataCmd, &stepConfig)
return createInfluxWriteDataCmd
}
func addInfluxWriteDataFlags(cmd *cobra.Command, stepConfig *influxWriteDataOptions) {
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", os.Getenv("PIPER_serverUrl"), "Base URL to the InfluxDB server")
cmd.Flags().StringVar(&stepConfig.AuthToken, "authToken", os.Getenv("PIPER_authToken"), "Token to authenticate to the Influxdb")
cmd.Flags().StringVar(&stepConfig.Bucket, "bucket", `piper`, "Name of database (1.8) or bucket (2.0)")
cmd.Flags().StringVar(&stepConfig.Organization, "organization", os.Getenv("PIPER_organization"), "Name of influx organization. Only for Influxdb 2.0")
cmd.Flags().StringVar(&stepConfig.DataMap, "dataMap", os.Getenv("PIPER_dataMap"), "Map of fields for each measurements. It has to be a JSON string. For example: {'series_1':{'field_a':11,'field_b':12},'series_2':{'field_c':21,'field_d':22}}")
cmd.Flags().StringVar(&stepConfig.DataMapTags, "dataMapTags", os.Getenv("PIPER_dataMapTags"), "Map of tags for each measurements. It has to be a JSON string. For example: {'series_1':{'tag_a':'a','tag_b':'b'},'series_2':{'tag_c':'c','tag_d':'d'}}")
cmd.MarkFlagRequired("serverUrl")
cmd.MarkFlagRequired("authToken")
cmd.MarkFlagRequired("dataMap")
}
// retrieve step metadata
func influxWriteDataMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "influxWriteData",
Aliases: []config.Alias{},
Description: "Writes metrics to influxdb",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "influxAuthTokenId", Description: "Influxdb token for authentication to the InfluxDB. In 1.8 version use 'username:password' instead.", Type: "jenkins"},
},
Parameters: []config.StepParameters{
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_serverUrl"),
},
{
Name: "authToken",
ResourceRef: []config.ResourceReference{
{
Name: "influxAuthTokenId",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/influxdb", "$(vaultBasePath)/$(vaultPipelineName)/influxdb", "$(vaultBasePath)/GROUP-SECRETS/influxdb"},
Type: "vaultSecret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_authToken"),
},
{
Name: "bucket",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `piper`,
},
{
Name: "organization",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_organization"),
},
{
Name: "dataMap",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_dataMap"),
},
{
Name: "dataMapTags",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_dataMapTags"),
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_serverUrl\"",
"\"PIPER_authToken\"",
"\"PIPER_organization\"",
"\"PIPER_dataMap\"",
"\"PIPER_dataMapTags\"",
"\"PIPER_serverUrl\"",
"\"PIPER_authToken\"",
"\"PIPER_organization\"",
"\"PIPER_dataMap\"",
"\"PIPER_dataMapTags\""
] |
[] |
[
"PIPER_dataMapTags",
"PIPER_serverUrl",
"PIPER_dataMap",
"PIPER_organization",
"PIPER_authToken"
] |
[]
|
["PIPER_dataMapTags", "PIPER_serverUrl", "PIPER_dataMap", "PIPER_organization", "PIPER_authToken"]
|
go
| 5 | 0 | |
pkg/cmd/step/step_tag.go
|
package step
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/jenkins-x/jx/pkg/cmd/opts/step"
"github.com/jenkins-x/jx/pkg/cmd/helper"
"github.com/jenkins-x/jx/pkg/kube/naming"
"github.com/jenkins-x/jx/pkg/cmd/opts"
"github.com/jenkins-x/jx/pkg/cmd/templates"
"github.com/jenkins-x/jx/pkg/config"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/spf13/cobra"
"k8s.io/helm/pkg/chartutil"
)
const (
VERSION = "version"
defaultVersionFile = "VERSION"
ValuesYamlRepositoryPrefix = " repository:"
ValuesYamlTagPrefix = " tag:"
)
// CreateClusterOptions the flags for running create cluster
type StepTagOptions struct {
step.StepOptions
Flags StepTagFlags
}
type StepTagFlags struct {
Version string
VersionFile string
Dir string
ChartsDir string
ChartValueRepository string
NoApply bool
}
var (
stepTagLong = templates.LongDesc(`
This pipeline step command creates a git tag using a version number prefixed with 'v' and pushes it to a
remote origin repo.
This commands effectively runs:
$ git commit -a -m "release $(VERSION)" --allow-empty
$ git tag -fa v$(VERSION) -m "Release version $(VERSION)"
$ git push origin v$(VERSION)
`)
stepTagExample = templates.Examples(`
jx step tag --version 1.0.0
`)
)
func NewCmdStepTag(commonOpts *opts.CommonOptions) *cobra.Command {
options := StepTagOptions{
StepOptions: step.StepOptions{
CommonOptions: commonOpts,
},
}
cmd := &cobra.Command{
Use: "tag",
Short: "Creates a git tag and pushes to remote repo",
Long: stepTagLong,
Example: stepTagExample,
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
helper.CheckErr(err)
},
}
cmd.Flags().StringVarP(&options.Flags.Version, VERSION, "v", "", "version number for the tag [required]")
cmd.Flags().StringVarP(&options.Flags.VersionFile, "version-file", "", defaultVersionFile, "The file name used to load the version number from if no '--version' option is specified")
cmd.Flags().StringVarP(&options.Flags.ChartsDir, "charts-dir", "d", "", "the directory of the chart to update the version")
cmd.Flags().StringVarP(&options.Flags.Dir, "dir", "", "", "the directory which may contain a 'jenkins-x.yml'")
cmd.Flags().StringVarP(&options.Flags.ChartValueRepository, "charts-value-repository", "r", "", "the fully qualified image name without the version tag. e.g. 'dockerregistry/myorg/myapp'")
cmd.Flags().BoolVarP(&options.Flags.NoApply, "no-apply", "", false, "Do not push the tag to the server, this is used for example in dry runs")
return cmd
}
func (o *StepTagOptions) Run() error {
if o.Flags.Version == "" {
		// let's see if it's defined in the VERSION file
path := o.Flags.VersionFile
if path == "" {
path = "VERSION"
}
exists, err := util.FileExists(path)
if exists && err == nil {
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
o.Flags.Version = strings.TrimSpace(string(data))
}
}
if o.Flags.Version == "" {
return errors.New("No version flag")
}
log.Logger().Debug("looking for charts folder...")
chartsDir := o.Flags.ChartsDir
if chartsDir == "" {
exists, err := util.FileExists("Chart.yaml")
if !exists && err == nil {
			// let's try to find the charts/foo dir, ignoring the charts/preview dir
chartsDir, err = o.findChartsDir()
if err != nil {
return err
}
}
}
log.Logger().Debugf("updating chart if it exists")
err := o.updateChart(o.Flags.Version, chartsDir)
if err != nil {
return err
}
err = o.updateChartValues(o.Flags.Version, chartsDir)
if err != nil {
return err
}
tag := "v" + o.Flags.Version
log.Logger().Debugf("performing git commit")
err = o.Git().AddCommit("", fmt.Sprintf("release %s", o.Flags.Version))
if err != nil {
return err
}
err = o.Git().CreateTag("", tag, fmt.Sprintf("release %s", o.Flags.Version))
if err != nil {
return err
}
if o.Flags.NoApply {
log.Logger().Infof("NoApply: no push tag to git server")
} else {
log.Logger().Debugf("pushing git tag %s", tag)
err = o.Git().PushTag("", tag)
if err != nil {
return err
}
log.Logger().Infof("Tag %s created and pushed to remote origin", tag)
}
return nil
}
func (o *StepTagOptions) updateChart(version string, chartsDir string) error {
chartFile := filepath.Join(chartsDir, "Chart.yaml")
exists, err := util.FileExists(chartFile)
if err != nil {
return err
}
if !exists {
return nil
}
chart, err := chartutil.LoadChartfile(chartFile)
if err != nil {
return err
}
if chart.Version == version {
return nil
}
chart.Version = version
chart.AppVersion = version
log.Logger().Infof("Updating chart version in %s to %s", chartFile, version)
err = chartutil.SaveChartfile(chartFile, chart)
if err != nil {
return fmt.Errorf("Failed to save chart %s: %s", chartFile, err)
}
return nil
}
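// updateChartValues scans values.yaml line by line and rewrites the first
// " repository:" and " tag:" entries in place, leaving all other lines untouched.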
func (o *StepTagOptions) updateChartValues(version string, chartsDir string) error {
valuesFile := filepath.Join(chartsDir, "values.yaml")
exists, err := util.FileExists(valuesFile)
if err != nil {
return err
}
if !exists {
return nil
}
	data, err := ioutil.ReadFile(valuesFile)
	if err != nil {
		return err
	}
lines := strings.Split(string(data), "\n")
chartValueRepository := o.Flags.ChartValueRepository
if chartValueRepository == "" {
chartValueRepository = o.defaultChartValueRepository()
}
updated := false
changedRepository := false
changedTag := false
for idx, line := range lines {
if chartValueRepository != "" && strings.HasPrefix(line, ValuesYamlRepositoryPrefix) && !changedRepository {
			// let's ensure we use a valid docker image name
chartValueRepository = naming.ToValidImageName(chartValueRepository)
updated = true
changedRepository = true
log.Logger().Infof("Updating repository in %s to %s", valuesFile, chartValueRepository)
lines[idx] = ValuesYamlRepositoryPrefix + " " + chartValueRepository
} else if strings.HasPrefix(line, ValuesYamlTagPrefix) && !changedTag {
version = naming.ToValidImageVersion(version)
updated = true
changedTag = true
log.Logger().Infof("Updating tag in %s to %s", valuesFile, version)
lines[idx] = ValuesYamlTagPrefix + " " + version
}
}
if updated {
err = ioutil.WriteFile(valuesFile, []byte(strings.Join(lines, "\n")), util.DefaultWritePermissions)
if err != nil {
return fmt.Errorf("Failed to save chart file %s: %s", valuesFile, err)
}
}
return nil
}
func (o *StepTagOptions) defaultChartValueRepository() string {
gitInfo, err := o.FindGitInfo(o.Flags.ChartsDir)
if err != nil {
log.Logger().Warnf("failed to find git repository: %s", err.Error())
}
projectConfig, _, _ := config.LoadProjectConfig(o.Flags.Dir)
dockerRegistry := o.GetDockerRegistry(projectConfig)
dockerRegistryOrg := o.GetDockerRegistryOrg(projectConfig, gitInfo)
if dockerRegistryOrg == "" {
dockerRegistryOrg = os.Getenv("ORG")
}
if dockerRegistryOrg == "" {
dockerRegistryOrg = os.Getenv("REPO_OWNER")
}
appName := os.Getenv("APP_NAME")
if appName == "" {
appName = os.Getenv("REPO_NAME")
}
if dockerRegistryOrg == "" && gitInfo != nil {
dockerRegistryOrg = gitInfo.Organisation
}
if appName == "" && gitInfo != nil {
appName = gitInfo.Name
}
if dockerRegistry != "" && dockerRegistryOrg != "" && appName != "" {
return dockerRegistry + "/" + dockerRegistryOrg + "/" + appName
}
log.Logger().Warnf("could not generate chart repository name for GetDockerRegistry %s, GetDockerRegistryOrg %s, appName %s", dockerRegistry, dockerRegistryOrg, appName)
return ""
}
// findChartsDir tries to locate a charts directory containing a Chart.yaml, skipping the preview chart
func (o *StepTagOptions) findChartsDir() (string, error) {
files, err := filepath.Glob("*/*/Chart.yaml")
if err != nil {
return "", fmt.Errorf("failed to find Chart.yaml file: %s", err)
}
if len(files) > 0 {
for _, file := range files {
paths := strings.Split(file, string(os.PathSeparator))
if len(paths) > 2 && paths[len(paths)-2] != "preview" {
dir, _ := filepath.Split(file)
return dir, nil
}
}
}
return "", nil
}
|
[
"\"ORG\"",
"\"REPO_OWNER\"",
"\"APP_NAME\"",
"\"REPO_NAME\""
] |
[] |
[
"ORG",
"REPO_NAME",
"APP_NAME",
"REPO_OWNER"
] |
[]
|
["ORG", "REPO_NAME", "APP_NAME", "REPO_OWNER"]
|
go
| 4 | 0 | |
lcd_test/lcd_test.go
|
package lcdtest
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/cosmos/cosmos-sdk/x/mint"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/keys"
"github.com/cosmos/cosmos-sdk/crypto/keys/mintkey"
"github.com/cosmos/cosmos-sdk/tests"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/rest"
"github.com/cosmos/cosmos-sdk/version"
"github.com/cosmos/cosmos-sdk/x/auth"
"github.com/cosmos/cosmos-sdk/x/bank"
dclcommon "github.com/cosmos/cosmos-sdk/x/distribution/client/common"
distrrest "github.com/cosmos/cosmos-sdk/x/distribution/client/rest"
disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
"github.com/cosmos/cosmos-sdk/x/gov"
"github.com/cosmos/cosmos-sdk/x/slashing"
)
const (
name1 = "test1"
memo = "LCD test tx"
pw = client.DefaultKeyPass
)
var fees = sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 5)}
func init() {
mintkey.BcryptSecurityParameter = 1
version.Version = os.Getenv("VERSION")
}
func TestNodeStatus(t *testing.T) {
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{}, true)
require.NoError(t, err)
defer cleanup()
getNodeInfo(t, port)
getSyncStatus(t, port, false)
}
func TestBlock(t *testing.T) {
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{}, true)
require.NoError(t, err)
defer cleanup()
getBlock(t, port, -1, false)
getBlock(t, port, 2, false)
getBlock(t, port, 100000000, true)
}
func TestValidators(t *testing.T) {
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{}, true)
require.NoError(t, err)
defer cleanup()
resultVals := getValidatorSets(t, port, -1, false)
require.Contains(t, resultVals.Validators[0].Address.String(), "ifvalcons")
require.Contains(t, resultVals.Validators[0].PubKey, "ifvalconspub")
getValidatorSets(t, port, 2, false)
getValidatorSets(t, port, 10000000, true)
}
func TestCoinSend(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
bz, err := hex.DecodeString("8FA6AB57AD6870F6B5B2E57735F38F2F30E73CB6")
require.NoError(t, err)
someFakeAddr := sdk.AccAddress(bz)
// query empty
res, body := Request(t, port, "GET", fmt.Sprintf("/auth/accounts/%s", someFakeAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// create TX
receiveAddr, resultTx := doTransfer(t, port, seed, name1, memo, pw, addr, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.Code)
// query sender
acc = getAccount(t, port, addr)
coins := acc.GetCoins()
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, sdk.DefaultBondDenom, coins[0].Denom)
require.Equal(t, expectedBalance.Amount.SubRaw(1), coins[0].Amount)
expectedBalance = coins[0]
// query receiver
acc2 := getAccount(t, port, receiveAddr)
coins2 := acc2.GetCoins()
require.Equal(t, sdk.DefaultBondDenom, coins2[0].Denom)
require.Equal(t, int64(1), coins2[0].Amount.Int64())
// test failure with too little gas
res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "100", 0, false, true, fees)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.Nil(t, err)
// test failure with negative gas
res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "-200", 0, false, false, fees)
require.Equal(t, http.StatusBadRequest, res.StatusCode, body)
// test failure with negative adjustment
res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "10000", -0.1, true, false, fees)
require.Equal(t, http.StatusBadRequest, res.StatusCode, body)
// test failure with 0 gas
res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, "0", 0, false, true, fees)
require.Equal(t, http.StatusOK, res.StatusCode, body)
// test failure with wrong adjustment
res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, client.GasFlagAuto, 0.1, false, true, fees)
require.Equal(t, http.StatusOK, res.StatusCode, body)
// run simulation and test success with estimated gas
res, body, _ = doTransferWithGas(
t, port, seed, name1, memo, pw, addr, "10000", 1.0, true, false, fees,
)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var gasEstResp rest.GasEstimateResponse
require.Nil(t, cdc.UnmarshalJSON([]byte(body), &gasEstResp))
require.NotZero(t, gasEstResp.GasEstimate)
acc = getAccount(t, port, addr)
require.Equal(t, expectedBalance.Amount, acc.GetCoins().AmountOf(sdk.DefaultBondDenom))
// run successful tx
gas := fmt.Sprintf("%d", gasEstResp.GasEstimate)
res, body, _ = doTransferWithGas(t, port, seed, name1, memo, pw, addr, gas, 1.0, false, true, fees)
require.Equal(t, http.StatusOK, res.StatusCode, body)
err = cdc.UnmarshalJSON([]byte(body), &resultTx)
require.Nil(t, err)
tests.WaitForHeight(resultTx.Height+1, port)
require.Equal(t, uint32(0), resultTx.Code)
acc = getAccount(t, port, addr)
expectedBalance = expectedBalance.Sub(fees[0])
require.Equal(t, expectedBalance.Amount.SubRaw(1), acc.GetCoins().AmountOf(sdk.DefaultBondDenom))
}
func TestCoinSendAccAuto(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// send a transfer tx without specifying account number and sequence
res, body, _ := doTransferWithGasAccAuto(
t, port, seed, name1, memo, pw, addr, "200000", 1.0, false, true, fees,
)
require.Equal(t, http.StatusOK, res.StatusCode, body)
// query sender
acc = getAccount(t, port, addr)
coins := acc.GetCoins()
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, sdk.DefaultBondDenom, coins[0].Denom)
require.Equal(t, expectedBalance.Amount.SubRaw(1), coins[0].Amount)
}
func TestCoinMultiSendGenerateOnly(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
// generate only
res, body, _ := doTransferWithGas(t, port, seed, "", memo, "", addr, "200000", 1, false, false, fees)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var stdTx auth.StdTx
require.Nil(t, cdc.UnmarshalJSON([]byte(body), &stdTx))
require.Equal(t, len(stdTx.Msgs), 1)
require.Equal(t, stdTx.GetMsgs()[0].Route(), bank.RouterKey)
require.Equal(t, stdTx.GetMsgs()[0].GetSigners(), []sdk.AccAddress{addr})
require.Equal(t, 0, len(stdTx.Signatures))
require.Equal(t, memo, stdTx.Memo)
require.NotZero(t, stdTx.Fee.Gas)
require.IsType(t, stdTx.GetMsgs()[0], bank.MsgSend{})
require.Equal(t, addr, stdTx.GetMsgs()[0].(bank.MsgSend).FromAddress)
}
func TestCoinSendGenerateSignAndBroadcast(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
acc := getAccount(t, port, addr)
// simulate tx
res, body, _ := doTransferWithGas(
t, port, seed, name1, memo, "", addr, client.GasFlagAuto, 1.0, true, false, fees,
)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var gasEstResp rest.GasEstimateResponse
require.Nil(t, cdc.UnmarshalJSON([]byte(body), &gasEstResp))
require.NotZero(t, gasEstResp.GasEstimate)
// generate tx
gas := fmt.Sprintf("%d", gasEstResp.GasEstimate)
res, body, _ = doTransferWithGas(t, port, seed, name1, memo, "", addr, gas, 1, false, false, fees)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var tx auth.StdTx
require.Nil(t, cdc.UnmarshalJSON([]byte(body), &tx))
require.Equal(t, len(tx.Msgs), 1)
require.Equal(t, tx.Msgs[0].Route(), bank.RouterKey)
require.Equal(t, tx.Msgs[0].GetSigners(), []sdk.AccAddress{addr})
require.Equal(t, 0, len(tx.Signatures))
require.Equal(t, memo, tx.Memo)
require.NotZero(t, tx.Fee.Gas)
gasEstimate := int64(tx.Fee.Gas)
_, body = signAndBroadcastGenTx(t, port, name1, pw, body, acc, 1.0, false)
// check if tx was committed
var txResp sdk.TxResponse
require.Nil(t, cdc.UnmarshalJSON([]byte(body), &txResp))
require.Equal(t, uint32(0), txResp.Code)
require.Equal(t, gasEstimate, txResp.GasWanted)
}
func TestEncodeTx(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
res, body, _ := doTransferWithGas(t, port, seed, name1, memo, "", addr, "2", 1, false, false, fees)
var tx auth.StdTx
require.Nil(t, cdc.UnmarshalJSON([]byte(body), &tx))
encodedJSON, _ := cdc.MarshalJSON(tx)
res, body = Request(t, port, "POST", "/txs/encode", encodedJSON)
// Make sure it came back ok, and that we can decode it back to the transaction
// 200 response.
require.Equal(t, http.StatusOK, res.StatusCode, body)
encodeResp := struct {
Tx string `json:"tx"`
}{}
require.Nil(t, cdc.UnmarshalJSON([]byte(body), &encodeResp))
// verify that the base64 decodes
decodedBytes, err := base64.StdEncoding.DecodeString(encodeResp.Tx)
require.Nil(t, err)
// check that the transaction decodes as expected
var decodedTx auth.StdTx
require.Nil(t, cdc.UnmarshalBinaryLengthPrefixed(decodedBytes, &decodedTx))
require.Equal(t, memo, decodedTx.Memo)
}
func TestTxs(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
var emptyTxs []sdk.TxResponse
txResult := getTransactions(t, port)
require.Equal(t, emptyTxs, txResult.Txs)
// query empty
txResult = getTransactions(t, port, fmt.Sprintf("message.sender=%s", addr.String()))
require.Equal(t, emptyTxs, txResult.Txs)
// also tests url decoding
txResult = getTransactions(t, port, fmt.Sprintf("message.sender=%s", addr.String()))
require.Equal(t, emptyTxs, txResult.Txs)
txResult = getTransactions(t, port, fmt.Sprintf("message.action=submit_proposal&message.sender=%s", addr.String()))
require.Equal(t, emptyTxs, txResult.Txs)
// create tx
receiveAddr, resultTx := doTransfer(t, port, seed, name1, memo, pw, addr, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx is queryable
tx := getTransaction(t, port, resultTx.TxHash)
require.Equal(t, resultTx.TxHash, tx.TxHash)
// query sender
txResult = getTransactions(t, port, fmt.Sprintf("message.sender=%s", addr.String()))
require.Len(t, txResult.Txs, 1)
require.Equal(t, resultTx.Height, txResult.Txs[0].Height)
// query recipient
txResult = getTransactions(t, port, fmt.Sprintf("transfer.recipient=%s", receiveAddr.String()))
require.Len(t, txResult.Txs, 1)
require.Equal(t, resultTx.Height, txResult.Txs[0].Height)
// query transaction that doesn't exist
validTxHash := "9ADBECAAD8DACBEC3F4F535704E7CF715C765BDCEDBEF086AFEAD31BA664FB0B"
res, body := getTransactionRequest(t, port, validTxHash)
require.True(t, strings.Contains(body, validTxHash))
require.Equal(t, http.StatusNotFound, res.StatusCode)
// bad query string
res, body = getTransactionRequest(t, port, "badtxhash")
require.True(t, strings.Contains(body, "encoding/hex"))
require.Equal(t, http.StatusInternalServerError, res.StatusCode)
}
func TestValidatorsQuery(t *testing.T) {
cleanup, valPubKeys, operAddrs, port, err := InitializeLCD(1, []sdk.AccAddress{}, true)
require.NoError(t, err)
defer cleanup()
require.Equal(t, 1, len(valPubKeys))
require.Equal(t, 1, len(operAddrs))
validators := getValidators(t, port)
require.Equal(t, 1, len(validators), fmt.Sprintf("%+v", validators))
// make sure all the validators were found (order unknown because sorted by operator addr)
foundVal := false
if validators[0].ConsPubKey == valPubKeys[0] {
foundVal = true
}
require.True(t, foundVal, "pk %v, operator %v", operAddrs[0], validators[0].OperatorAddress)
}
func TestValidatorQuery(t *testing.T) {
cleanup, valPubKeys, operAddrs, port, err := InitializeLCD(1, []sdk.AccAddress{}, true)
require.NoError(t, err)
defer cleanup()
require.Equal(t, 1, len(valPubKeys))
require.Equal(t, 1, len(operAddrs))
validator := getValidator(t, port, operAddrs[0])
require.Equal(t, validator.OperatorAddress, operAddrs[0], "The returned validator does not hold the correct data")
}
func TestBonding(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, _, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, valPubKeys, operAddrs, port, err := InitializeLCD(2, []sdk.AccAddress{addr}, false)
require.NoError(t, err)
tests.WaitForHeight(1, port)
defer cleanup()
require.Equal(t, 2, len(valPubKeys))
require.Equal(t, 2, len(operAddrs))
amt := sdk.TokensFromConsensusPower(60)
amtDec := amt.ToDec()
validator := getValidator(t, port, operAddrs[0])
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// create bond TX
delTokens := sdk.TokensFromConsensusPower(60)
resultTx := doDelegate(t, port, name1, pw, addr, operAddrs[0], delTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
require.Equal(t, uint32(0), resultTx.Code)
// query tx
txResult := getTransactions(t, port,
fmt.Sprintf("message.action=delegate&message.sender=%s", addr),
fmt.Sprintf("delegate.validator=%s", operAddrs[0]),
)
require.Len(t, txResult.Txs, 1)
require.Equal(t, resultTx.Height, txResult.Txs[0].Height)
// verify balance
acc = getAccount(t, port, addr)
coins := acc.GetCoins()
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(delTokens).String(), coins.AmountOf(sdk.DefaultBondDenom).String())
expectedBalance = coins[0]
// query delegation
bond := getDelegation(t, port, addr, operAddrs[0])
require.Equal(t, amtDec, bond.Shares)
delegatorDels := getDelegatorDelegations(t, port, addr)
require.Len(t, delegatorDels, 1)
require.Equal(t, amtDec, delegatorDels[0].Shares)
// query all delegations to validator
bonds := getValidatorDelegations(t, port, operAddrs[0])
require.Len(t, bonds, 2)
bondedValidators := getDelegatorValidators(t, port, addr)
require.Len(t, bondedValidators, 1)
require.Equal(t, operAddrs[0], bondedValidators[0].OperatorAddress)
require.Equal(t, validator.DelegatorShares.Add(amtDec).String(), bondedValidators[0].DelegatorShares.String())
bondedValidator := getDelegatorValidator(t, port, addr, operAddrs[0])
require.Equal(t, operAddrs[0], bondedValidator.OperatorAddress)
// testing unbonding
unbondingTokens := sdk.TokensFromConsensusPower(30)
resultTx = doUndelegate(t, port, name1, pw, addr, operAddrs[0], unbondingTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
require.Equal(t, uint32(0), resultTx.Code)
// sender should have not received any coins as the unbonding has only just begun
acc = getAccount(t, port, addr)
coins = acc.GetCoins()
expectedBalance = expectedBalance.Sub(fees[0])
require.True(t,
expectedBalance.Amount.LT(coins.AmountOf(sdk.DefaultBondDenom)) ||
expectedBalance.Amount.Equal(coins.AmountOf(sdk.DefaultBondDenom)),
"should get tokens back from automatic withdrawal after an unbonding delegation",
)
expectedBalance = coins[0]
// query tx
txResult = getTransactions(t, port,
fmt.Sprintf("message.action=begin_unbonding&message.sender=%s", addr),
fmt.Sprintf("unbond.validator=%s", operAddrs[0]),
)
require.Len(t, txResult.Txs, 1)
require.Equal(t, resultTx.Height, txResult.Txs[0].Height)
ubd := getUnbondingDelegation(t, port, addr, operAddrs[0])
require.Len(t, ubd.Entries, 1)
require.Equal(t, delTokens.QuoRaw(2), ubd.Entries[0].Balance)
// test redelegation
rdTokens := sdk.TokensFromConsensusPower(30)
resultTx = doBeginRedelegation(t, port, name1, pw, addr, operAddrs[0], operAddrs[1], rdTokens, fees)
require.Equal(t, uint32(0), resultTx.Code)
tests.WaitForHeight(resultTx.Height+1, port)
// query delegations, unbondings and redelegations from validator and delegator
delegatorDels = getDelegatorDelegations(t, port, addr)
require.Len(t, delegatorDels, 1)
require.Equal(t, operAddrs[1], delegatorDels[0].ValidatorAddress)
// TODO uncomment once all validators actually sign in the lcd tests
//validator2 := getValidator(t, port, operAddrs[1])
//delTokensAfterRedelegation := validator2.ShareTokens(delegatorDels[0].GetShares())
//require.Equal(t, rdTokens.ToDec(), delTokensAfterRedelegation)
// verify balance after paying fees
acc = getAccount(t, port, addr)
expectedBalance = expectedBalance.Sub(fees[0])
require.True(t,
expectedBalance.Amount.LT(coins.AmountOf(sdk.DefaultBondDenom)) ||
expectedBalance.Amount.Equal(coins.AmountOf(sdk.DefaultBondDenom)),
"should get tokens back from automatic withdrawal after an unbonding delegation",
)
// query tx
txResult = getTransactions(t, port,
fmt.Sprintf("message.action=begin_redelegate&message.sender=%s", addr),
fmt.Sprintf("redelegate.source_validator=%s", operAddrs[0]),
fmt.Sprintf("redelegate.destination_validator=%s", operAddrs[1]),
)
require.Len(t, txResult.Txs, 1)
require.Equal(t, resultTx.Height, txResult.Txs[0].Height)
redelegation := getRedelegations(t, port, addr, operAddrs[0], operAddrs[1])
require.Len(t, redelegation, 1)
require.Len(t, redelegation[0].Entries, 1)
delegatorUbds := getDelegatorUnbondingDelegations(t, port, addr)
require.Len(t, delegatorUbds, 1)
require.Len(t, delegatorUbds[0].Entries, 1)
require.Equal(t, rdTokens, delegatorUbds[0].Entries[0].Balance)
delegatorReds := getRedelegations(t, port, addr, nil, nil)
require.Len(t, delegatorReds, 1)
require.Len(t, delegatorReds[0].Entries, 1)
validatorUbds := getValidatorUnbondingDelegations(t, port, operAddrs[0])
require.Len(t, validatorUbds, 1)
require.Len(t, validatorUbds[0].Entries, 1)
require.Equal(t, rdTokens, validatorUbds[0].Entries[0].Balance)
validatorReds := getRedelegations(t, port, nil, operAddrs[0], nil)
require.Len(t, validatorReds, 1)
require.Len(t, validatorReds[0].Entries, 1)
	// TODO Unbonding status not currently implemented
// require.Equal(t, sdk.Unbonding, bondedValidators[0].Status)
// query txs
txs := getBondingTxs(t, port, addr, "")
require.Len(t, txs, 3, "All Txs found")
txs = getBondingTxs(t, port, addr, "bond")
require.Len(t, txs, 1, "All bonding txs found")
txs = getBondingTxs(t, port, addr, "unbond")
require.Len(t, txs, 1, "All unbonding txs found")
txs = getBondingTxs(t, port, addr, "redelegate")
require.Len(t, txs, 1, "All redelegation txs found")
}
func TestSubmitProposal(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// create SubmitProposal TX
proposalTokens := sdk.TokensFromConsensusPower(5)
resultTx := doSubmitProposal(t, port, seed, name1, pw, addr, proposalTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.Code)
var proposalID uint64
bz, err := hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID)
// verify balance
acc = getAccount(t, port, addr)
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), acc.GetCoins().AmountOf(sdk.DefaultBondDenom))
// query proposal
proposal := getProposal(t, port, proposalID)
require.Equal(t, "Test", proposal.GetTitle())
proposer := getProposer(t, port, proposalID)
require.Equal(t, addr.String(), proposer.Proposer)
require.Equal(t, proposalID, proposer.ProposalID)
}
func TestSubmitCommunityPoolSpendProposal(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// create proposal tx
proposalTokens := sdk.TokensFromConsensusPower(5)
resultTx := doSubmitCommunityPoolSpendProposal(t, port, seed, name1, pw, addr, proposalTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.Code)
var proposalID uint64
bz, err := hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID)
// verify balance
acc = getAccount(t, port, addr)
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), acc.GetCoins().AmountOf(sdk.DefaultBondDenom))
// query proposal
proposal := getProposal(t, port, proposalID)
require.Equal(t, "Test", proposal.GetTitle())
proposer := getProposer(t, port, proposalID)
require.Equal(t, addr.String(), proposer.Proposer)
require.Equal(t, proposalID, proposer.ProposalID)
}
func TestSubmitParamChangeProposal(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// create proposal tx
proposalTokens := sdk.TokensFromConsensusPower(5)
resultTx := doSubmitParamChangeProposal(t, port, seed, name1, pw, addr, proposalTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.Code)
var proposalID uint64
bz, err := hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID)
// verify balance
acc = getAccount(t, port, addr)
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), acc.GetCoins().AmountOf(sdk.DefaultBondDenom))
// query proposal
proposal := getProposal(t, port, proposalID)
require.Equal(t, "Test", proposal.GetTitle())
proposer := getProposer(t, port, proposalID)
require.Equal(t, addr.String(), proposer.Proposer)
require.Equal(t, proposalID, proposer.ProposalID)
}
func TestDeposit(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// create SubmitProposal TX
proposalTokens := sdk.TokensFromConsensusPower(5)
resultTx := doSubmitProposal(t, port, seed, name1, pw, addr, proposalTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.Code)
var proposalID uint64
bz, err := hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID)
// verify balance
acc = getAccount(t, port, addr)
coins := acc.GetCoins()
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), coins.AmountOf(sdk.DefaultBondDenom))
expectedBalance = coins[0]
// query proposal
proposal := getProposal(t, port, proposalID)
require.Equal(t, "Test", proposal.GetTitle())
// create SubmitProposal TX
depositTokens := sdk.TokensFromConsensusPower(5)
resultTx = doDeposit(t, port, seed, name1, pw, addr, proposalID, depositTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// verify balance after deposit and fee
acc = getAccount(t, port, addr)
expectedBalance = expectedBalance.Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(depositTokens), acc.GetCoins().AmountOf(sdk.DefaultBondDenom))
// query tx
txResult := getTransactions(t, port, fmt.Sprintf("message.action=deposit&message.sender=%s", addr))
require.Len(t, txResult.Txs, 1)
require.Equal(t, resultTx.Height, txResult.Txs[0].Height)
// query proposal
totalCoins := sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, sdk.TokensFromConsensusPower(10))}
proposal = getProposal(t, port, proposalID)
require.True(t, proposal.TotalDeposit.IsEqual(totalCoins))
// query deposit
deposit := getDeposit(t, port, proposalID, addr)
require.True(t, deposit.Amount.IsEqual(totalCoins))
}
func TestVote(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, operAddrs, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
acc := getAccount(t, port, addr)
initialBalance := acc.GetCoins()
// create SubmitProposal TX
proposalTokens := sdk.TokensFromConsensusPower(10)
resultTx := doSubmitProposal(t, port, seed, name1, pw, addr, proposalTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check if tx was committed
require.Equal(t, uint32(0), resultTx.Code)
var proposalID uint64
bz, err := hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID)
// verify balance
acc = getAccount(t, port, addr)
coins := acc.GetCoins()
expectedBalance := initialBalance[0].Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(proposalTokens), coins.AmountOf(sdk.DefaultBondDenom))
expectedBalance = coins[0]
// query proposal
proposal := getProposal(t, port, proposalID)
require.Equal(t, "Test", proposal.GetTitle())
require.Equal(t, gov.StatusVotingPeriod, proposal.Status)
// vote
resultTx = doVote(t, port, seed, name1, pw, addr, proposalID, "Yes", fees)
tests.WaitForHeight(resultTx.Height+1, port)
// verify balance after vote and fee
acc = getAccount(t, port, addr)
coins = acc.GetCoins()
expectedBalance = expectedBalance.Sub(fees[0])
require.Equal(t, expectedBalance.Amount, coins.AmountOf(sdk.DefaultBondDenom))
expectedBalance = coins[0]
// query tx
txResult := getTransactions(t, port, fmt.Sprintf("message.action=vote&message.sender=%s", addr))
require.Len(t, txResult.Txs, 1)
require.Equal(t, resultTx.Height, txResult.Txs[0].Height)
vote := getVote(t, port, proposalID, addr)
require.Equal(t, proposalID, vote.ProposalID)
require.Equal(t, gov.OptionYes, vote.Option)
tally := getTally(t, port, proposalID)
require.Equal(t, sdk.ZeroInt(), tally.Yes, "tally should be 0 as the address is not bonded")
// create bond TX
delTokens := sdk.TokensFromConsensusPower(60)
resultTx = doDelegate(t, port, name1, pw, addr, operAddrs[0], delTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// verify balance
acc = getAccount(t, port, addr)
coins = acc.GetCoins()
expectedBalance = expectedBalance.Sub(fees[0])
require.Equal(t, expectedBalance.Amount.Sub(delTokens), coins.AmountOf(sdk.DefaultBondDenom))
expectedBalance = coins[0]
tally = getTally(t, port, proposalID)
require.Equal(t, delTokens, tally.Yes, "tally should be equal to the amount delegated")
// change vote option
resultTx = doVote(t, port, seed, name1, pw, addr, proposalID, "No", fees)
tests.WaitForHeight(resultTx.Height+1, port)
// verify balance
acc = getAccount(t, port, addr)
expectedBalance = expectedBalance.Sub(fees[0])
require.Equal(t, expectedBalance.Amount, acc.GetCoins().AmountOf(sdk.DefaultBondDenom))
tally = getTally(t, port, proposalID)
require.Equal(t, sdk.ZeroInt(), tally.Yes, "tally should be 0 as the user changed the option")
require.Equal(t, delTokens, tally.No, "tally should be equal to the amount delegated")
}
func TestUnjail(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, _, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, valPubKeys, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
// NOTE: any less than this and it fails
tests.WaitForHeight(3, port)
pkString, _ := sdk.Bech32ifyConsPub(valPubKeys[0])
signingInfo := getSigningInfo(t, port, pkString)
tests.WaitForHeight(4, port)
require.Equal(t, true, signingInfo.IndexOffset > 0)
require.Equal(t, time.Unix(0, 0).UTC(), signingInfo.JailedUntil)
require.Equal(t, true, signingInfo.MissedBlocksCounter == 0)
signingInfoList := getSigningInfoList(t, port)
require.NotZero(t, len(signingInfoList))
}
func TestProposalsQuery(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addrs, seeds, names, passwords, errors := CreateAddrs(kb, 2)
require.Empty(t, errors)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addrs[0], addrs[1]}, true)
require.NoError(t, err)
defer cleanup()
depositParam := getDepositParam(t, port)
halfMinDeposit := depositParam.MinDeposit.AmountOf(sdk.DefaultBondDenom).QuoRaw(2)
getVotingParam(t, port)
getTallyingParam(t, port)
// Addr1 proposes (and deposits) proposals #1 and #2
resultTx := doSubmitProposal(t, port, seeds[0], names[0], passwords[0], addrs[0], halfMinDeposit, fees)
var proposalID1 uint64
bz, err := hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID1)
tests.WaitForHeight(resultTx.Height+1, port)
resultTx = doSubmitProposal(t, port, seeds[0], names[0], passwords[0], addrs[0], halfMinDeposit, fees)
var proposalID2 uint64
bz, err = hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID2)
tests.WaitForHeight(resultTx.Height+1, port)
// Addr2 proposes (and deposits) proposal #3
resultTx = doSubmitProposal(t, port, seeds[1], names[1], passwords[1], addrs[1], halfMinDeposit, fees)
var proposalID3 uint64
bz, err = hex.DecodeString(resultTx.Data)
require.NoError(t, err)
cdc.MustUnmarshalBinaryLengthPrefixed(bz, &proposalID3)
tests.WaitForHeight(resultTx.Height+1, port)
// Addr2 deposits on proposals #2 & #3
resultTx = doDeposit(t, port, seeds[1], names[1], passwords[1], addrs[1], proposalID2, halfMinDeposit, fees)
tests.WaitForHeight(resultTx.Height+1, port)
resultTx = doDeposit(t, port, seeds[1], names[1], passwords[1], addrs[1], proposalID3, halfMinDeposit, fees)
tests.WaitForHeight(resultTx.Height+1, port)
// check deposits match proposal and individual deposits
deposits := getDeposits(t, port, proposalID1)
require.Len(t, deposits, 1)
deposit := getDeposit(t, port, proposalID1, addrs[0])
require.Equal(t, deposit, deposits[0])
deposits = getDeposits(t, port, proposalID2)
require.Len(t, deposits, 2)
deposit = getDeposit(t, port, proposalID2, addrs[0])
require.True(t, deposit.Equals(deposits[0]))
deposit = getDeposit(t, port, proposalID2, addrs[1])
require.True(t, deposit.Equals(deposits[1]))
deposits = getDeposits(t, port, proposalID3)
require.Len(t, deposits, 1)
deposit = getDeposit(t, port, proposalID3, addrs[1])
require.Equal(t, deposit, deposits[0])
// increasing the amount of the deposit should update the existing one
depositTokens := sdk.TokensFromConsensusPower(1)
resultTx = doDeposit(t, port, seeds[0], names[0], passwords[0], addrs[0], proposalID1, depositTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
deposits = getDeposits(t, port, proposalID1)
require.Len(t, deposits, 1)
// Only proposal #1 should be in Deposit Period
proposals := getProposalsFilterStatus(t, port, gov.StatusDepositPeriod)
require.Len(t, proposals, 1)
require.Equal(t, proposalID1, proposals[0].ProposalID)
// Only proposals #2 and #3 should be in Voting Period
proposals = getProposalsFilterStatus(t, port, gov.StatusVotingPeriod)
require.Len(t, proposals, 2)
require.Equal(t, proposalID2, proposals[0].ProposalID)
require.Equal(t, proposalID3, proposals[1].ProposalID)
// Addr1 votes on proposals #2 & #3
resultTx = doVote(t, port, seeds[0], names[0], passwords[0], addrs[0], proposalID2, "Yes", fees)
tests.WaitForHeight(resultTx.Height+1, port)
resultTx = doVote(t, port, seeds[0], names[0], passwords[0], addrs[0], proposalID3, "Yes", fees)
tests.WaitForHeight(resultTx.Height+1, port)
// Addr2 votes on proposal #3
resultTx = doVote(t, port, seeds[1], names[1], passwords[1], addrs[1], proposalID3, "Yes", fees)
tests.WaitForHeight(resultTx.Height+1, port)
// Test query all proposals
proposals = getProposalsAll(t, port)
require.Equal(t, proposalID1, (proposals[0]).ProposalID)
require.Equal(t, proposalID2, (proposals[1]).ProposalID)
require.Equal(t, proposalID3, (proposals[2]).ProposalID)
// Test query deposited by addr1
proposals = getProposalsFilterDepositor(t, port, addrs[0])
require.Equal(t, proposalID1, (proposals[0]).ProposalID)
// Test query deposited by addr2
proposals = getProposalsFilterDepositor(t, port, addrs[1])
require.Equal(t, proposalID2, (proposals[0]).ProposalID)
require.Equal(t, proposalID3, (proposals[1]).ProposalID)
// Test query voted by addr1
proposals = getProposalsFilterVoter(t, port, addrs[0])
require.Equal(t, proposalID2, (proposals[0]).ProposalID)
require.Equal(t, proposalID3, (proposals[1]).ProposalID)
// Test query voted by addr2
proposals = getProposalsFilterVoter(t, port, addrs[1])
require.Equal(t, proposalID3, (proposals[0]).ProposalID)
// Test query voted and deposited by addr1
proposals = getProposalsFilterVoterDepositor(t, port, addrs[0], addrs[0])
require.Equal(t, proposalID2, (proposals[0]).ProposalID)
// Test query votes on Proposal 2
votes := getVotes(t, port, proposalID2)
require.Len(t, votes, 1)
require.Equal(t, addrs[0], votes[0].Voter)
// Test query votes on Proposal 3
votes = getVotes(t, port, proposalID3)
require.Len(t, votes, 2)
require.True(t, addrs[0].String() == votes[0].Voter.String() || addrs[0].String() == votes[1].Voter.String())
require.True(t, addrs[1].String() == votes[0].Voter.String() || addrs[1].String() == votes[1].Voter.String())
}
func TestSlashingGetParams(t *testing.T) {
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{}, true)
require.NoError(t, err)
defer cleanup()
res, body := Request(t, port, "GET", "/slashing/parameters", nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var params slashing.Params
err = cdc.UnmarshalJSON([]byte(body), ¶ms)
require.NoError(t, err)
}
func TestDistributionGetParams(t *testing.T) {
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{}, true)
require.NoError(t, err)
defer cleanup()
res, body := Request(t, port, "GET", "/distribution/parameters", nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON([]byte(body), &dclcommon.PrettyParams{}))
}
func TestDistributionFlow(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, seed, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, valAddrs, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
valAddr := valAddrs[0]
operAddr := sdk.AccAddress(valAddr)
var rewards sdk.DecCoins
res, body := Request(t, port, "GET", fmt.Sprintf("/distribution/validators/%s/outstanding_rewards", valAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &rewards))
var valDistInfo distrrest.ValidatorDistInfo
res, body = Request(t, port, "GET", "/distribution/validators/"+valAddr.String(), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &valDistInfo))
require.Equal(t, valDistInfo.OperatorAddress.String(), sdk.AccAddress(valAddr).String())
// Delegate some coins
delTokens := sdk.TokensFromConsensusPower(60)
resultTx := doDelegate(t, port, name1, pw, addr, valAddr, delTokens, fees)
tests.WaitForHeight(resultTx.Height+1, port)
require.Equal(t, uint32(0), resultTx.Code)
// send some coins
_, resultTx = doTransfer(t, port, seed, name1, memo, pw, addr, fees)
tests.WaitForHeight(resultTx.Height+5, port)
require.Equal(t, uint32(0), resultTx.Code)
// Query outstanding rewards changed
res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/validators/%s/outstanding_rewards", valAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &rewards))
// Query validator distribution info
res, body = Request(t, port, "GET", "/distribution/validators/"+valAddr.String(), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &valDistInfo))
// Query validator's rewards
res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/validators/%s/rewards", valAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &rewards))
// Query self-delegation
res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/rewards/%s", operAddr, valAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &rewards))
// Query delegation
res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/rewards/%s", addr, valAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &rewards))
// Query delegator's rewards total
var delRewards disttypes.QueryDelegatorTotalRewardsResponse
res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/rewards", operAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, json.Unmarshal(extractResultFromResponse(t, []byte(body)), &delRewards))
// Query delegator's withdrawal address
var withdrawAddr string
res, body = Request(t, port, "GET", fmt.Sprintf("/distribution/delegators/%s/withdraw_address", operAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &withdrawAddr))
require.Equal(t, operAddr.String(), withdrawAddr)
// Withdraw delegator's rewards
resultTx = doWithdrawDelegatorAllRewards(t, port, seed, name1, pw, addr, fees)
require.Equal(t, uint32(0), resultTx.Code)
}
func TestMintingQueries(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, _, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
res, body := Request(t, port, "GET", "/minting/parameters", nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var params mint.Params
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), ¶ms))
res, body = Request(t, port, "GET", "/minting/inflation", nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var inflation sdk.Dec
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &inflation))
res, body = Request(t, port, "GET", "/minting/annual-provisions", nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
var annualProvisions sdk.Dec
require.NoError(t, cdc.UnmarshalJSON(extractResultFromResponse(t, []byte(body)), &annualProvisions))
}
func TestAccountBalanceQuery(t *testing.T) {
kb, err := keys.NewKeyBaseFromDir(InitClientHome(""))
require.NoError(t, err)
addr, _, err := CreateAddr(name1, pw, kb)
require.NoError(t, err)
cleanup, _, _, port, err := InitializeLCD(1, []sdk.AccAddress{addr}, true)
require.NoError(t, err)
defer cleanup()
bz, err := hex.DecodeString("8FA6AB57AD6870F6B5B2E57735F38F2F30E73CB6")
require.NoError(t, err)
someFakeAddr := sdk.AccAddress(bz)
// empty account
res, body := Request(t, port, "GET", fmt.Sprintf("/auth/accounts/%s", someFakeAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.Contains(t, body, `"type":"cosmos-sdk/Account"`)
// empty account balance
res, body = Request(t, port, "GET", fmt.Sprintf("/bank/balances/%s", someFakeAddr), nil)
require.Equal(t, http.StatusOK, res.StatusCode, body)
require.Contains(t, body, "[]")
}
|
[
"\"VERSION\""
] |
[] |
[
"VERSION"
] |
[]
|
["VERSION"]
|
go
| 1 | 0 | |
bookstore_project/settings.py
|
"""
Django settings for bookstore_project project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import logging
logger = logging.getLogger(__name__)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'YOU_BETTER_SET_ENVIRONMENT_VARIABLE_DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get('DJANGO_DEBUG', 0))
print("DEBUG is: %s" % DEBUG)
PIPENV_DEV = int(os.environ.get('PIPENV_DEV', 0))
print("PIPENV_DEV is: %s" % PIPENV_DEV)
USE_SENDGRID = int(os.environ.get('USE_SENDGRID', 0))
print("USE_SENDGRID is: %s" % USE_SENDGRID)
if DEBUG:
ALLOWED_HOSTS = ['*']
else:
# TODO: Use host ip from the image environment
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
# Third-party
'rest_framework',
'crispy_forms',
'allauth',
'allauth.account',
'corsheaders',
# local apps
'pages.apps.PagesConfig',
'users.apps.UsersConfig',
'books.apps.BooksConfig',
'orders.apps.OrdersConfig',
'api.apps.ApiConfig',
'todos.apps.TodosConfig',
'contracts.apps.ContractsConfig'
]
if DEBUG:
INSTALLED_APPS.append('debug_toolbar')
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT = 'home'
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
# django-allauth config
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
if USE_SENDGRID:
EMAIL_BACKEND = "sendgrid_backend.SendgridBackend"
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SENDGRID_API_KEY = os.environ.get("SENDGRID_API_KEY")
SENDGRID_SANDBOX_MODE_IN_DEBUG = False
SENDGRID_ECHO_TO_STDOUT = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware'
]
if DEBUG:
MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')
APPEND_SLASH = True
CORS_ORIGIN_WHITELIST = (
'http://localhost:3000',
'http://localhost:8000'
)
INTERNAL_IPS = [
'127.0.0.1',
]
ROOT_URLCONF = 'bookstore_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookstore_project.wsgi.application'
# Rest Framework
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
]
}
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'test': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DATABASE_NAME', 'django-master'),
'USER': os.getenv('DATABASE_USER'),
'PASSWORD': os.getenv('DATABASE_PASSWORD'),
'HOST': os.getenv('DATABASE_HOST', '127.0.0.1'),
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'users.CustomUser'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
#STATICFILES_FINDERS = [
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder'
#]
DEFAULT_FROM_EMAIL = '[email protected]'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STRIPE_TEST_PUBLISHABLE_KEY = os.environ.get('STRIPE_TEST_PUBLISHABLE_KEY')
STRIPE_TEST_SECRET_KEY = os.environ.get('STRIPE_TEST_SECRET_KEY')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
# use this formatter for debugging
'verbose': {
'format': '{asctime} - {name} - {module}({lineno:d}) - {process:d}/{thread:d} - {levelname}: {message}',
'style': '{',
},
'simple': {
'format': '{asctime} - {name} - {levelname}: {message}',
'style': '{',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
# 'stackdriver': {
# 'class': 'google.cloud.logging.handlers.CloudLoggingHandler',
# 'client': log_client
# },
# 'stackdriver_error_reporting': {
# 'level': 'ERROR',
# 'class': 'gcp_utils.stackdriver_logging.StackdriverErrorHandler',
# }
},
'loggers': {
# root logger
'': {
'handlers': [
'console',
# 'stackdriver'
],
'level': 'INFO',
'name': os.getenv('ENVIRONMENT_NAME', "DEV")
},
},
}
|
[] |
[] |
[
"STRIPE_TEST_PUBLISHABLE_KEY",
"DJANGO_SECRET_KEY",
"DATABASE_PASSWORD",
"DATABASE_NAME",
"DATABASE_HOST",
"SENDGRID_API_KEY",
"ENVIRONMENT_NAME",
"DJANGO_DEBUG",
"PIPENV_DEV",
"DATABASE_USER",
"USE_SENDGRID",
"STRIPE_TEST_SECRET_KEY"
] |
[]
|
["STRIPE_TEST_PUBLISHABLE_KEY", "DJANGO_SECRET_KEY", "DATABASE_PASSWORD", "DATABASE_NAME", "DATABASE_HOST", "SENDGRID_API_KEY", "ENVIRONMENT_NAME", "DJANGO_DEBUG", "PIPENV_DEV", "DATABASE_USER", "USE_SENDGRID", "STRIPE_TEST_SECRET_KEY"]
|
python
| 12 | 0 | |
docs/conf.py
|
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# -- Project information -----------------------------------------------------
project = 'Bastille'
copyright = '2018-2021, Christer Edwards'
author = 'Christer Edwards'
# The short X.Y version
version = '0.9.20211225'
# The full version, including alpha/beta/rc tags
release = '0.9.20211225-beta'
# -- General configuration ---------------------------------------------------
extensions = [
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = None
# -- Options for HTML output -------------------------------------------------
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------------
htmlhelp_basename = 'Bastilledoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
}
latex_documents = [
(master_doc, 'Bastille.tex', 'Bastille Documentation',
'Christer Edwards', 'manual'),
]
# -- Options for manual page output ------------------------------------------
man_pages = [
(master_doc, 'bastille', 'Bastille Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
texinfo_documents = [
(master_doc, 'Bastille', 'Bastille Documentation',
author, 'Bastille', 'Bastille is an open-source system for automating deployment and management of containerized applications on FreeBSD.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
epub_title = project
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
demo/simulation-rtt-studio-arm2d/gcc-arm-none-eabi-10.3-2021.07/arm-none-eabi/share/gdb/system-gdbinit/wrs-linux.py
|
# Copyright (C) 2011-2021 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configure GDB using the WRS/Linux environment."""
import os
if 'ENV_PREFIX' in os.environ:
gdb.execute('set sysroot %s' % os.environ['ENV_PREFIX'])
else:
print "warning: ENV_PREFIX environment variable missing."
print "The debugger will probably be unable to find the correct system libraries"
|
[] |
[] |
[
"ENV_PREFIX"
] |
[]
|
["ENV_PREFIX"]
|
python
| 1 | 0 | |
lektor/cli.py
|
import os
import sys
import json
import time
import itertools
import warnings
import click
import pkg_resources
from .i18n import get_default_lang, is_valid_language
from .utils import secure_url
from .project import Project
version = pkg_resources.get_distribution('Lektor').version # pylint: disable=no-member
def echo_json(data):
click.echo(json.dumps(data, indent=2).rstrip())
def pruneflag(cli):
return click.option(
'--prune/--no-prune', default=True,
help='Controls if old '
'artifacts should be pruned. "prune" is the default.')(cli)
def extraflag(cli):
return click.option(
'-f', '--extra-flag', 'extra_flags', multiple=True,
help='Defines an arbitrary flag. These can be used by plugins '
'to customize the build and deploy process. More information can be '
'found in the documentation of affected plugins.')(cli)
def buildflag_deprecated(ctx, param, value):
if value:
warnings.warn(
'use --extra-flag instead of --build-flag',
DeprecationWarning,
)
return value
def buildflag(cli):
return click.option(
'--build-flag', 'build_flags', multiple=True,
help='Deprecated. Use --extra-flag instead.',
callback=buildflag_deprecated)(cli)
class AliasedGroup(click.Group):
# pylint: disable=inconsistent-return-statements
def get_command(self, ctx, cmd_name):
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
matches = [x for x in self.list_commands(ctx)
if x.startswith(cmd_name)]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
class Context(object):
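# Holds per-invocation CLI state: the resolved project, the plugin environment and the UI language.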
def __init__(self):
self._project_path = os.environ.get('LEKTOR_PROJECT') or None
self._project = None
self._env = None
self._ui_lang = None
def _get_ui_lang(self):
rv = self._ui_lang
if rv is None:
rv = self._ui_lang = get_default_lang()
return rv
def _set_ui_lang(self, value):
self._ui_lang = value
ui_lang = property(_get_ui_lang, _set_ui_lang)
del _get_ui_lang, _set_ui_lang
def set_project_path(self, value):
self._project_path = value
self._project = None
def get_project(self, silent=False):
if self._project is not None:
return self._project
if self._project_path is not None:
rv = Project.from_path(self._project_path)
else:
rv = Project.discover()
if rv is None:
if silent:
return None
if self._project_path is None:
raise click.UsageError('Could not automatically discover '
'project. A Lektor project must '
'exist in the working directory or '
'any of the parent directories.')
raise click.UsageError('Could not find project "%s"' %
self._project_path)
self._project = rv
return rv
def get_default_output_path(self):
rv = os.environ.get('LEKTOR_OUTPUT_PATH')
if rv is not None:
return rv
return self.get_project().get_output_path()
def get_env(self, extra_flags=None):
if self._env is not None:
return self._env
from lektor.environment import Environment
env = Environment(self.get_project(), load_plugins=False,
extra_flags=extra_flags)
self._env = env
return env
def load_plugins(self, reinstall=False, extra_flags=None):
from .packages import load_packages
load_packages(self.get_env(extra_flags=extra_flags),
reinstall=reinstall)
if not reinstall:
from .pluginsystem import initialize_plugins
initialize_plugins(self.get_env())
pass_context = click.make_pass_decorator(Context, ensure=True)
def validate_language(ctx, param, value):
if value is not None and not is_valid_language(value):
raise click.BadParameter('Unsupported language "%s".' % value)
return value
@click.group(cls=AliasedGroup)
@click.option('--project', type=click.Path(),
help='The path to the lektor project to work with.')
@click.option('--language', default=None, callback=validate_language,
help='The UI language to use (overrides autodetection).')
@click.version_option(prog_name='Lektor', version=version)
@pass_context
def cli(ctx, project=None, language=None):
"""The lektor management application.
This command can invoke lektor locally and serve up the website. It's
intended for local development of websites.
"""
warnings.simplefilter('default')
if language is not None:
ctx.ui_lang = language
if project is not None:
ctx.set_project_path(project)
@cli.command('build')
@click.option('-O', '--output-path', type=click.Path(), default=None,
help='The output path.')
@click.option('--watch', is_flag=True, help='If this is enabled the build '
'process goes into an automatic loop where it watches the '
'file system for changes and rebuilds.')
@pruneflag
@click.option('-v', '--verbose', 'verbosity', count=True,
help='Increases the verbosity of the logging.')
@click.option('--source-info-only', is_flag=True,
help='Instead of building only updates the source infos. The '
'source info is used by the web admin panel to quickly find '
'information about the source files (for instance jump to '
'files).')
@click.option('--buildstate-path', type=click.Path(), default=None,
help='Path to a directory that Lektor will use for coordinating '
'the state of the build. Defaults to a directory named '
'`.lektor` inside the output path.')
@extraflag
@buildflag
@click.option('--profile', is_flag=True,
help='Enable build profiler.')
@pass_context
def build_cmd(ctx, output_path, watch, prune, verbosity,
source_info_only, buildstate_path, profile,
extra_flags, build_flags):
"""Builds the entire project into the final artifacts.
The default behavior is to build the project into the default build
output path which can be discovered with the `project-info` command
but an alternative output folder can be provided with the `--output-path`
option.
The default behavior is to perform a build followed by a pruning step
which removes no longer referenced artifacts from the output folder.
Lektor will only build the files that require rebuilding if the output
folder is reused.
To enforce a clean build you have to issue a `clean` command first.
If the build fails the exit code will be `1` otherwise `0`. This can be
used by external scripts to only deploy on successful build for instance.
"""
from lektor.builder import Builder
from lektor.reporter import CliReporter
extra_flags = tuple(itertools.chain(extra_flags or (), build_flags or ()))
if output_path is None:
output_path = ctx.get_default_output_path()
ctx.load_plugins(extra_flags=extra_flags)
env = ctx.get_env()
def _build():
builder = Builder(env.new_pad(), output_path,
buildstate_path=buildstate_path,
extra_flags=extra_flags)
if source_info_only:
builder.update_all_source_infos()
return True
if profile:
from .utils import profile_func
failures = profile_func(builder.build_all)
else:
failures = builder.build_all()
if prune:
builder.prune()
return failures == 0
reporter = CliReporter(env, verbosity=verbosity)
with reporter:
success = _build()
if not watch:
return sys.exit(0 if success else 1)
from lektor.watcher import watch
click.secho('Watching for file system changes', fg='cyan')
last_build = time.time()
for ts, _, _ in watch(env):
if ts > last_build:
_build()
last_build = time.time()
@cli.command('clean')
@click.option('-O', '--output-path', type=click.Path(), default=None,
help='The output path.')
@click.option('-v', '--verbose', 'verbosity', count=True,
help='Increases the verbosity of the logging.')
@click.confirmation_option(help='Confirms the cleaning.')
@extraflag
@pass_context
def clean_cmd(ctx, output_path, verbosity, extra_flags):
"""Cleans the entire build folder.
If no build folder is provided, the default build folder of the project
in the Lektor cache is used.
"""
from lektor.builder import Builder
from lektor.reporter import CliReporter
if output_path is None:
output_path = ctx.get_default_output_path()
ctx.load_plugins(extra_flags=extra_flags)
env = ctx.get_env()
reporter = CliReporter(env, verbosity=verbosity)
with reporter:
builder = Builder(env.new_pad(), output_path)
builder.prune(all=True)
@cli.command('deploy', short_help='Deploy the website.')
@click.argument('server', required=False)
@click.option('-O', '--output-path', type=click.Path(), default=None,
help='The output path.')
@click.option('--username', envvar='LEKTOR_DEPLOY_USERNAME',
help='An optional username to override the URL.')
@click.option('--password', envvar='LEKTOR_DEPLOY_PASSWORD',
help='An optional password to override the URL or the '
'default prompt.')
@click.option('--key-file', envvar='LEKTOR_DEPLOY_KEY_FILE',
help='The path to a key file that should be used for the '
'authentication of the deployment.')
@click.option('--key', envvar='LEKTOR_DEPLOY_KEY',
help='The contents of a key file directly as a string that should '
'be used for authentication of the deployment.')
@extraflag
@pass_context
def deploy_cmd(ctx, server, output_path, extra_flags, **credentials):
"""This command deploys the entire contents of the build folder
(`--output-path`) onto a configured remote server. The name of the
server must fit the name from a target in the project configuration.
If no server is supplied then the default server from the config is
used.
The deployment credentials are typically contained in the project config
file but it's also possible to supply them here explicitly. In this
case the `--username` and `--password` parameters (as well as the
`LEKTOR_DEPLOY_USERNAME` and `LEKTOR_DEPLOY_PASSWORD` environment
variables) can override what's in the URL.
For more information see the deployment chapter in the documentation.
"""
from lektor.publisher import publish, PublishError
if output_path is None:
output_path = ctx.get_default_output_path()
ctx.load_plugins(extra_flags=extra_flags)
env = ctx.get_env()
config = env.load_config()
if server is None:
server_info = config.get_default_server()
if server_info is None:
raise click.BadParameter('No default server configured.',
param_hint='server')
else:
server_info = config.get_server(server)
if server_info is None:
raise click.BadParameter('Server "%s" does not exist.' % server,
param_hint='server')
try:
event_iter = publish(env, server_info.target, output_path,
credentials=credentials, server_info=server_info,
extra_flags=extra_flags)
except PublishError as e:
raise click.UsageError('Server "%s" is not configured for a valid '
'publishing method: %s' % (server, e))
click.echo('Deploying to %s' % server_info.name)
click.echo(' Build cache: %s' % output_path)
click.echo(' Target: %s' % secure_url(server_info.target))
try:
for line in event_iter:
click.echo(' %s' % click.style(line, fg='cyan'))
except PublishError as e:
click.secho('Error: %s' % e, fg='red')
else:
click.echo('Done!')
@cli.command('server', short_help='Launch a local server.')
@click.option('-h', '--host', default='127.0.0.1',
help='The network interface to bind to. The default is the '
'loopback device, but by setting it to 0.0.0.0 it becomes '
'available on all network interfaces.')
@click.option('-p', '--port', default=5000, help='The port to bind to.',
show_default=True)
@click.option('-O', '--output-path', type=click.Path(), default=None,
help='The dev server will build into the same folder as '
'the build command by default.')
@pruneflag
@click.option('-v', '--verbose', 'verbosity', count=True,
help='Increases the verbosity of the logging.')
@extraflag
@buildflag
@click.option('--browse', is_flag=True)
@pass_context
def server_cmd(ctx, host, port, output_path, prune, verbosity,
extra_flags, build_flags, browse):
"""The server command will launch a local server for development.
Lektor's development server will automatically build all files into
pages similar to how the build command with the `--watch` switch
works, but also at the same time serve up the website on a local
HTTP server.
"""
from lektor.devserver import run_server
extra_flags = tuple(itertools.chain(extra_flags or (), build_flags or ()))
if output_path is None:
output_path = ctx.get_default_output_path()
ctx.load_plugins(extra_flags=extra_flags)
click.echo(' * Project path: %s' % ctx.get_project().project_path)
click.echo(' * Output path: %s' % output_path)
run_server((host, port), env=ctx.get_env(), output_path=output_path,
prune=prune, verbosity=verbosity, ui_lang=ctx.ui_lang,
extra_flags=extra_flags,
lektor_dev=os.environ.get('LEKTOR_DEV') == '1',
browse=browse)
@cli.command('project-info', short_help='Shows the info about a project.')
@click.option('as_json', '--json', is_flag=True,
help='Prints out the data as json.')
@click.option('ops', '--name', is_flag=True, multiple=True,
flag_value='name', help='Print the project name')
@click.option('ops', '--project-file', is_flag=True, multiple=True,
flag_value='project_file',
help='Print the path to the project file.')
@click.option('ops', '--tree', is_flag=True, multiple=True,
flag_value='tree', help='Print the path to the tree.')
@click.option('ops', '--output-path', is_flag=True, multiple=True,
flag_value='default_output_path',
help='Print the path to the default output path.')
@pass_context
def project_info_cmd(ctx, as_json, ops):
"""Prints out information about the project. This is particular
useful for script usage or for discovering information about a
Lektor project that is not immediately obvious (like the paths
to the default output folder).
"""
project = ctx.get_project()
if as_json:
echo_json(project.to_json())
return
if ops:
data = project.to_json()
for op in ops:
click.echo(data.get(op, ''))
else:
click.echo('Name: %s' % project.name)
click.echo('File: %s' % project.project_file)
click.echo('Tree: %s' % project.tree)
click.echo('Output: %s' % project.get_output_path())
@cli.command('content-file-info', short_help='Provides information for '
'a set of lektor files.')
@click.option('as_json', '--json', is_flag=True,
help='Prints out the data as json.')
@click.argument('files', nargs=-1, type=click.Path())
@pass_context
def content_file_info_cmd(ctx, files, as_json):
"""Given a list of files this returns the information for those files
in the context of a project. If the files are from different projects
an error is generated.
"""
project = None
def fail(msg):
if as_json:
echo_json({'success': False, 'error': msg})
sys.exit(1)
raise click.UsageError('Could not find content file info: %s' % msg)
for filename in files:
this_project = Project.discover(filename)
if this_project is None:
fail('no project found')
if project is None:
project = this_project
elif project.project_path != this_project.project_path:
fail('multiple projects')
if project is None:
fail('no file indicated a project')
project_files = []
for filename in files:
content_path = project.content_path_from_filename(filename)
if content_path is not None:
project_files.append(content_path)
if not project_files:
fail('no files resolve in project')
if as_json:
echo_json({
'success': True,
'project': project.to_json(),
'paths': project_files,
})
else:
click.echo('Project:')
click.echo(' Name: %s' % project.name)
click.echo(' File: %s' % project.project_file)
click.echo(' Tree: %s' % project.tree)
click.echo('Paths:')
for project_file in project_files:
click.echo(' - %s' % project_file)
@cli.group('plugins', short_help='Manages plugins.')
def plugins_cmd():
"""This command group provides various helpers to manages plugins
in a Lektor project.
"""
@plugins_cmd.command('add', short_help='Adds a new plugin to the project.')
@click.argument('name')
@pass_context
def plugins_add_cmd(ctx, name):
"""This command can add a new plugin to the project. If just given
the name of the plugin the latest version of that plugin is added to
the project.
The argument is either the name of the plugin or the name of the plugin
suffixed with `@version` with the version. For instance to install
the version 0.1 of the plugin demo you would do `[email protected]`.
"""
project = ctx.get_project()
from .packages import add_package_to_project
try:
info = add_package_to_project(project, name)
except RuntimeError as e:
click.echo('Error: %s' % e, err=True)
else:
click.echo('Package %s (%s) was added to the project' % (
info['name'],
info['version'],
))
@plugins_cmd.command('remove', short_help='Removes a plugin from the project.')
@click.argument('name')
@pass_context
def plugins_remove_cmd(ctx, name):
"""This command can remove a plugin to the project again. The name
of the plugin is the only argument to the function.
"""
project = ctx.get_project()
from .packages import remove_package_from_project
try:
old_info = remove_package_from_project(project, name)
except RuntimeError as e:
click.echo('Error: %s' % e, err=True)
else:
if old_info is None:
click.echo('Package was not registered with the project. '
'Nothing was removed.')
else:
click.echo('Removed package %s (%s)' % (
old_info['name'],
old_info['version'],
))
@plugins_cmd.command('list', short_help='List all plugins.')
@click.option('as_json', '--json', is_flag=True,
help='Prints out the data as json.')
@click.option('-v', '--verbose', 'verbosity', count=True,
help='Increases the verbosity of the output.')
@pass_context
def plugins_list_cmd(ctx, as_json, verbosity):
"""This returns a list of all currently actively installed plugins
in the project. By default it only prints out the plugin IDs and
version numbers but the entire information can be returned by
increasing verbosity with `-v`. Additionally JSON output can be
requested with `--json`.
"""
ctx.load_plugins()
env = ctx.get_env()
plugins = sorted(env.plugins.values(), key=lambda x: x.id.lower())
if as_json:
echo_json({
'plugins': [x.to_json() for x in plugins]
})
return
if verbosity == 0:
for plugin in plugins:
click.echo('%s (version %s)' % (plugin.id, plugin.version))
return
for idx, plugin in enumerate(plugins):
if idx:
click.echo()
click.echo('%s (%s)' % (plugin.name, plugin.id))
for line in plugin.description.splitlines():
click.echo(' %s' % line)
if plugin.path is not None:
click.echo(' path: %s' % plugin.path)
click.echo(' version: %s' % plugin.version)
click.echo(' import-name: %s' % plugin.import_name)
@plugins_cmd.command('flush-cache', short_help='Flushes the plugin '
'installation cache.')
@pass_context
def plugins_flush_cache_cmd(ctx):
"""This uninstalls all plugins in the cache. On next usage the plugins
will be reinstalled automatically. This is mostly just useful during
plugin development when the cache has become corrupted.
"""
click.echo('Flushing plugin cache ...')
from .packages import wipe_package_cache
wipe_package_cache(ctx.get_env())
click.echo('All done!')
@plugins_cmd.command('reinstall', short_help='Reinstall all plugins.')
@pass_context
def plugins_reinstall_cmd(ctx):
"""Forces a re-installation of all plugins. This will download the
requested versions of the plugins and install them into the plugin
cache folder. Alternatively one can just use `flush-cache` to
flush the cache and on next build Lektor will automatically download
the plugins again.
"""
ctx.load_plugins(reinstall=True)
@cli.command('quickstart', short_help='Starts a new empty project.')
@click.option('--name', help='The name of the project.')
@click.option('--path', type=click.Path(), help='Output directory')
@pass_context
def quickstart_cmd(ctx, **options):
"""Starts a new empty project with a minimum boilerplate."""
from lektor.quickstart import project_quickstart
project_quickstart(options)
from .devcli import cli as devcli # pylint: disable=wrong-import-position
cli.add_command(devcli, 'dev')
def main(as_module=False):
args = sys.argv[1:]
name = None
if as_module:
name = 'python -m lektor'
sys.argv = ['-m', 'lektor'] + args
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
|
[] |
[] |
[
"LEKTOR_DEV",
"LEKTOR_PROJECT",
"LEKTOR_OUTPUT_PATH"
] |
[]
|
["LEKTOR_DEV", "LEKTOR_PROJECT", "LEKTOR_OUTPUT_PATH"]
|
python
| 3 | 0 | |
gar_exporter.py
|
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from apiclient.discovery import build
from apiclient.errors import HttpError
from datetime import datetime
from oauth2client.service_account import ServiceAccountCredentials
import time, random, httplib2, os, bios, helper, json
class GarCollector(object):
lastResponses = {}
def collect(self):
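# Invoked by the Prometheus client on every scrape: runs each configured report and yields the resulting gauges.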
self._gauges = {}
analytics = self._initialize_analyticsreporting()
print("[",datetime.now(),"]","Authorized to talk with Analytics v4 API")
reports = helper.yamlToReportRequests(bios.read(CONFIG_FILE))
for report in reports:
print("[",datetime.now(),"]","[REPORT REQUEST]", report)
segmentsList = report['segmentsList']
del report['segmentsList']
response = self._requestWithExponentialBackoff(analytics, report)
print("[",datetime.now(),"]","RESPONSE OBTAINED")
self._get_metrics(
response,
report.get('reportRequests')[0].get('viewId'),
report.get('reportRequests')[0].get('dateRanges')[0],
segmentsList
)
for metric in self._gauges:
yield self._gauges[metric]
def _initialize_analyticsreporting(self):
credentials = ServiceAccountCredentials.from_json_keyfile_name(
SERVICE_ACCOUNT_FILE, scopes=SCOPES
)
http = credentials.authorize(httplib2.Http())
analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
return analytics
def _get_report(self, analytics, report):
return analytics.reports().batchGet(
body=report
).execute()
def _requestWithExponentialBackoff(self, analytics, report):
"""Wrapper to request Google Analytics data with exponential backoff.
The request is issued via _get_report and the response is returned. If a
retriable error occurs, the request is retried using exponential backoff;
if all retries fail, the last successful response for this report is returned instead.
Args:
analytics: The analytics service object
report: Report request structure
Returns:
The API response from the _get_report method.
"""
reportId = hash(json.dumps(report))
for n in range(0, 5):
try:
response = self._get_report(analytics, report)
self.lastResponses[reportId] = response
return response
except HttpError as error:
print("[WARNING] Http request error", error.resp.reason)
if error.resp.reason in ['userRateLimitExceeded', 'quotaExceeded',
'internalServerError', 'backendError']:
time.sleep((2 ** n) + random.random())
else:
break
print("[",datetime.now(),"]","[ERROR] There has been an error, returning earlier result", reportId)
return self.lastResponses[reportId]
def _get_metrics(self, response, viewId, dateRanges, segmentsList):
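# Flattens the Reporting API response into Prometheus gauges, using the view ID, date range and report dimensions as labels.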
METRIC_PREFIX = 'ga_reporting'
LABELS = ['ga:viewId', 'ga:dateStart', 'ga:dateEnd']
self._gauges = {}
for report in response.get('reports', []):
columnHeader = report.get('columnHeader', {})
dimensionHeaders = LABELS
dimensionHeaders.extend(columnHeader.get('dimensions', []))
# Added dimensions as labels - fixed bug
dimensionHeadersModified = [x[3:] for x in dimensionHeaders]
metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
rows = report.get('data', {}).get('rows', [])
testi=0
for row in rows:
dimensions = [viewId, dateRanges.get('startDate'), dateRanges.get('endDate')]
dimensions.extend(row.get('dimensions', []))
dateRangeValues = row.get('metrics', [])
for i, element in enumerate(dimensions):
if element == 'Dynamic Segment':
dimensions[i] = list(segmentsList[0][0].values())[0]
testi+=1
dimension=""
# for header, dimension in zip(dimensionHeaders, dimensions):
# print("[HEADER] " + header + ': ' + dimension)
for i, values in enumerate(dateRangeValues):
# print('Date range (' + str(i) + ')')
for metricHeader, returnValue in zip(metricHeaders, values.get('values')):
metric = metricHeader.get('name')[3:]
# print("[METRIC] " + metric + ': ' + returnValue)
self._gauges[metric+str(testi)] = GaugeMetricFamily('%s_%s' % (METRIC_PREFIX, metric), '%s' % metric, value=None, labels=dimensionHeadersModified)
self._gauges[metric+str(testi)].add_metric(dimensions, value=float(returnValue))
if __name__ == '__main__':
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest')
SERVICE_ACCOUNT_FILE = os.getenv('SERVICE_ACCOUNT_FILE')
CONFIG_FILE=os.getenv('CONFIG_FILE')
print("[",datetime.now(),"]","Starting server in 0.0.0.0:" + os.getenv('BIND_PORT'))
start_http_server(int(os.getenv('BIND_PORT')))
REGISTRY.register(GarCollector())
print("[",datetime.now(),"]","Waiting for serving metrics")
while True: time.sleep(1)
|
[] |
[] |
[
"BIND_PORT",
"CONFIG_FILE",
"SERVICE_ACCOUNT_FILE"
] |
[]
|
["BIND_PORT", "CONFIG_FILE", "SERVICE_ACCOUNT_FILE"]
|
python
| 3 | 0 | |
conf/setup-environment.d/updatehub-freescale.py
|
import os
def __after_init_updatehub_freescale():
PLATFORM_ROOT_DIR = os.environ['PLATFORM_ROOT_DIR']
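# the extra BSP layers below live under sources/ relative to the platform checkout root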
append_layers([ os.path.join(PLATFORM_ROOT_DIR, 'sources', p) for p in
[
'meta-freescale',
'meta-freescale-3rdparty',
'meta-updatehub-freescale',
]])
# FSL EULA
eulas.accept['meta-freescale/EULA'] = 'ACCEPT_FSL_EULA = "1"'
run_after_init(__after_init_updatehub_freescale)
|
[] |
[] |
[
"PLATFORM_ROOT_DIR"
] |
[]
|
["PLATFORM_ROOT_DIR"]
|
python
| 1 | 0 | |
dlp/src/test/java/com/example/dlp/RedactIT.java
|
/*
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.dlp;
import static junit.framework.TestCase.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(JUnit4.class)
//CHECKSTYLE OFF: AbbreviationAsWordInName
public class RedactIT {
//CHECKSTYLE ON: AbbreviationAsWordInName
private ByteArrayOutputStream bout;
private PrintStream out;
@Before
public void setUp() {
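// redirect stdout so the sample's console output can be asserted on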
bout = new ByteArrayOutputStream();
out = new PrintStream(bout);
System.setOut(out);
assertNotNull(System.getenv("GOOGLE_APPLICATION_CREDENTIALS"));
}
@Test
public void testInfoTypesInStringAreReplaced() throws Exception {
String text =
"\"My phone number is (234) 456-7890 and my email address is [email protected]\"";
Redact.main(new String[] {"-s", text, "-r", "_REDACTED_"});
String output = bout.toString();
assertTrue(output.contains("My phone number is _REDACTED_ and my email address is _REDACTED_"));
}
@Ignore // TODO: b/69461298
@Test
public void testInfoTypesInImageAreReplaced() throws Exception {
ClassLoader classLoader = getClass().getClassLoader();
// confirm that current data contains info types
File file = new File(classLoader.getResource("test.png").getFile());
Inspect.main(new String[] {"-f", file.getAbsolutePath()});
String output = bout.toString();
assertTrue(output.contains("PHONE_NUMBER"));
assertTrue(output.contains("EMAIL_ADDRESS"));
bout.reset();
String outputFilePath = "output.png";
Redact.main(
new String[] {
"-f", file.getAbsolutePath(), "-infoTypes", "PHONE_NUMBER", "-o", outputFilePath
});
Inspect.main(new String[] {"-f", outputFilePath});
output = bout.toString();
assertFalse(output.contains("PHONE_NUMBER"));
assertTrue(output.contains("EMAIL_ADDRESS"));
}
@After
public void tearDown() {
System.setOut(null);
bout.reset();
}
}
|
[
"\"GOOGLE_APPLICATION_CREDENTIALS\""
] |
[] |
[
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["GOOGLE_APPLICATION_CREDENTIALS"]
|
java
| 1 | 0 | |
importer/pricing_import.py
|
#!/usr/bin/env python
"""
For downloading data from the AWS pricing API and importing into mysql/mariadb
"""
from __future__ import print_function
import os
import hashlib
import csv
import json
import logging
import requests
import pymysql.cursors
class PricingImporter(object):
"""For importing aws pricing into a database"""
def __init__(self, column_titles):
self.column_titles = column_titles
# Pricing URLs
self.base_url = "https://pricing.us-east-1.amazonaws.com{}"
self.offer_index_url = "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/index.json"
# Retrieve environment variables
self.mariadb_host = os.getenv('MARIADB_HOST', 'localhost')
self.mariadb_user = os.getenv('MARIADB_USER', 'pricer')
self.mariadb_password = os.getenv('MARIADB_PASSWORD', 'prices123')
self.mariadb_db = os.getenv('MARIADB_DB', 'aws_prices')
@classmethod
def _md5(cls, file):
"""Retrieves a md5 of the specified file."""
hash_md5 = hashlib.md5()
with open(file, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
@classmethod
def _download_file(cls, target_url, filename):
"""Downloads a file from the specified URL"""
logging.info('Downloading file from %s...', target_url)
response = requests.get(target_url, stream=True)
with open(filename, 'wb') as f:
f.write(response.content)
def _parse_csv_schema(self, filename, table_name):
"""Parses a csv into schema"""
with open(filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
if row[0] == "SKU":
schema = self._generate_schema_from_row(row, table_name)
return schema
def _generate_schema_from_row(self, row, table_name):
"""Generate sql statement for schema"""
logging.info('Generating SQL Schema from CSV...')
schema_sql = 'create table {}(\n'.format(table_name)
for column_title in row:
if column_title in self.column_titles:
column_name = self.column_titles.get(column_title, {}).get('name', 'PARSING_ERROR')
column_type = self.column_titles.get(column_title, {}).get('type', 'PARSING_ERROR')
schema_sql += '{} {},\n'.format(column_name, column_type)
else:
schema_sql += ''.join(e for e in column_title if e.isalnum()) + " VARCHAR(200),\n"
schema_sql = schema_sql[:-2]
schema_sql += ");\n"
return schema_sql
def _download_offer_index_file(self, offer_index_filename):
"""Downloads the offer index file"""
offer_index_exists = os.path.isfile(offer_index_filename)
if offer_index_exists:
resp = requests.head(self.offer_index_url)
md5_remote = resp.headers['etag'][1:-1]
if self._md5(offer_index_filename) == md5_remote:
logging.warning('You already have the latest offer index!')
return offer_index_exists
self._download_file(self.offer_index_url, offer_index_filename)
offer_index_exists = os.path.isfile(offer_index_filename)
return offer_index_exists
def load_offer_index(self):
"""Loads the offer index from json file"""
offer_index_filename = "/tmp/offer_index.json"
offer_index_exists = self._download_offer_index_file(offer_index_filename)
if not offer_index_exists:
raise IOError('Failed to download offer index file!')
with open(offer_index_filename) as json_data:
offer_index = json.load(json_data)
return offer_index
def download_offer_file(self, offer_code_url):
"""Downloads the offer file"""
offer_code = offer_code_url.split('/')[4]
offer_code_url = '{}.csv'.format(offer_code_url[:-5])
url = self.base_url.format(offer_code_url)
local_filename = '/tmp/{}.csv'.format(offer_code)
# Make sure the file does not already exist
file_exists = os.path.isfile(local_filename)
if file_exists:
resp = requests.head(url)
md5_remote = resp.headers['etag'][1:-1]
# If we already have the file, compare md5 of local and remote files
if self._md5(local_filename) == md5_remote:
logging.warning('You already have the latest csv for %s! Skipping...', offer_code)
return file_exists
self._download_file(url, local_filename)
# Ensure the file now exists after downloading it
file_exists = os.path.isfile(local_filename)
return file_exists
def import_csv_into_mariadb(self, filename):
"""Imports csv of data into mariadb"""
table_name = filename[:-4]
filename = '/tmp/{}'.format(filename)
schema = self._parse_csv_schema(filename, table_name)
db = pymysql.connect(host=self.mariadb_host,
user=self.mariadb_user,
passwd=self.mariadb_password,
db=self.mariadb_db,
local_infile=1)
cursor = db.cursor()
load_data = "LOAD DATA LOCAL INFILE '{}' INTO TABLE {}".format(filename, table_name)
load_data += """ FIELDS TERMINATED BY ','
ENCLOSED BY '"'
LINES TERMINATED BY '\n'
IGNORE 6 LINES; """
logging.info('Checking to see if table %s exists...', table_name)
cursor.execute("SELECT * FROM information_schema.tables WHERE table_schema = '{}' AND table_name = '{}' LIMIT 1;".format(self.mariadb_db, table_name))
if cursor.fetchone() is not None:
logging.info('Dropping existing table %s', table_name)
cursor.execute('DROP TABLE {};'.format(table_name))
logging.info('Recreating table...')
cursor.execute(schema)
logging.info('Loading csv data...')
cursor.execute(load_data)
db.commit()
cursor.close()
def main(self):
"""Entrypoint function for class...downloads data and imports to mariadb"""
offer_index = self.load_offer_index()
filenames = []
urls = []
number_of_threads = 0
for offer, offer_info in offer_index.get('offers', {}).items():
number_of_threads += 1
filenames.append('{}.csv'.format(offer))
urls.append(offer_info.get('currentVersionUrl', 'PARSING_ERROR'))
for url in urls:
self.download_offer_file(url)
for filename in filenames:
self.import_csv_into_mariadb(filename)
def load_column_titles():
"""Nice place to store this until it can be loaded from a file"""
#TODO: Import this from a json file instead.
column_titles = {
"SKU": {
"name": "SKU",
"type": "VARCHAR(17)"
},
"OfferTermCode": {
"name": "OfferTermCode",
"type": "VARCHAR(10)"
},
"RateCode": {
"name": "RateCode",
"type": "VARCHAR(38)"
},
"TermType": {
"name": "TermType",
"type": "VARCHAR(16)"
},
"PriceDescription": {
"name": "PriceDescription",
"type": "VARCHAR(200)"
},
"EffectiveDate": {
"name": "EffectiveDate",
"type": "DATE"
},
"StartingRange": {
"name": "StartingRange",
"type": "VARCHAR(200)"
},
"EndingRange": {
"name": "EndingRange",
"type": "VARCHAR(200)"
},
"Unit": {
"name": "Unit",
"type": "VARCHAR(50)"
},
"PricePerUnit": {
"name": "PricePerUnit",
"type": "DOUBLE"
},
"Currency": {
"name": "Currency",
"type": "VARCHAR(3)"
},
"LeaseContractLength": {
"name": "LeaseContractLength",
"type": "VARCHAR(50)"
},
"PurchaseOption": {
"name": "PurchaseOption",
"type": "VARCHAR(50)"
},
"OfferingClass": {
"name": "OfferingClass",
"type": "VARCHAR(50)"
},
"Product Family": {
"name": "ProductFamily",
"type": "VARCHAR(200)"
},
"serviceCode": {
"name": "ServiceCode",
"type": "VARCHAR(50)"
},
"Location": {
"name": "Location",
"type": "VARCHAR(50)"
},
"Location Type": {
"name": "LocationType",
"type": "VARCHAR(50)"
},
"Instance Type": {
"name": "InstanceType",
"type": "VARCHAR(50)"
},
"Current Generation": {
"name": "CurrentGeneration",
"type": "VARCHAR(10)"
},
"Instance Family": {
"name": "InstanceFamily",
"type": "VARCHAR(50)"
},
"vCPU": {
"name": "vCPU",
"type": "VARCHAR(10)"
},
"Physical Processor": {
"name": "PhysicalProcessor",
"type": "VARCHAR(50)"
},
"Clock Speed": {
"name": "ClockSpeed",
"type": "VARCHAR(50)"
},
"Memory": {
"name": "Memory",
"type": "VARCHAR(50)"
},
"Storage": {
"name": "Storage",
"type": "VARCHAR(50)"
},
"Network Performance": {
"name": "NetworkPerformance",
"type": "VARCHAR(50)"
},
"Processor Architecture": {
"name": "ProcessorArchitecture",
"type": "VARCHAR(20)"
},
"Storage Media": {
"name": "StorageMedia",
"type": "VARCHAR(15)"
},
"Volume Type": {
"name": "VolumeType",
"type": "VARCHAR(100)"
},
"Max Volume Size": {
"name": "MaxVolumeSize",
"type": "VARCHAR(10)"
},
"Max IOPS/volume": {
"name": "MaxIOPSVolume",
"type": "VARCHAR(40)"
},
"Max IOPS Burst Performance": {
"name": "MaxIOPSBurstPerformance",
"type": "VARCHAR(40)"
},
"Max throughput/volume": {
"name": "MaxThroughputPerVolume",
"type": "VARCHAR(30)"
},
"Provisioned": {
"name": "Provisioned",
"type": "VARCHAR(10)"
},
"Tenancy": {
"name": "Tenancy",
"type": "VARCHAR(20)"
},
"EBS Optimized": {
"name": "EBSOptimized",
"type": "VARCHAR(10)"
},
"Operating System": {
"name": "OS",
"type": "VARCHAR(15)"
},
"License Model": {
"name": "LicenseModel",
"type": "VARCHAR(50)"
},
"Group": {
"name": "AWSGroup",
"type": "VARCHAR(300)"
},
"Group Description": {
"name": "AWSGroupDescription",
"type": "VARCHAR(300)"
},
"Transfer Type": {
"name": "TransferType",
"type": "VARCHAR(200)"
},
"From Location": {
"name": "FromLocation",
"type": "VARCHAR(50)"
},
"From Location Type": {
"name": "FromLocationType",
"type": "VARCHAR(50)"
},
"To Location": {
"name": "ToLocation",
"type": "VARCHAR(50)"
},
"To Location Type": {
"name": "ToLocationType",
"type": "VARCHAR(50)"
},
"usageType": {
"name": "UsageType",
"type": "VARCHAR(50)"
},
"operation": {
"name": "Operation",
"type": "VARCHAR(50)"
},
"Comments": {
"name": "Comments",
"type": "VARCHAR(200)"
},
"Dedicated EBS Throughput": {
"name": "DedicatedEBSThroughput",
"type": "VARCHAR(30)"
},
"Enhanced Networking Supported": {
"name": "EnhancedNetworkingSupported",
"type": "VARCHAR(10)"
},
"GPU": {
"name": "GPU",
"type": "VARCHAR(10)"
},
"Instance Capacity - 10xlarge": {
"name": "InstanceCapacity10xLarge",
"type": "VARCHAR(10)"
},
"Instance Capacity - 2xlarge": {
"name": "InstanceCapacity2xLarge",
"type": "VARCHAR(10)"
},
"Instance Capacity - 4xlarge": {
"name": "InstanceCapacity4xLarge",
"type": "VARCHAR(10)"
},
"Instance Capacity - 8xlarge": {
"name": "InstanceCapacity8xLarge",
"type": "VARCHAR(10)"
},
"Instance Capacity - large": {
"name": "InstanceCapacityLarge",
"type": "VARCHAR(10)"
},
"Instance Capacity - medium": {
"name": "InstanceCapacityMedium",
"type": "VARCHAR(10)"
},
"Instance Capacity - xlarge": {
"name": "InstanceCapacityxLarge",
"type": "VARCHAR(10)"
},
"Intel AVX Available": {
"name": "IntelAVXAvailable",
"type": "VARCHAR(10)"
},
"Intel AVX2 Available": {
"name": "IntelAVX2Available",
"type": "VARCHAR(10)"
},
"Intel Turbo Available": {
"name": "IntelTurboAvailable",
"type": "VARCHAR(10)"
},
"Physical Cores": {
"name": "PhysicalCores",
"type": "VARCHAR(10)"
},
"Pre Installed S/W": {
"name": "PreInstalledSW",
"type": "VARCHAR(50)"
},
"Processor Features": {
"name": "ProcessorFeatures",
"type": "VARCHAR(50)"
},
"Sockets": {
"name": "Sockets",
"type": "VARCHAR(10)"
}
}
return column_titles
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
COLUMN_TITLES = load_column_titles()
PRICING_IMPORTER = PricingImporter(COLUMN_TITLES)
PRICING_IMPORTER.main()
|
[] |
[] |
[
"MARIADB_USER",
"MARIADB_PASSWORD",
"MARIADB_HOST",
"MARIADB_DB"
] |
[]
|
["MARIADB_USER", "MARIADB_PASSWORD", "MARIADB_HOST", "MARIADB_DB"]
|
python
| 4 | 0 | |
main.py
|
import gc
import json
import os
import random
import sys
import time
import uuid
from pathlib import Path
import numpy as np
import torch
from config.parser import Configuration
from data.datasets import DatasetClass, DataSourceClass
from log.logger import ConsoleLogger
from model.transfer_learning import BackendClass
from test.tester import TesterClass
from train.train import TrainerClass
from log.tensorboard import Logger
def init_seeds(seed: int):
"""
Establishes a common seed for all the random number generators used in the code to enable reproducibility
:param seed: the random seed value
"""
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.set_deterministic(True)
def main(config: Configuration, task_name: str):
start_time = time.time()
seed = config.get_param_value('seed')
init_seeds(seed)
torch.set_num_threads(1)
output_folder_path = config.get_param_value('output_folder_path')
output_folder_name = config.get_param_value('output_folder_name', False)
if output_folder_name is None:
output_folder_name = str(uuid.uuid1())
output_result_path = Path(output_folder_path) / output_folder_name
output_result_path.mkdir(parents=True, exist_ok=True)
output_json_results_path = output_result_path / 'results_test.json'
# Create a copy of the configuration used in the output directory
config.save_config(output_result_path)
logger = Logger(output_result_path, config.get_config())
sys.stdout = ConsoleLogger(output_result_path, sys.stdout)
gpu_id = config.get_param_value('gpu_id', False)
device_name = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() and gpu_id is not None else "cpu")
dataset_name = config.get_param_value('dataset_name')
dataset_path = config.get_param_value('dataset_path')
download = config.get_param_value('download')
source_dataset_class = DataSourceClass.get_data_source_class(dataset_name)
sample_dataset = source_dataset_class(root=dataset_path, download=download, train=True)
num_classes = len(sample_dataset.classes)
num_channels = sample_dataset.data.shape[3] if len(sample_dataset.data.shape) == 4 else 1
network_class = BackendClass.get_backend_class(config.get_param_value('backend_name'))
model = network_class(in_channels=num_channels, num_classes=num_classes, config=config, device=device_name)
model = model.to(device_name)
dataset_class = DatasetClass.get_dataset_class(config)
dataset = dataset_class(config, output_result_path, device_name, model, source_dataset_class)
print(dataset)
print(model)
if output_json_results_path.exists():
with open(str(output_json_results_path), 'r') as fp:
output_dict = json.load(fp)
else:
output_dict = {
"time": {}
}
if task_name == 'train' or task_name == 'train_test':
trainer_class = TrainerClass.get_trainer_class(config.get_param_value('network_type'))
trainer = trainer_class(config=config, device=device_name, model=model, output_result_path=output_result_path,
dataset=dataset, logger=logger)
print('Training with {}'.format(trainer))
start_train_time = time.time()
trainer.train()
end_train_time = time.time()
output_dict['time']['train'] = round(end_train_time - start_train_time, 4)
gc.collect()
tester_class = TesterClass.get_tester_class(config.get_param_value('network_type'),
config.get_param_value('fc_architecture/test_fusion_enabled',
mandatory=True))
tester = tester_class(config=config, device=device_name, model=model, output_result_path=output_result_path,
test_loader=dataset.get_eval_loader())
output_result_dict = tester.test()
output_dict['train_metrics'] = output_result_dict
if hasattr(trainer, "inc_train_exemplar_idx"):
output_dict['inc_train_exemplar_idx'] = trainer.inc_train_exemplar_idx
if 'test_metrics' in output_dict:
del output_dict['test_metrics']
if task_name == 'test':
model_path = str(output_result_path / 'model.pth')
print('Loading test model from {}'.format(model_path))
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
if task_name == 'test' or task_name == 'train_test':
print('Testing')
tester_class = TesterClass.get_tester_class(config.get_param_value('network_type'),
config.get_param_value('fc_architecture/test_fusion_enabled',
mandatory=True))
tester = tester_class(config=config, device=device_name, model=model, output_result_path=output_result_path,
test_loader=dataset.get_test_loader())
print(tester)
start_test_time = time.time()
output_result_dict = tester.test()
output_dict['test_metrics'] = output_result_dict
end_test_time = time.time()
output_dict['time']['test'] = round(end_test_time - start_test_time, 4)
print('Execution time: {}s'.format(round(time.time() - start_time, 4)))
end_time = time.time()
output_dict['time']['total'] = round(end_time - start_time, 4)
logger.end_logging()
with open(str(output_result_path / 'results_test.json'), 'w') as fp:
json.dump(output_dict, fp)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Train arguments')
parser.add_argument('task', choices=('train', 'test', 'train_test', 'stats', 'alpha', 'eval'))
parser.add_argument('-c', '--config', type=str, required=True, help='path to the config file to be used')
parser_args = vars(parser.parse_args())
configuration = Configuration(parser_args['config'])
task = parser_args['task']
seed_value = configuration.get_param_value('seed')
if isinstance(seed_value, list):
for i, seed in enumerate(seed_value):
configuration.config['seed'] = seed
configuration.config['output_folder_name'] = str(i)
main(configuration, task)
else:
main(configuration, task)
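# Illustrative invocation (the config path is an assumption, not shipped with this file):
#   python main.py train_test -c configs/experiment.json
# which trains, evaluates, and writes results_test.json into the configured output folder.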
|
[] |
[] |
[
"CUBLAS_WORKSPACE_CONFIG"
] |
[]
|
["CUBLAS_WORKSPACE_CONFIG"]
|
python
| 1 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend ISFs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to an insifad or insifa-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the insifa data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Insifa/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Insifa")
return os.path.expanduser("~/.insifa")
def read_bitcoin_config(dbdir):
"""Read the insifa.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "insifa.conf"))))
return dict(config_parser.items("all"))
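# Illustrative example (file contents are made up): an insifa.conf containing
#   rpcuser=alice
#   rpcpassword=secret
# is returned as {'rpcuser': 'alice', 'rpcpassword': 'secret'}; trailing '#' comments
# on a line are stripped before parsing.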
def connect_JSON(config):
"""Connect to a insifa JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the insifad we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(insifad):
info = insifad.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
insifad.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = insifad.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(insifad):
address_summary = dict()
address_to_account = dict()
for info in insifad.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = insifad.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = insifad.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-insifa-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
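# Illustrative example (amounts are made up): with unspent inputs of 0.4 and 0.8 ISF
# and needed=1.0, select_coins returns both outputs and a change amount of 0.2.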
def create_tx(insifad, fromaddresses, toaddress, amount, fee):
all_coins = list_available(insifad)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f ISF available, need %f\n"%(total_available, needed))
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to insifad.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = insifad.createrawtransaction(inputs, outputs)
signed_rawtx = insifad.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(insifad, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = insifad.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(insifad, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = insifad.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(insifad, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
# the fee actually paid is the difference between inputs and outputs
if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get ISFs from")
parser.add_option("--to", dest="to", default=None,
help="address to send ISFs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of insifa.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
insifad = connect_JSON(config)
if options.amount is None:
address_summary = list_available(insifad)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(insifad) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(insifad, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(insifad, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = insifad.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
config.py
|
import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOADED_PHOTOS_DEST = 'app/static/photos'
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
@staticmethod
def init_app(app):
pass
class TestConfig(Config):
pass
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
pass
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:moringa@localhost/db'
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test': TestConfig
}
|
[] |
[] |
[
"MAIL_PASSWORD",
"SECRET_KEY",
"DATABASE_URL",
"MAIL_USERNAME"
] |
[]
|
["MAIL_PASSWORD", "SECRET_KEY", "DATABASE_URL", "MAIL_USERNAME"]
|
python
| 4 | 0 | |
tools/dkcm/mainservice/mainservice.go
|
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mainservice
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"sync"
"time"
"knative.dev/test-infra/pkg/clustermanager/e2e-tests/boskos"
"knative.dev/test-infra/pkg/clustermanager/kubetest2"
"knative.dev/test-infra/pkg/mysql"
"knative.dev/test-infra/tools/dkcm/clerk"
)
var (
// channel serves as a lock for go routine
chanLock = make(chan struct{})
boskosClient *boskos.Client
dbClient *clerk.DBClient
serviceAccount string
DefaultClusterParams = clerk.ClusterParams{Zone: DefaultZone, Nodes: DefaultNodesCount, NodeType: DefaultNodeType}
)
// Response to Prow
type ServiceResponse struct {
IsReady bool `json:"isReady"`
Message string `json:"message"`
ClusterInfo *clerk.Response `json:"clusterInfo"`
}
func Start(dbConfig *mysql.DBConfig, boskosClientHost, gcpServiceAccount string) error {
var err error
boskosClient, err = boskos.NewClient(boskosClientHost, "", "")
if err != nil {
return fmt.Errorf("failed to create Boskos client: %w", err)
}
dbClient, err = clerk.NewDB(dbConfig)
if err != nil {
return fmt.Errorf("failed to create Clerk client: %w", err)
}
serviceAccount = gcpServiceAccount
server := http.NewServeMux()
server.HandleFunc("/request-cluster", handleNewClusterRequest)
server.HandleFunc("/get-cluster", handleGetCluster)
server.HandleFunc("/clean-cluster", handleCleanCluster)
// use PORT environment variable, or default to 8080
port := DefaultPort
if fromEnv := os.Getenv("PORT"); fromEnv != "" {
port = fromEnv
}
// start the web server on port and accept requests
log.Printf("Server listening on port %q", port)
return http.ListenAndServe(fmt.Sprintf(":%v", port), server)
}
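// Illustrative request flow (host, port, and payload values are assumptions based on
// the handlers below, not documented endpoints):
//   curl -X POST -d "prowjobid=123&nodes=4&nodeType=n1-standard-8&zone=us-central1" \
//     http://localhost:8080/request-cluster
// returns an access token; polling
//   curl "http://localhost:8080/get-cluster?token=<token>"
// answers with {"isReady": ..., "message": ..., "clusterInfo": ...} once a cluster is assigned.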
// handle cleaning cluster request after usage
func handleCleanCluster(w http.ResponseWriter, req *http.Request) {
// add project name
token := req.URL.Query().Get("token")
r, err := dbClient.GetRequest(token)
if err != nil {
http.Error(w, fmt.Sprintf("there is an error getting the request with the token: %v, please try again", err), http.StatusForbidden)
return
}
c, err := dbClient.GetCluster(r.ClusterID)
if err != nil {
http.Error(w, fmt.Sprintf("there is an error getting the cluster with the token: %v, please try again", err), http.StatusForbidden)
return
}
err = dbClient.DeleteCluster(r.ClusterID)
if err != nil {
http.Error(w, fmt.Sprintf("there is an error deleting the cluster with the token: %v, please try again", err), http.StatusForbidden)
return
}
err = boskosClient.ReleaseGKEProject(c.ProjectID)
if err != nil {
http.Error(w, "there is an error releasing Boskos's project. Please try again.", http.StatusInternalServerError)
return
}
}
// check the pool capacity and create clusters if necessary
func checkPoolCap(cp *clerk.ClusterParams) {
chanLock <- struct{}{}
numAvail := dbClient.CheckNumStatus(cp, Ready)
numWIP := dbClient.CheckNumStatus(cp, WIP)
diff := DefaultOverProvision - numAvail - numWIP
var wg sync.WaitGroup
// create cluster if not meeting overprovisioning criteria
for i := int64(0); i < diff; i++ {
wg.Add(1)
log.Printf("Creating a new cluster: %v", cp)
go CreateCluster(cp, &wg)
}
wg.Wait()
<-chanLock
}
// create a new cluster on an acquired Boskos project and record it in the database
func CreateCluster(cp *clerk.ClusterParams, wg *sync.WaitGroup) {
project, err := boskosClient.AcquireGKEProject(boskos.GKEProjectResource)
if err != nil {
log.Printf("Failed to acquire a project from boskos: %v", err)
return
}
projectName := project.Name
c := clerk.NewCluster(clerk.AddProjectID(projectName))
c.ClusterParams = cp
clusterID, err := dbClient.InsertCluster(c)
wg.Done()
if err != nil {
log.Printf("Failed to insert a new Cluster entry: %v", err)
return
}
if err := kubetest2.Run(&kubetest2.Options{}, &kubetest2.GKEClusterConfig{
GCPServiceAccount: serviceAccount,
GCPProjectID: projectName,
Name: DefaultClusterName,
Region: cp.Zone,
Machine: cp.NodeType,
MinNodes: int(cp.Nodes),
MaxNodes: int(cp.Nodes),
Network: DefaultNetworkName,
Environment: "prod",
Version: "latest",
Scopes: "cloud-platform",
}); err != nil {
if err := dbClient.UpdateCluster(clusterID, clerk.UpdateStringField(Status, Fail)); err != nil {
log.Printf("Failed to mark the Cluster entry as failed: %v", err)
return
}
err = boskosClient.ReleaseGKEProject(c.ProjectID)
if err != nil {
log.Printf("Failed to release Boskos Project: %v", err)
return
}
return
}
if err := dbClient.UpdateCluster(clusterID, clerk.UpdateStringField(Status, Ready)); err != nil {
log.Printf("Failed to mark the Cluster entry as ready: %v", err)
return
}
}
// assign clusters if available upon request
func AssignCluster(token string, w http.ResponseWriter) {
r, err := dbClient.GetRequest(token)
if err != nil {
http.Error(w, fmt.Sprintf("there is an error getting the request with the token: %v, please try again", err), http.StatusForbidden)
return
}
// check if the Prow job has enough priority to get an existing cluster
ranking := dbClient.PriorityRanking(r)
numAvail := dbClient.CheckNumStatus(r.ClusterParams, Ready)
available, clusterID := dbClient.CheckAvail(r.ClusterParams)
var serviceResponse *ServiceResponse
if available && ranking <= numAvail {
response, err := dbClient.GetCluster(clusterID)
if err != nil {
http.Error(w, fmt.Sprintf("there is an error getting available clusters: %v, please try again", err), http.StatusInternalServerError)
return
}
dbClient.UpdateRequest(r.ID, clerk.UpdateNumField(ClusterID, clusterID))
dbClient.UpdateCluster(clusterID, clerk.UpdateStringField(Status, InUse))
serviceResponse = &ServiceResponse{IsReady: true, Message: "Your cluster is ready!", ClusterInfo: response}
} else {
serviceResponse = &ServiceResponse{IsReady: false, Message: "Your cluster isn't ready yet! Please check back later."}
}
responseJson, err := json.Marshal(serviceResponse)
if err != nil {
http.Error(w, fmt.Sprintf("there is an error parsing the response: %v, please try again", err), http.StatusInternalServerError)
return
}
w.Write(responseJson)
}
// handle new cluster request
func handleNewClusterRequest(w http.ResponseWriter, req *http.Request) {
prowJobID := req.PostFormValue("prowjobid")
nodesCount, err := strconv.Atoi(req.PostFormValue("nodes"))
if err != nil || nodesCount <= 0 {
nodesCount = DefaultNodesCount
}
nodesType := req.PostFormValue("nodeType")
if nodesType == "" {
nodesType = DefaultNodeType
}
zone := req.PostFormValue("zone")
if zone == "" {
zone = DefaultZone
}
cp := clerk.NewClusterParams(clerk.AddZone(zone), clerk.AddNodes(int64(nodesCount)), clerk.AddNodeType(nodesType))
r := clerk.NewRequest(clerk.AddProwJobID(prowJobID), clerk.AddRequestTime(time.Now()))
r.ClusterParams = cp
accessToken, err := dbClient.InsertRequest(r)
if err != nil {
http.Error(w, fmt.Sprintf("there is an error creating new request: %v. Please try again.", err), http.StatusInternalServerError)
return
}
go checkPoolCap(cp)
w.Write([]byte(accessToken))
}
// handle get cluster request
func handleGetCluster(w http.ResponseWriter, req *http.Request) {
token := req.URL.Query().Get("token")
AssignCluster(token, w)
}
// run timeout check
func runTimeOut() {
for {
dbClient.ClearTimeOut(DefaultTimeOut)
time.Sleep(CheckInterval * time.Second)
}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
vmlifecycle/vmmanagers/vmmanagers_suite_test.go
|
package vmmanagers_test
import (
"log"
"regexp"
"testing"
"os"
"path/filepath"
"fmt"
"io/ioutil"
"reflect"
"strings"
"gopkg.in/yaml.v2"
"github.com/fatih/color"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/pivotal-cf/om/vmlifecycle/vmmanagers"
)
func TestVMManager(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "VMManagers Suite")
}
var _ = BeforeSuite(func() {
log.SetOutput(GinkgoWriter)
pathToStub, err := gexec.Build("github.com/pivotal-cf/om/vmlifecycle/vmmanagers/stub")
Expect(err).ToNot(HaveOccurred())
tmpDir := filepath.Dir(pathToStub)
os.Setenv("PATH", tmpDir+":"+os.Getenv("PATH"))
govcPath := tmpDir + "/govc"
gcloudPath := tmpDir + "/gcloud"
omPath := tmpDir + "/om"
err = os.Link(pathToStub, govcPath)
Expect(err).ToNot(HaveOccurred())
err = os.Link(pathToStub, omPath)
Expect(err).ToNot(HaveOccurred())
err = os.Link(pathToStub, gcloudPath)
Expect(err).ToNot(HaveOccurred())
color.NoColor = true
})
var _ = AfterSuite(func() {
gexec.CleanupBuildArtifacts()
})
func testIAASForPropertiesInExampleFile(iaas string) {
It("has an example file that represents all the correct fields", func() {
exampleFile, err := ioutil.ReadFile(fmt.Sprintf("../../../docs-platform-automation/docs/examples/opsman-config/%s.yml", strings.ToLower(iaas)))
Expect(err).ToNot(HaveOccurred())
isolateCommentedParamRegex := regexp.MustCompile(`(?m)^(\s+)# ([\w-]+: )`)
exampleFile = isolateCommentedParamRegex.ReplaceAll(exampleFile, []byte("$1$2"))
config := vmmanagers.OpsmanConfigFilePayload{}
err = yaml.UnmarshalStrict(exampleFile, &config)
Expect(err).ToNot(HaveOccurred())
configStruct := reflect.ValueOf(config.OpsmanConfig)
iaasPtrStruct := configStruct.FieldByName(iaas)
iaasStruct := iaasPtrStruct.Elem()
Expect(iaasStruct.NumField()).To(BeNumerically(">", 0))
testPropertiesExist(iaasStruct)
})
}
func testPropertiesExist(vst reflect.Value) {
tst := vst.Type()
for i := 0; i < vst.NumField(); i++ {
errorMsg := fmt.Sprintf("field %s does not exist or is an empty value in the iaas example config", tst.Field(i).Name)
field := vst.Field(i)
switch field.Kind() {
case reflect.Struct:
testPropertiesExist(vst.Field(i))
case reflect.Bool:
if tst.Field(i).Name != "UseUnmanagedDiskDEPRECATED" && tst.Field(i).Name != "UseInstanceProfileDEPRECATED" {
Expect(field.Bool()).ToNot(Equal(false), errorMsg)
}
case reflect.String:
Expect(field.String()).ToNot(Equal(""), errorMsg)
case reflect.Int:
Expect(field.Int()).ToNot(Equal(0), errorMsg)
case reflect.Slice:
Expect(field.Slice(0, 0)).ToNot(Equal(""), errorMsg)
case reflect.Map:
Expect(field.MapKeys()).ToNot(HaveLen(0), errorMsg)
default:
Fail(fmt.Sprintf("unexpected type: '%s' in the iaas config", field.Kind()))
}
}
}
func writePDFFile(contents string) string {
tempfile, err := ioutil.TempFile("", "some*.pdf")
Expect(err).ToNot(HaveOccurred())
_, err = tempfile.WriteString(contents)
Expect(err).ToNot(HaveOccurred())
err = tempfile.Close()
Expect(err).ToNot(HaveOccurred())
return tempfile.Name()
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
tutel/parted/spmdx.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os, sys, json, re
import tempfile
import copy
import inspect
import logging
import importlib
from . import solver
from . import patterns
logging.basicConfig(level=logging.INFO)
session = None
def init(backend_name):
global session
if session is not None:
raise Exception('Function `init()` can only be invoked once.')
if not re.match('^[a-zA-Z0-9]+$', backend_name):
raise Exception('Only letters and digits are allowed for backend_name, get: %s' % backend_name)
session = init
session.backend = importlib.import_module('..backend.%s.config' % backend_name, __name__)
session.is_strict_fmt = int(os.environ.get('STRICT_FMT', 0)) > 0
session.ptype = os.environ.get('PTYPE', '')
session.custom_dict = dict()
manual_config = os.environ.get('CONFIG', '')
manual_config = json.loads(manual_config) if manual_config else {}
manual_config = dict([(x, manual_config[x] if isinstance(manual_config[x], int) else manual_config[x][0]) for x in manual_config])
session.manual_config = manual_config
try:
extra = importlib.import_module('..backend.%s' % backend_name, __name__)
except:
extra = None
return extra
def new_dependency(header_content, depends=[]):
header_content = header_content.strip() + '\n'
depends = depends if isinstance(depends, list) else [depends]
return {"data": header_content, "depends": depends}
def product(arrlist):
result = 1
for x in arrlist: result *= int(x)
return result
class Mapper2D:
def __init__(self, item):
def split_dim(item):
parts = item.replace(')', '(').split('(')
for i in range(len(parts)):
if i % 2 == 0:
for x in parts[i]:
if x.strip():
yield x
else:
x = [x for x in parts[i] if x.strip()]
yield x if len(x) > 1 else x[0]
iter = split_dim(item)
self.id2ax = [x for x in iter]
self.ax2id = dict([(x, i) for i, x in enumerate(self.id2ax) if isinstance(x, str) and x != '*'])
for i, x in enumerate(self.id2ax):
if not isinstance(x, str):
for j, ax in enumerate(x):
self.ax2id[ax] = (i, j)
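# Illustrative example (not from the source): Mapper2D('a(bc)d') produces
# id2ax == ['a', ['b', 'c'], 'd'] and ax2id == {'a': 0, 'd': 2, 'b': (1, 0), 'c': (1, 1)},
# i.e. fused axes keep a (dim, position) pair while plain axes map to a single dim.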
class Parser:
def __init__(self, irs):
left, rights = irs.split('=')
reduce_type = ''
if left[-1] in ('+', '<', '>', '[', ']'):
left, reduce_type = left[:-1], left[-1]
self.reduce_type = reduce_type
self.left = Mapper2D(left)
self.rights = [Mapper2D(x) for x in rights.split(',')]
self.num_inputs = len(self.rights)
def get_leading_target(self, target):
return target if isinstance(target, str) else target[0]
def get_reduce_axes(self):
reduce_axes = set()
for right in self.rights:
for k in right.ax2id:
if k not in self.left.ax2id:
reduce_axes.add(k)
return reduce_axes
def emit_dims_by_name(self, ax_name):
if ax_name == '*':
raise NotImplementedError()
target_ax = self.get_leading_target(ax_name)
source_dims, parted = dict(), 0
for i, right in enumerate(self.rights):
if target_ax not in right.ax2id:
source_dims[i] = -1
continue
ids = right.ax2id[target_ax]
if isinstance(ids, int):
source_dims[i] = ids
elif ids[1] == 0:
source_dims[i] = ids[0]
else:
raise NotImplementedError()
parted += 1
return source_dims, parted
def emit_dims_by_id(self, output_dim):
if output_dim == -1:
return dict([(i, -1) for i in range(self.num_inputs)]), 0
if output_dim == -2 or self.left.id2ax[output_dim] == '*':
raise NotImplementedError()
if output_dim >= 0:
return self.emit_dims_by_name(self.left.id2ax[output_dim])
raise NotImplementedError()
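# Illustrative example (not from the source): Parser('ab+ = ac, cb') describes a
# matmul-like op; the trailing '+' is recorded as the reduce type, and
# get_reduce_axes() returns {'c'}, the axis present on the right-hand sides but
# absent from the output layout.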
class Program:
def __init__(self, code, kwargs):
self.code = code
self.kwargs = kwargs
def save(self, path):
with open(path, 'w') as fp:
fp.write('# Copyright (c) Microsoft Corporation.\n')
fp.write('# Licensed under the MIT license.\n\n')
fp.write(self.code)
def execute(self, save_file_path=None):
is_tempfile = save_file_path is None
if is_tempfile:
save_file_path = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.gettempdir(), suffix='.py').name
def remove_file(filenames):
if isinstance(filenames, str):
filenames = [filenames]
for filename in filenames:
try:
os.unlink(filename)
except FileNotFoundError:
pass
remove_file(save_file_path)
model_program = self.code
glob_size = self.kwargs['total_nodes']
device_type = self.kwargs['device_type']
group_size = self.kwargs['spmd_nodes']
with open(save_file_path, 'w') as fp:
fp.write(model_program)
log_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.gettempdir(), suffix='.log').name
os.environ['CONFIG_STORE_PATH'] = log_file
remove_file(log_file)
os_command = session.backend.get_execute_cmd(group_size, glob_size, device_type, save_file_path)
try:
result = ''
logging.info('Executing: %s' % os_command)
assert 0 == os.system(os_command), f"Failed to execute command: {os_command}"
with open(log_file, 'r') as fp:
result = fp.read().strip()
result = json.loads(result)
except:
import traceback
print(traceback.format_exc())
print(result)
result = {}
if is_tempfile:
remove_file(save_file_path)
return result
class Custom:
__t_builtins__ = dict()
__t_ids__ = dict()
__t_ops__ = dict()
def __init__(self, data, fw_ops=None, inputs=None, op_name=None, shape_fn=None, flops=None, depends=[]):
self.op_type = op_name or inspect.currentframe().f_back.f_code.co_name
if not re.match('^[a-zA-Z0-9]+$', self.op_type):
self.op_type = 'Custom'
assert self.op_type[0].isupper(), f'The leading character of the operator name must be an uppercase letter (received: "{self.op_type}").'
rank_dict = (Custom.__t_ops__ if self.op_type != 'Builtin' else Custom.__t_builtins__) if self.op_type != 'Id' else Custom.__t_ids__
rank_dict[self] = len(rank_dict)
self.name = f'{self.op_type[0].lower()}{self.op_type[1:]}{rank_dict[self]}'
self.depends = depends if isinstance(depends, list) else [depends]
if fw_ops is not None:
self.fw_ops = fw_ops.replace('@@', '')
if inputs is None:
assert self.fw_ops is not None, 'At least one of "fw_ops" and "inputs" must be specified.'
fw_ops = fw_ops.split('@@')
input_names = []
for x in range(1, len(fw_ops), 2):
if fw_ops[x] not in input_names:
input_names.append(fw_ops[x])
self.inputs = [session.custom_dict[x] for x in input_names]
else:
self.inputs = inputs
self.outputs = []
self.data = data
if isinstance(data, dict):
self.op_type = 'data'
if data["is_param"]:
self.name += '_'
self.op_type = 'param'
self.inputs = []
self.shape = data["shape"]
self.dtype = data["dtype"]
self.flops = flops or 0
else:
self.op_type = 'compute'
self.parser = Parser(data)
if shape_fn is not None:
self.shape, self.dtype = shape_fn(self.inputs)
else:
try:
infershape = dict()
for i, x in enumerate(self.parser.rights):
for ax in x.ax2id:
infershape[ax] = self.inputs[i].shape[x.ax2id[ax]]
self.shape = [infershape[x] if not isinstance(x, list) else product([infershape[y] for y in x]) for x in self.parser.left.id2ax]
self.dtype = self.inputs[0].dtype
except:
raise Exception(f'Cannot auto-infer the shape for op {self.name} due to unknown dimension size in tensor format: {self.data}')
# logging.info(f'Shape dict of {self.name} = {self.shape}:{self.dtype}')
if flops is None:
self.flops = product(self.shape)
if self.parser.reduce_type:
infershape = dict()
for i, x in enumerate(self.parser.rights):
for ax in x.ax2id:
if isinstance(ax, str):
infershape[ax] = self.inputs[i].shape[x.ax2id[ax]]
self.flops *= product([infershape[x] for x in self.parser.get_reduce_axes()])
self.flops <<= 1
else:
self.flops = flops
assert self.name not in session.custom_dict, f"Node with name `{self.name}` already exists in the current session."
session.custom_dict[self.name] = self
def __del__(self):
try:
session.custom_dict.pop(self.name)
except:
pass
def update_config(self, parent, **kwargs):
if parent is not None and parent not in self.outputs:
self.outputs.append(parent)
node_name = self.name
if kwargs['spmd_nodes'] == 1:
self.config = -1
elif session.ptype == 'dp':
self.config = -1 if self.op_type == 'param' else 0
elif session.ptype == 'zero':
self.config = -2 if self.op_type == 'param' else 0
elif node_name in session.manual_config:
self.config = session.manual_config[node_name]
for input in self.inputs:
input.update_config(self, **kwargs)
def __str__(self):
return f'@@{self.name}@@'
def numel(self):
return int(product(self.shape))
def parse_inputs(self):
if isinstance(self.data, dict):
return []
results, patt = [], self.data
while True:
pos = re.search(r'\b[a-z][a-zA-Z0-9_]*\b', patt)
if not pos:
break
results += [patt[pos.start():pos.end()]]
patt = patt[pos.end() + 1:]
return results
def get_leading_dim(self):
return [i for i, x in enumerate(self.shape) if x > 1][0]
def get_input_by_name(self, name):
for inp in self.inputs:
if inp.name == name:
return inp
raise Exception(f'Node input with name `{name}` not found!')
def autotune(self, config_file=None, **kwargs):
config = Config.load_from_file(config_file)
if config:
return config
kwargs, results = optimize(self, **kwargs)
valid_configs = [sol for dim, sol in results if sol is not None]
if not valid_configs:
raise Exception('No valid configuration found!')
best_time, best_config = min(valid_configs)
config = Config.create(best_config, kwargs, best_time)
if config_file is not None:
config.save(config_file)
return config
def articulare_analyse(self):
low, dfn, cut = dict(), dict(), dict()
pcnt, root, st = [0], self, []
##### Mask Articulation Points
def mask_dfs(u):
tot = 0
st.append(u)
pcnt[0] += 1
dfn[u] = low[u] = pcnt[0]
for v in u.inputs + u.outputs:
# Assume every param tensor is unshared
if v.op_type == 'param':
continue
if v not in dfn:
tot += 1
mask_dfs(v)
low[u] = min(low[u], low[v])
if ((u == root and tot > 1) or (u != root and low[v] >= dfn[u])):
cut[u] = cut.get(u, 0) + 1
if low[v] >= dfn[u]:
while st.pop() != v:
continue
else:
low[u] = min(low[u], dfn[v])
cut[u] = cut.get(u, 0) + 1
mask_dfs(self)
##### Partition Computations into Groups
pcnt, visited, group_export = [0], set(), dict()
def compute_dfs(u, vid, is_leader):
if u in visited:
return
if u.op_type != 'compute':
return
if is_leader:
group_export[vid] = [u]
else:
group_export[vid].append(u)
visited.add(u)
for v in u.inputs:
if cut.get(v, 0) > 1:
pcnt[0] += 1
compute_dfs(v, pcnt[0], True)
else:
compute_dfs(v, vid, False)
compute_dfs(self, pcnt[0], True)
compute_groups = []
for _, members in sorted(group_export.items(), reverse=True):
for x in members:
multi_used = set()
for y in x.inputs:
if len(y.outputs) > 1:
multi_used.add(y)
compute_groups.append(([x for x in reversed(members)], multi_used))
return compute_groups
def get_data_parallel_config(self, **kwargs):
visited = set()
config = dict()
def property_dfs(node):
visited.add(id(node))
for inp in node.inputs:
if id(inp) not in visited:
property_dfs(inp)
config[node.name] = [-1, ""] if node.op_type == 'param' else [0, "BAR:0"]
property_dfs(self)
return Config.create(config, environ_config(kwargs))
def serialize(self, **kwargs):
node = self
node.update_config(None, **kwargs)
compute_groups = node.articulare_analyse()
input_nodes, compute_nodes, config = [], [], {}
visited = set()
def property_dfs(node):
visited.add(id(node))
node_name = node.name
for inp in node.inputs:
if id(inp) not in visited:
property_dfs(inp)
if hasattr(node, 'config'):
config[node_name] = getattr(node, 'config')
if isinstance(node.data, dict):
input_nodes.append(node)
else:
compute_nodes.append(node)
property_dfs(node)
return compute_groups, compute_nodes, input_nodes, config
def compile(self, config, **kwargs):
if not isinstance(config, dict):
assert config.config['v'] == Config.VERSION, f"Unmatched configuration file version: expect {Config.VERSION}, got {config.config['v']}"
for k in kwargs:
config.config['kwargs'][k] = kwargs[k]
kwargs = config.config['kwargs']
config = config.config['b']
run_mode = kwargs['run_mode']
device_type = kwargs['device_type']
total_nodes = kwargs['total_nodes']
spmd_nodes = kwargs['spmd_nodes']
assert total_nodes % spmd_nodes == 0, f"`total_nodes` must be evenly divisible by `spmd_nodes`, got: {total_nodes} % {spmd_nodes} != 0"
if True:
_, compute_nodes, input_nodes, restricted_state = self.serialize(**kwargs)
# Verify restricted_state & extra padding
for node in compute_nodes + input_nodes:
node_state = config[node.name][0]
if restricted_state.get(node.name, node_state) != node_state:
raise Exception(f"Unsatisfied sharding state requirements on node `{node.name}`")
if node_state >= 0 and node.shape[node_state] % spmd_nodes != 0:
raise Exception(f"Unsatisfied slicing chunks `{node.shape[node_state]} // {spmd_nodes}` on node `{node.name}`")
# Construct Inputs
input_list, param_list = [], []
for node in input_nodes:
shard_dim, _ = config[node.name]
if node.op_type != 'param':
input_list.append((node.name, session.backend.get_input_definition(node.name, node.shape, shard_dim, node.dtype, is_param=False)))
else:
param_list.append((node.name, session.backend.get_input_definition(node.name, node.shape, shard_dim, node.dtype, is_param=True)))
def apply_communicate(item_name, comm_op):
return re.sub(fr'\$', item_name, comm_op).strip()
# Construct Computes
graph_prog, temp_ids = [], 0
for node in compute_nodes:
output_dim, key = config[node.name]
if ':' in key:
key, rank = key.split(':')
rank = int(rank)
else:
rank = None
rule_func = solver.spmd_primitives_dict[key]
conn_sol, conn_src = None, None
try:
valid_count = 0
for rank, source_dims, connectors in rule_func(session, node, output_dim, spmd_nodes, rank):
valid_count += 1
assert valid_count <= 1, f"Ambiguous solution `{key}` for node with `{node.name}` at dimension {output_dim}"
conn_sol, conn_src = connectors, source_dims
except NotImplementedError:
pass
assert conn_sol is not None, f"No satisfying parallel pattern `{key}` applies to node `{node.name}`"
graph_prog += [f'{node.name} = {node.fw_ops}',]
for index in range(len(node.inputs)):
input_item = node.inputs[index]
item_name = input_item.name
from_state = config[item_name][0]
prim_state = conn_src[index]
if from_state != prim_state:
extra = {'output_shape': node.inputs[index].shape, 'is_param': node.inputs[index].op_type == 'param'}
if from_state == -2 and prim_state >= 0:
item_name = session.backend.link(item_name, -2, -1, **extra)
item_name = session.backend.link(item_name, -1, prim_state, **extra)
else:
item_name = session.backend.link(item_name, from_state, prim_state, **extra)
if index in conn_sol:
item_name = apply_communicate(item_name, conn_sol[index]) or item_name
if item_name != input_item.name:
temp_ids = temp_ids + 1
graph_prog[-1] = f'_temp{temp_ids} = {item_name}; ' + re.sub(fr'\b{input_item.name}\b', f'_temp{temp_ids}', graph_prog[-1])
aggr_output = apply_communicate(node.name, conn_sol.get('', ''))
if aggr_output:
graph_prog += [f'{node.name} = {aggr_output}']
depends, headers = set(), []
def compute_dependencies(nodes):
for node in nodes:
if id(node) in depends:
continue
depends.add(id(node))
for dep in node["depends"]:
compute_dependencies(dep)
headers.append(node["data"])
for node in compute_nodes:
compute_dependencies(node.depends)
program_strings = session.backend.generate_framework_code(device_type, spmd_nodes, total_nodes // spmd_nodes, run_mode, self.name, headers, input_list, param_list, graph_prog)
return Program(program_strings, kwargs)
def environ_config(kwargs):
if 'spmd_nodes' not in kwargs:
kwargs['spmd_nodes'] = kwargs['total_nodes']
if 'device_type' not in kwargs:
kwargs['device_type'] = os.environ.get('DEVICE', 'cuda')
if 'run_mode' not in kwargs:
kwargs['run_mode'] = os.environ.get('MODE', 'train')
assert kwargs['total_nodes'] % kwargs['spmd_nodes'] == 0, "`total_nodes` must be exactly divided by `spmd_nodes`."
return kwargs
def optimize(node, **kwargs):
kwargs = environ_config(kwargs)
if session.is_strict_fmt:
node = Id(node, op_name='Builtin')
node.config = 0
compute_groups, compute_nodes, input_nodes, config = node.serialize(**kwargs)
print('<< TUNE Graph >>\n')
print('\n'.join([f'| {x.name} <- new_{x.op_type}() | {x.dtype}{x.shape} | {getattr(x, "config", None)} |' for x in input_nodes]))
print('---------------------------------------------------')
print('\n'.join([f'| {x.name} <- {", ".join([x.name for x in x.inputs])} | {x.dtype}{x.shape} | "{x.data}" | {getattr(x, "config", None)} |' for x in compute_nodes]))
print('\n>> config = %s\n' % (json.dumps(config),))
sys.stdout.flush()
return kwargs, solver.solve_partition(session, compute_groups, input_nodes=input_nodes, split_pref=config, kwargs=kwargs)
class Config:
VERSION = '0.1'
@staticmethod
def load_from_file(filename):
if filename is not None and os.path.exists(filename):
return Config(filename)
return None
@staticmethod
def create(config, environ, timecost=0):
return Config({'v': Config.VERSION, 't': timecost, 'b': config, 'kwargs': environ})
def __init__(self, config):
if isinstance(config, dict):
self.set_config(config)
elif isinstance(config, str):
with open(config, 'r') as fp:
config = json.load(fp)
self.set_config(config)
else:
raise Exception('Unsupported config value: %s' % config)
def set_config(self, config):
if config['v'] != Config.VERSION:
raise Exception('Incompatible config version: expect %s, got %s' % (Config.VERSION, config['v']))
self.config = config
def __str__(self):
return json.dumps(self.config)
def save(self, filepath):
with open(filepath, 'w') as fp:
json.dump(self.config, fp)
def Id(x, op_name=None):
layout = ''.join([chr(ord('a') + i) for i in range(len(x.shape))])
return Custom(f'{layout} = {layout}', f'{x}', op_name=op_name)
def Tensor(shape, dtype, is_param=False):
inp = Custom({"shape": shape, "dtype": dtype, "is_param": is_param}, inputs=[])
if not is_param and session.is_strict_fmt:
config = getattr(inp, 'config', session.manual_config.get(inp.name, None))
if config is not None:
if inp.name in session.manual_config:
session.manual_config.pop(inp.name)
inp.config = 0
inp = Id(inp, op_name="Builtin")
inp.config = config
else:
inp.config = 0
inp = Id(inp, op_name="Builtin")
return inp
|
[] |
[] |
[
"PTYPE",
"CONFIG",
"CONFIG_STORE_PATH",
"DEVICE",
"MODE",
"STRICT_FMT"
] |
[]
|
["PTYPE", "CONFIG", "CONFIG_STORE_PATH", "DEVICE", "MODE", "STRICT_FMT"]
|
python
| 6 | 0 | |
pavement.py
|
"""
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from on github into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
paver bootstrap && source bootstrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setupegg.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c. Build python
2.5 and python 2.6 installers.
paver bdist_wininst_simple
You will have to configure your wine python locations (WINDOWS_PYTHON).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically puts the checksum into NOTES.txt, and writes the Changelog
which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
# What need to be installed to build everything on mac os x:
# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
# - paver + virtualenv
# - full texlive
import os
import sys
import shutil
import subprocess
import re
try:
from hashlib import md5
except ImportError:
from md5 import md5
import paver
from paver.easy import \
options, Bunch, task, call_task, sh, needs, cmdopts, dry
sys.path.insert(0, os.path.dirname(__file__))
try:
setup_py = __import__("setup")
FULLVERSION = setup_py.FULLVERSION
finally:
sys.path.pop(0)
DEFAULT_PYTHON = "2.6"
# Where to put the final installers, as put on sourceforge
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
virtualenv=Bunch(packages_to_install=["sphinx", "numpydoc"], no_site_packages=True),
sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
superpack=Bunch(builddir="build-superpack"),
installers=Bunch(releasedir="release",
installersdir=os.path.join("release", "installers")),
doc=Bunch(doc_root="doc",
sdir=os.path.join("doc", "source"),
bdir=os.path.join("doc", "build"),
bdir_latex=os.path.join("doc", "build", "latex"),
destdir_pdf=os.path.join("build_doc", "pdf")
),
html=Bunch(builddir=os.path.join("build", "html")),
dmg=Bunch(python_version=DEFAULT_PYTHON),
bdist_wininst_simple=Bunch(python_version=DEFAULT_PYTHON),
)
MPKG_PYTHON = {
"2.5": ["/Library/Frameworks/Python.framework/Versions/2.5/bin/python"],
"2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"]
}
SSE3_CFG = {'ATLAS': r'C:\local\lib\yop\sse3'}
SSE2_CFG = {'ATLAS': r'C:\local\lib\yop\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\yop\nosse', 'LAPACK': r'C:\local\lib\yop\nosse'}
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
if sys.platform =="darwin":
WINDOWS_PYTHON = {
"2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
"2.5": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"]
}
WINDOWS_ENV = os.environ
WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
MAKENSIS = ["wine", "makensis"]
elif sys.platform == "win32":
WINDOWS_PYTHON = {
"2.6": ["C:\Python26\python.exe"],
"2.5": ["C:\Python25\python.exe"],
}
# XXX: find out which env variable is necessary to avoid the problem with python
# 2.6 and random module when importing tempfile
WINDOWS_ENV = os.environ
MAKENSIS = ["makensis"]
else:
WINDOWS_PYTHON = {
"2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
"2.5": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"]
}
WINDOWS_ENV = os.environ
MAKENSIS = ["wine", "makensis"]
# Start/end of the log (from git)
LOG_START = 'svn/tags/1.4.0'
LOG_END = 'master'
RELEASE_NOTES = 'doc/release/1.5.0-notes.rst'
#-------------------
# Windows installers
#-------------------
def superpack_name(pyver, numver):
"""Return the filename of the superpack installer."""
return 'numpy-%s-win32-superpack-python%s.exe' % (numver, pyver)
def internal_wininst_name(arch):
"""Return the name of the wininst as it will be inside the superpack (i.e.
with the arch encoded."""
ext = '.exe'
return "numpy-%s-%s%s" % (FULLVERSION, arch, ext)
def wininst_name(pyver):
"""Return the name of the installer built by wininst command."""
ext = '.exe'
return "numpy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
def prepare_nsis_script(pyver, numver):
if not os.path.exists(SUPERPACK_BUILD):
os.makedirs(SUPERPACK_BUILD)
tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
source = open(tpl, 'r')
target = open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w')
installer_name = superpack_name(pyver, numver)
cnt = "".join(source.readlines())
cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
for arch in ['nosse', 'sse2', 'sse3']:
cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
internal_wininst_name(arch))
target.write(cnt)
source.close()
target.close()
def bdist_wininst_arch(pyver, arch):
"""Arch specific wininst build."""
if os.path.exists("build"):
shutil.rmtree("build")
_bdist_wininst(pyver, SITECFG[arch])
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_superpack(options):
"""Build all arch specific wininst installers."""
pyver = options.python_version
def copy_bdist(arch):
# Copy the wininst in dist into the release directory
source = os.path.join('dist', wininst_name(pyver))
target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
if os.path.exists(target):
os.remove(target)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
os.rename(source, target)
bdist_wininst_arch(pyver, 'nosse')
copy_bdist("nosse")
bdist_wininst_arch(pyver, 'sse2')
copy_bdist("sse2")
bdist_wininst_arch(pyver, 'sse3')
copy_bdist("sse3")
idirs = options.installers.installersdir
pyver = options.python_version
prepare_nsis_script(pyver, FULLVERSION)
subprocess.check_call(MAKENSIS + ['numpy-superinstaller.nsi'],
cwd=SUPERPACK_BUILD)
# Copy the superpack into installers dir
if not os.path.exists(idirs):
os.makedirs(idirs)
source = os.path.join(SUPERPACK_BUILD, superpack_name(pyver, FULLVERSION))
target = os.path.join(idirs, superpack_name(pyver, FULLVERSION))
shutil.copy(source, target)
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_nosse(options):
"""Build the nosse wininst installer."""
bdist_wininst_arch(options.python_version, 'nosse')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse2(options):
"""Build the sse2 wininst installer."""
bdist_wininst_arch(options.python_version, 'sse2')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse3(options):
"""Build the sse3 wininst installer."""
bdist_wininst_arch(options.python_version, 'sse3')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_simple():
"""Simple wininst-based installer."""
pyver = options.bdist_wininst_simple.python_version
_bdist_wininst(pyver)
def _bdist_wininst(pyver, cfg_env=None):
cmd = WINDOWS_PYTHON[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
if cfg_env:
for k, v in WINDOWS_ENV.items():
cfg_env[k] = v
else:
cfg_env = WINDOWS_ENV
subprocess.check_call(cmd, env=cfg_env)
#----------------
# Bootstrap stuff
#----------------
@task
def bootstrap(options):
"""create virtualenv in ./bootstrap"""
try:
import virtualenv
except ImportError, e:
raise RuntimeError("virtualenv is needed for bootstrap")
bdir = options.bootstrap_dir
if not os.path.exists(bdir):
os.makedirs(bdir)
bscript = "boostrap.py"
options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
bscript)
options.virtualenv.no_site_packages = False
options.bootstrap.no_site_packages = False
call_task('paver.virtual.bootstrap')
sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
@task
def clean():
"""Remove build, dist, egg-info garbage."""
d = ['build', 'dist', 'numpy.egg-info']
for i in d:
if os.path.exists(i):
shutil.rmtree(i)
bdir = os.path.join('doc', options.sphinx.builddir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
def clean_bootstrap():
bdir = os.path.join(options.bootstrap.bootstrap_dir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
@needs('clean', 'clean_bootstrap')
def nuke(options):
"""Remove everything: build dir, installers, bootstrap dirs, etc..."""
for d in [options.superpack.builddir, options.installers.releasedir]:
if os.path.exists(d):
shutil.rmtree(d)
#---------------------
# Documentation tasks
#---------------------
@task
def html(options):
"""Build numpy documentation and put it into build/docs"""
# Don't use paver html target because of numpy bootstrapping problems
bdir = os.path.join("doc", options.sphinx.builddir, "html")
if os.path.exists(bdir):
shutil.rmtree(bdir)
subprocess.check_call(["make", "html"], cwd="doc")
html_destdir = options.html.builddir
if os.path.exists(html_destdir):
shutil.rmtree(html_destdir)
shutil.copytree(bdir, html_destdir)
@task
def latex():
"""Build numpy documentation in latex format."""
subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
sdir = options.doc.sdir
bdir = options.doc.bdir
bdir_latex = options.doc.bdir_latex
destdir_pdf = options.doc.destdir_pdf
def build_pdf():
subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
dry("Build pdf doc", build_pdf)
if os.path.exists(destdir_pdf):
shutil.rmtree(destdir_pdf)
os.makedirs(destdir_pdf)
user = os.path.join(bdir_latex, "numpy-user.pdf")
shutil.copy(user, os.path.join(destdir_pdf, "userguide.pdf"))
ref = os.path.join(bdir_latex, "numpy-ref.pdf")
shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
#------------------
# Mac OS X targets
#------------------
def dmg_name(fullversion, pyver):
return "numpy-%s-py%s-python.org.dmg" % (fullversion, pyver)
def macosx_version():
if not sys.platform == 'darwin':
raise ValueError("Not darwin ??")
st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
out = st.stdout.readlines()
ver = re.compile("ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
for i in out:
m = ver.match(i)
if m:
return m.groups()
def mpkg_name(pyver):
maj, min = macosx_version()[:2]
return "numpy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, maj, min)
def _build_mpkg(pyver):
ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build")
if pyver == "2.5":
sh("CC=gcc-4.0 LDFLAGS='%s' %s setupegg.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
else:
sh("LDFLAGS='%s' %s setupegg.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
@task
def simple_dmg():
pyver = "2.6"
src_dir = "dmg-source"
# Clean the source dir
if os.path.exists(src_dir):
shutil.rmtree(src_dir)
os.makedirs(src_dir)
# Build the mpkg
clean()
_build_mpkg(pyver)
# Build the dmg
shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
os.path.join(src_dir, mpkg_name(pyver)))
_create_dmg(pyver, src_dir, "NumPy Universal %s" % FULLVERSION)
@task
def bdist_mpkg(options):
call_task("clean")
try:
pyver = options.bdist_mpkg.python_version
except AttributeError:
pyver = options.python_version
_build_mpkg(pyver)
def _create_dmg(pyver, src_dir, volname=None):
# Build the dmg
image_name = dmg_name(FULLVERSION, pyver)
if os.path.exists(image_name):
os.remove(image_name)
cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
if volname:
cmd.extend(["-volname", "'%s'" % volname])
sh(" ".join(cmd))
@task
@needs("pdf")
@cmdopts([("python-version=", "p", "python version")])
def dmg(options):
try:
pyver = options.dmg.python_version
except AttributeError:
pyver = DEFAULT_PYTHON
idirs = options.installers.installersdir
call_task("clean")
_build_mpkg(pyver)
macosx_installer_dir = "tools/numpy-macosx-installer"
dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
if os.path.exists(dmg):
os.remove(dmg)
# Clean the image source
content = os.path.join(macosx_installer_dir, 'content')
if os.path.exists(content):
shutil.rmtree(content)
os.makedirs(content)
# Copy mpkg into image source
mpkg_source = os.path.join("dist", mpkg_name(pyver))
mpkg_target = os.path.join(content, "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver))
shutil.copytree(mpkg_source, mpkg_target)
# Copy docs into image source
pdf_docs = os.path.join(content, "Documentation")
if os.path.exists(pdf_docs):
shutil.rmtree(pdf_docs)
os.makedirs(pdf_docs)
user = os.path.join(options.doc.destdir_pdf, "userguide.pdf")
shutil.copy(user, os.path.join(pdf_docs, "userguide.pdf"))
ref = os.path.join(options.doc.destdir_pdf, "reference.pdf")
shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))
# Build the dmg
cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
"--volname", "numpy", os.path.basename(dmg), "./content"]
st = subprocess.check_call(cmd, cwd=macosx_installer_dir)
source = dmg
target = os.path.join(idirs, os.path.basename(dmg))
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
shutil.copy(source, target)
#--------------------------
# Source distribution stuff
#--------------------------
def tarball_name(type='gztar'):
root = 'numpy-%s' % FULLVERSION
if type == 'gztar':
return root + '.tar.gz'
elif type == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type)
@task
def sdist(options):
# To be sure to bypass paver when building sdist... paver + numpy.distutils
# do not play well together.
sh('python setup.py sdist --formats=gztar,zip')
# Copy the superpack into installers dir
idirs = options.installers.installersdir
if not os.path.exists(idirs):
os.makedirs(idirs)
for t in ['gztar', 'zip']:
source = os.path.join('dist', tarball_name(t))
target = os.path.join(idirs, tarball_name(t))
shutil.copy(source, target)
def compute_md5(idirs):
released = paver.path.path(idirs).listdir()
checksums = []
for f in released:
# Read each installer in binary mode so the digest matches the file users download.
m = md5(open(f, 'rb').read())
checksums.append('%s %s' % (m.hexdigest(), f))
return checksums
def write_release_task(options, filename='NOTES.txt'):
idirs = options.installers.installersdir
source = paver.path.path(RELEASE_NOTES)
target = paver.path.path(filename)
if target.exists():
target.remove()
source.copy(target)
ftarget = open(str(target), 'a')
ftarget.writelines("""
Checksums
=========
""")
ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
def write_log_task(options, filename='Changelog'):
st = subprocess.Popen(
['git', 'svn', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE)
out = st.communicate()[0]
a = open(filename, 'w')
a.writelines(out)
a.close()
@task
def write_release(options):
write_release_task(options)
@task
def write_log(options):
write_log_task(options)
@task
def write_release_and_log(options):
rdir = options.installers.releasedir
write_release_task(options, os.path.join(rdir, 'NOTES.txt'))
write_log_task(options, os.path.join(rdir, 'Changelog'))
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
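For reference, a minimal, self-contained sketch of what the compute_md5 / write_release_task pair in the pavement file above does (illustrative only; the function names and the release/notes paths are placeholder assumptions, and files are read in binary mode so the digests match the distributed installers):

import os
import hashlib

def checksum_lines(installers_dir):
    """Return 'md5  filename' lines for every file in installers_dir."""
    lines = []
    for name in sorted(os.listdir(installers_dir)):
        path = os.path.join(installers_dir, name)
        if not os.path.isfile(path):
            continue
        digest = hashlib.md5(open(path, 'rb').read()).hexdigest()
        lines.append('%s  %s' % (digest, name))
    return lines

def append_checksums(notes_path, installers_dir):
    """Append a Checksums section to an existing NOTES.txt-style file."""
    with open(notes_path, 'a') as notes:
        notes.write('\nChecksums\n=========\n')
        notes.write('\n'.join(checksum_lines(installers_dir)) + '\n')

# Example call (paths are assumptions): append_checksums('release/NOTES.txt', 'release/installers')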
builders/testdata/generic/python/simple/main.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple flask web server used in acceptance tests.
"""
import os
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route("/")
def hello():
return "PASS"
@app.route("/env")
def env():
"""Verify that the script is run using the correct version of the interpreter.
Returns:
String representing the response body.
"""
want = request.args.get("want")
if not want:
return "FAIL: ?want must not be empty"
got = os.environ.get("FOO")
if not got.startswith(want):
return "FAIL: $FOO={}, want {}".format(got, want)
return "PASS"
if __name__ == "__main__":
app.run(port=int(os.environ["PORT"]), debug=True)
| [] | [] | ["PORT", "FOO"] | [] | ["PORT", "FOO"] | python | 2 | 0 | |
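The acceptance-test server above is normally exercised over HTTP once the Flask app is running with PORT and FOO set; a rough client-side sketch (the port, version string, and URL are assumptions, not part of the test suite):

import os
import urllib.request

port = os.environ.get("PORT", "8080")   # assumed default for a local run
want = "3.7"                            # placeholder interpreter version prefix
url = "http://localhost:{}/env?want={}".format(port, want)
with urllib.request.urlopen(url) as resp:
    body = resp.read().decode()
print(body)  # expected to be "PASS" when $FOO starts with `want`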
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(
0.0 - epsilon, 1.0 + epsilon, key, metrics)
class EmbeddingMultiplierTest(tf.test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = tf.contrib.layers.one_hot_column(
tf.contrib.layers.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib._multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
tf.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
}
labels = tf.constant([[0], [0], [0]], dtype=tf.int32)
with self.assertRaisesRegexp(
ValueError, 'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, tf.contrib.learn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = tf.contrib.layers.embedding_column(
tf.contrib.layers.sparse_column_with_hash_bucket('language', 10),
dimension=1, initializer=tf.constant_initializer(0.1))
embedding_wire = tf.contrib.layers.embedding_column(
tf.contrib.layers.sparse_column_with_hash_bucket('wire', 10),
dimension=1, initializer=tf.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib._multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
tf.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
'wire':
tf.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
}
labels = tf.constant([[0], [0], [0]], dtype=tf.int32)
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, tf.contrib.learn.ModeKeys.TRAIN, params)
with tf.train.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class DNNLinearCombinedClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNLinearCombinedClassifier)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testEmbeddingMultiplier(self):
embedding_language = tf.contrib.layers.embedding_column(
tf.contrib.layers.sparse_column_with_hash_bucket('language', 10),
dimension=1, initializer=tf.constant_initializer(0.1))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual(
{embedding_language: 0.8},
classifier._estimator.params['embedding_lr_multipliers'])
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_feature = [tf.contrib.layers.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i): tf.reshape(tf.constant(iris.data[:, i], dtype=tf.float32),
[-1, 1])})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = tf.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
shape=[len(iris.target), 2])
labels = tf.reshape(tf.constant(iris.target, dtype=tf.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [tf.contrib.layers.real_valued_column(str(i))
for i in range(4)]
linear_features = [
tf.contrib.layers.bucketized_column(
cont_features[i], test_data.get_quantile_based_buckets(
iris.data[:, i], 10)) for i in range(4)
]
linear_features.append(tf.contrib.layers.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = tf.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
embedding_features = [
tf.contrib.layers.embedding_column(sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = tf.train.ClusterSpec({})
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = tf.contrib.learn.datasets.load_iris()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
labels = tf.constant([[1], [0], [0], [0]])
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
labels = tf.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
labels = tf.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=tf.train.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)
]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = tf.contrib.framework.get_global_step()
learning_rate = tf.train.exponential_decay(learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32)}
return features, labels
def _input_fn_predict():
y = tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=1)
features = {'x': y}
return features
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = tf.to_float(labels)
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, labels))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(
metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
def testVariableQuery(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
dense_shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[
tf.contrib.layers.real_valued_column('age'),
language,
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = tf.placeholder(tf.string)
return features, targets
classifier.export(export_dir, serving_input_fn, input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value('centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
dense_shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertNotIn('dnn/logits/biases', classifier.get_variable_names())
self.assertNotIn('dnn/logits/weights', classifier.get_variable_names())
self.assertEquals(1, len(classifier.linear_bias_))
self.assertEquals(2, len(classifier.linear_weights_))
self.assertEquals(1, len(classifier.linear_weights_['linear/age/weight']))
self.assertEquals(
100, len(classifier.linear_weights_['linear/language/weights']))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
dense_shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 99)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertNotIn('dnn/logits/biases', classifier.get_variable_names())
self.assertNotIn('dnn/logits/weights', classifier.get_variable_names())
self.assertEquals(1, len(classifier.linear_bias_))
self.assertEquals(99, len(classifier.linear_weights_))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertEquals(3, len(classifier.dnn_bias_))
self.assertEquals(3, len(classifier.dnn_weights_))
self.assertNotIn('linear/bias_weight', classifier.get_variable_names())
self.assertNotIn('linear/feature_BUCKETIZED_weights',
classifier.get_variable_names())
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
# hiddenlayer_0/weights,hiddenlayer_1/weights and dnn_logits/weights.
self.assertEquals(3, len(classifier.dnn_weights_))
# hiddenlayer_0/biases, hiddenlayer_1/biases, dnn_logits/biases.
self.assertEquals(3, len(classifier.dnn_bias_))
class DNNLinearCombinedRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = tf.constant([[100.], [3.], [2.], [2.]])
features = {'x': tf.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
return tf.reduce_sum(tf.mul(predictions, labels))
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': tf.contrib.metrics.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'
): tf.contrib.metrics.streaming_mean_squared_error})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
tf.contrib.metrics.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
return tf.reduce_sum(tf.mul(predictions, labels))
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric': MetricSpec(
metric_fn=_my_metric_op,
prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = tf.placeholder(tf.string)
return features, targets
regressor.export(export_dir, serving_input_fn, input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = tf.constant([[100.], [3.], [2.], [2.]])
features = {'x': tf.train.limit_epochs(
tf.constant([[100.], [3.], [2.], [2.]]), num_epochs=num_epochs)}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_estimator = lambda: tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
classifier = new_estimator()
classifier.fit(input_fn=_input_fn, steps=10)
predictions = list(classifier.predict(input_fn=predict_input_fn))
del classifier
classifier = new_estimator()
predictions2 = list(classifier.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = tf.train.ClusterSpec({})
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(tf.test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = tf.constant([[100.], [3.], [2.], [2.]])
features = {'x': tf.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = tf.constant([[1000.], [30.], [20.], [20.]])
features = {'x': tf.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
estimator_without_fe_fn = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
tf.test.main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
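The hard-coded loss expectations in testLoss, testLossWithWeights, and their regressor counterparts above all follow from the constant prediction p = 0.25 that the tests expect the model to learn for the 1-positive / 3-negative toy data. A quick numpy check of those constants (illustrative only, not part of the test file):

import numpy as np

p = 0.25                                  # learned constant prediction
labels = np.array([1., 0., 0., 0.])
eval_weights = np.array([7., 1., 1., 1.])

log_loss = -(labels * np.log(p) + (1 - labels) * np.log(1 - p))
print(log_loss.mean())                           # ~0.562 (classifier testLoss)
print((log_loss * eval_weights).sum() / 10.0)    # ~1.06  (classifier testLossWithWeights)

sq_loss = (labels - p) ** 2
print(sq_loss.mean())                            # 0.1875 (regressor testLoss)
print((sq_loss * eval_weights).sum() / 10.0)     # 0.4125 (regressor testLossWithWeights)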
HackerRank/Find the Point/solution.java
|
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
class Result {
/*
* Complete the 'findPoint' function below.
*
* The function is expected to return an INTEGER_ARRAY.
* The function accepts following parameters:
* 1. INTEGER px
* 2. INTEGER py
* 3. INTEGER qx
* 4. INTEGER qy
*/
public static List<Integer> findPoint(int px, int py, int qx, int qy) {
// Q is the midpoint of segment PR, so Q = (P + R) / 2, which gives R = 2Q - P per coordinate.
int rx = 2*qx - px;
int ry = 2*qy - py;
List<Integer> result = new ArrayList<>();
result.add(rx);
result.add(ry);
return result;
}
}
public class solution {
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int n = Integer.parseInt(bufferedReader.readLine().trim());
for (int nItr = 0; nItr < n; nItr++) {
String[] firstMultipleInput = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");
int px = Integer.parseInt(firstMultipleInput[0]);
int py = Integer.parseInt(firstMultipleInput[1]);
int qx = Integer.parseInt(firstMultipleInput[2]);
int qy = Integer.parseInt(firstMultipleInput[3]);
List<Integer> result = Result.findPoint(px, py, qx, qy);
for (int i = 0; i < result.size(); i++) {
bufferedWriter.write(String.valueOf(result.get(i)));
if (i != result.size() - 1) {
bufferedWriter.write(" ");
}
}
bufferedWriter.newLine();
}
bufferedReader.close();
bufferedWriter.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
catalyst/utils/image.py
|
from typing import List, Tuple
import logging
import os
import tempfile
import numpy as np
import imageio
from skimage.color import label2rgb, rgb2gray
import torch
_IMAGENET_STD = (0.229, 0.224, 0.225)
_IMAGENET_MEAN = (0.485, 0.456, 0.406)
logger = logging.getLogger(__name__)
JPEG4PY_ENABLED = False
if os.environ.get("FORCE_JPEG_TURBO", False):
try:
import jpeg4py as jpeg
# check libjpeg-turbo availability through image reading
img = np.zeros((1, 1, 3), dtype=np.uint8)
with tempfile.NamedTemporaryFile(suffix=".jpg") as fp:
imageio.imwrite(fp.name, img)
img = jpeg.JPEG(fp.name).decode()
JPEG4PY_ENABLED = True
except ImportError:
logger.warning(
"jpeg4py not available. "
"To install jpeg4py, run `pip install jpeg4py`."
)
except OSError:
logger.warning(
"libjpeg-turbo not available. "
"To install libjpeg-turbo, run `apt-get install libturbojpeg`."
)
def imread(uri, grayscale=False, expand_dims=True, rootpath=None, **kwargs):
"""
Args:
uri: {str, pathlib.Path, bytes, file}
The resource to load the image from, e.g. a filename, pathlib.Path,
http address or file object, see the docs for more info.
        grayscale: if True, the image is converted to grayscale
        expand_dims: if True, a 2D grayscale image is expanded to HxWx1
        rootpath: optional root directory prepended to ``uri``
    Returns:
        np.ndarray: loaded image
"""
if rootpath is not None:
uri = (
uri if uri.startswith(rootpath) else os.path.join(rootpath, uri)
)
if JPEG4PY_ENABLED and uri.endswith(("jpg", "JPG", "jpeg", "JPEG")):
img = jpeg.JPEG(uri).decode()
if grayscale:
img = rgb2gray(img)
else:
img = imageio.imread(uri, as_gray=grayscale, pilmode="RGB", **kwargs)
if expand_dims and len(img.shape) < 3: # grayscale
img = np.expand_dims(img, -1)
return img
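# Illustrative usage sketch (not part of the original module; the paths below
# are hypothetical):
#   img = imread("cats/001.jpg", grayscale=True, rootpath="/data")
#   assert img.ndim == 3 and img.shape[-1] == 1  # grayscale is kept as HxWx1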
imwrite = imageio.imwrite
imsave = imageio.imsave
def mimwrite_with_meta(uri, ims, meta, **kwargs):
writer = imageio.get_writer(uri, mode="I", **kwargs)
writer.set_meta_data(meta)
with writer:
for i in ims:
writer.append_data(i)
def tensor_from_rgb_image(image: np.ndarray) -> torch.Tensor:
image = np.moveaxis(image, -1, 0)
image = np.ascontiguousarray(image)
image = torch.from_numpy(image)
return image
def tensor_to_ndimage(
images: torch.Tensor,
mean: Tuple[float, float, float] = _IMAGENET_MEAN,
std: Tuple[float, float, float] = _IMAGENET_STD,
dtype=np.float32
) -> np.ndarray:
"""
Convert float image(s) with standard normalization to
np.ndarray with [0..1] when dtype is np.float32 and [0..255]
when dtype is `np.uint8`.
Args:
images: [B]xCxHxW float tensor
mean: mean to add
std: std to multiply
dtype: result ndarray dtype. Only float32 and uint8 are supported.
Returns:
[B]xHxWxC np.ndarray of dtype
"""
has_batch_dim = len(images.shape) == 4
num_shape = (3, 1, 1)
if has_batch_dim:
num_shape = (1, ) + num_shape
mean = images.new_tensor(mean).view(*num_shape)
std = images.new_tensor(std).view(*num_shape)
images = images * std + mean
images = images.clamp(0, 1).numpy()
images = np.moveaxis(images, -3, -1)
if dtype == np.uint8:
images = (images * 255).round().astype(dtype)
else:
assert dtype == np.float32, "Only float32 and uint8 are supported"
return images
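# Illustrative round trip (not part of the original module; `batch` is assumed
# to be a float [B]xCxHxW tensor normalized with the ImageNet mean/std above):
#   np_imgs = tensor_to_ndimage(batch, dtype=np.uint8)  # [B]xHxWxC uint8 in [0..255]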
def mask_to_overlay_image(
image: np.ndarray,
masks: List[np.ndarray],
threshold: float = 0,
mask_strength: float = 0.5
) -> np.ndarray:
"""
Draws every mask for with some color over image
Args:
image (np.ndarray): RGB image used as underlay for masks
masks (List[np.ndarray]): list of masks
threshold (float): threshold for masks binarization
mask_strength (float): opacity of colorized masks
Returns:
np.ndarray: HxWx3 image with overlay
"""
h, w = image.shape[:2]
labels = np.zeros((h, w), np.uint8)
for idx, mask in enumerate(masks, start=1):
labels[mask > threshold] = idx
mask = label2rgb(labels, bg_label=0)
image = np.array(image) / 255.0
image_with_overlay = image * (1 - mask_strength) + mask * mask_strength
image_with_overlay = (
(image_with_overlay * 255).clip(0, 255).round().astype(np.uint8)
)
return image_with_overlay
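# Illustrative usage sketch (hypothetical arrays, not part of the original module):
#   overlay = mask_to_overlay_image(image, [mask_a, mask_b], threshold=0.5)
#   # `overlay` is an HxWx3 uint8 image with each mask drawn in its own color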
def has_image_extension(uri) -> bool:
"""
Check that file has image extension
Args:
uri (Union[str, pathlib.Path]): The resource to load the file from
Returns:
bool: True if file has image extension, False otherwise
"""
_, ext = os.path.splitext(uri)
return ext.lower() in {".bmp", ".png", ".jpeg", ".jpg", ".tif", ".tiff"}
|
[] |
[] |
[
"FORCE_JPEG_TURBO"
] |
[]
|
["FORCE_JPEG_TURBO"]
|
python
| 1 | 0 | |
ziti/cmd/ziti/cmd/create_config.go
|
/*
Copyright NetFoundry, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/openziti/channel"
edge "github.com/openziti/edge/controller/config"
fabCtrl "github.com/openziti/fabric/controller"
fabForwarder "github.com/openziti/fabric/router/forwarder"
fabXweb "github.com/openziti/fabric/xweb"
foundation "github.com/openziti/transport"
"github.com/openziti/ziti/ziti/cmd/ziti/cmd/common"
cmdhelper "github.com/openziti/ziti/ziti/cmd/ziti/cmd/helpers"
"github.com/openziti/ziti/ziti/cmd/ziti/constants"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
"time"
)
const (
optionVerbose = "verbose"
defaultVerbose = false
verboseDescription = "Enable verbose logging. Logging will be sent to stdout if the config output is sent to a file. If output is sent to stdout, logging will be sent to stderr"
optionOutput = "output"
defaultOutput = "stdout"
outputDescription = "designated output destination for config, use \"stdout\" or a filepath."
)
// CreateConfigOptions the options for the create config command
type CreateConfigOptions struct {
common.CommonOptions
Output string
DatabaseFile string
}
type ConfigTemplateValues struct {
ZitiHome string
Hostname string
Controller ControllerTemplateValues
Router RouterTemplateValues
}
type ControllerTemplateValues struct {
Name string
Port string
AdvertisedAddress string
ListenerAddress string
IdentityCert string
IdentityServerCert string
IdentityKey string
IdentityCA string
MinQueuedConnects int
MaxQueuedConnects int
DefaultQueuedConnects int
MinOutstandingConnects int
MaxOutstandingConnects int
DefaultOutstandingConnects int
MinConnectTimeout time.Duration
MaxConnectTimeout time.Duration
DefaultConnectTimeout time.Duration
EdgeIdentityDuration time.Duration
EdgeRouterDuration time.Duration
Edge EdgeControllerValues
WebListener ControllerWebListenerValues
HealthCheck ControllerHealthCheckValues
}
type EdgeControllerValues struct {
AdvertisedPort string
ZitiSigningCert string
ZitiSigningKey string
APIActivityUpdateBatchSize int
APIActivityUpdateInterval time.Duration
APISessionTimeout time.Duration
ListenerHostPort string
AdvertisedHostPort string
IdentityCert string
IdentityServerCert string
IdentityKey string
IdentityCA string
}
type ControllerWebListenerValues struct {
IdleTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
MinTLSVersion string
MaxTLSVersion string
}
type ControllerHealthCheckValues struct {
Interval time.Duration
Timeout time.Duration
InitialDelay time.Duration
}
type RouterTemplateValues struct {
Name string
IsPrivate bool
IsFabric bool
IsWss bool
IdentityCert string
IdentityServerCert string
IdentityKey string
IdentityCA string
Edge EdgeRouterTemplateValues
Wss WSSRouterTemplateValues
Forwarder RouterForwarderTemplateValues
Listener RouterListenerTemplateValues
}
type EdgeRouterTemplateValues struct {
Hostname string
Port string
}
type WSSRouterTemplateValues struct {
WriteTimeout time.Duration
ReadTimeout time.Duration
IdleTimeout time.Duration
PongTimeout time.Duration
PingInterval time.Duration
HandshakeTimeout time.Duration
ReadBufferSize int
WriteBufferSize int
EnableCompression bool
}
type RouterForwarderTemplateValues struct {
LatencyProbeInterval time.Duration
XgressDialQueueLength int
XgressDialWorkerCount int
LinkDialQueueLength int
LinkDialWorkerCount int
}
type RouterListenerTemplateValues struct {
ConnectTimeout time.Duration
GetSessionTimeout time.Duration
BindPort int
OutQueueSize int
}
var workingDir string
var data = &ConfigTemplateValues{}
func init() {
zh := os.Getenv("ZITI_HOME")
if zh == "" {
wd, err := os.Getwd()
if wd == "" || err != nil {
//on error just use "."
workingDir = "."
}
}
workingDir = cmdhelper.NormalizePath(zh)
}
// NewCmdCreateConfig creates a command object for the "config" command
func NewCmdCreateConfig() *cobra.Command {
cmd := &cobra.Command{
Use: "config",
Short: "Creates a config file for specified Ziti component using environment variables",
Aliases: []string{"cfg"},
Run: func(cmd *cobra.Command, args []string) {
cmdhelper.CheckErr(cmd.Help())
},
}
cmd.AddCommand(NewCmdCreateConfigController())
cmd.AddCommand(NewCmdCreateConfigRouter())
cmd.AddCommand(NewCmdCreateConfigEnvironment())
return cmd
}
// Add flags that are global to all "create config" commands
func (options *CreateConfigOptions) addCreateFlags(cmd *cobra.Command) {
cmd.PersistentFlags().BoolVarP(&options.Verbose, optionVerbose, "v", defaultVerbose, verboseDescription)
cmd.PersistentFlags().StringVarP(&options.Output, optionOutput, "o", defaultOutput, outputDescription)
}
func (data *ConfigTemplateValues) populateEnvVars() {
// Get and add hostname to the params
hostname, err := os.Hostname()
handleVariableError(err, "hostname")
// Get and add ziti home to the params
zitiHome, err := cmdhelper.GetZitiHome()
handleVariableError(err, constants.ZitiHomeVarName)
// Get Ziti Controller Name
zitiCtrlHostname, err := cmdhelper.GetZitiCtrlName()
handleVariableError(err, constants.ZitiCtrlNameVarName)
// Get Ziti Edge Router Port
zitiEdgeRouterPort, err := cmdhelper.GetZitiEdgeRouterPort()
handleVariableError(err, constants.ZitiEdgeRouterPortVarName)
// Get Ziti Controller Listener Address
zitiCtrlListenerAddress, err := cmdhelper.GetZitiCtrlListenerAddress()
handleVariableError(err, constants.ZitiCtrlListenerAddressVarName)
// Get Ziti Controller Advertised Address
zitiCtrlAdvertisedAddress, err := cmdhelper.GetZitiCtrlAdvertisedAddress()
handleVariableError(err, constants.ZitiCtrlAdvertisedAddressVarName)
// Get Ziti Controller Port
zitiCtrlPort, err := cmdhelper.GetZitiCtrlPort()
handleVariableError(err, constants.ZitiCtrlPortVarName)
// Get Ziti Edge Controller Listener Host and Port
zitiEdgeCtrlListenerHostPort, err := cmdhelper.GetZitiEdgeCtrlListenerHostPort()
handleVariableError(err, constants.ZitiEdgeCtrlListenerHostPortVarName)
// Get Ziti Edge Controller Advertised Host and Port
zitiEdgeCtrlAdvertisedHostPort, err := cmdhelper.GetZitiEdgeCtrlAdvertisedHostPort()
handleVariableError(err, constants.ZitiEdgeCtrlAdvertisedHostPortVarName)
// Get Ziti Edge Controller Advertised Port
zitiEdgeCtrlAdvertisedPort, err := cmdhelper.GetZitiEdgeCtrlAdvertisedPort()
handleVariableError(err, constants.ZitiEdgeCtrlAdvertisedPortVarName)
data.ZitiHome = zitiHome
data.Hostname = hostname
data.Controller.Name = zitiCtrlHostname
data.Controller.ListenerAddress = zitiCtrlListenerAddress
data.Controller.AdvertisedAddress = zitiCtrlAdvertisedAddress
data.Controller.Port = zitiCtrlPort
data.Controller.Edge.ListenerHostPort = zitiEdgeCtrlListenerHostPort
data.Controller.Edge.AdvertisedHostPort = zitiEdgeCtrlAdvertisedHostPort
data.Router.Edge.Port = zitiEdgeRouterPort
data.Controller.Edge.AdvertisedPort = zitiEdgeCtrlAdvertisedPort
}
func (data *ConfigTemplateValues) populateDefaults() {
data.Router.Listener.BindPort = constants.DefaultListenerBindPort
data.Router.Listener.GetSessionTimeout = constants.DefaultGetSessionTimeout
data.Controller.MinQueuedConnects = channel.MinQueuedConnects
data.Controller.MaxQueuedConnects = channel.MaxQueuedConnects
data.Controller.DefaultQueuedConnects = channel.DefaultQueuedConnects
data.Controller.MinOutstandingConnects = channel.MinOutstandingConnects
data.Controller.MaxOutstandingConnects = channel.MaxOutstandingConnects
data.Controller.DefaultOutstandingConnects = channel.DefaultOutstandingConnects
data.Controller.MinConnectTimeout = channel.MinConnectTimeout
data.Controller.MaxConnectTimeout = channel.MaxConnectTimeout
data.Controller.DefaultConnectTimeout = channel.DefaultConnectTimeout
data.Controller.HealthCheck.Timeout = fabCtrl.DefaultHealthChecksBoltCheckTimeout
data.Controller.HealthCheck.Interval = fabCtrl.DefaultHealthChecksBoltCheckInterval
data.Controller.HealthCheck.InitialDelay = fabCtrl.DefaultHealthChecksBoltCheckInitialDelay
data.Controller.Edge.APIActivityUpdateBatchSize = edge.DefaultEdgeApiActivityUpdateBatchSize
data.Controller.Edge.APIActivityUpdateInterval = edge.DefaultEdgeAPIActivityUpdateInterval
data.Controller.Edge.APISessionTimeout = edge.DefaultEdgeSessionTimeout
data.Controller.EdgeIdentityDuration = edge.DefaultEdgeEnrollmentDuration
data.Controller.EdgeRouterDuration = edge.DefaultEdgeEnrollmentDuration
data.Controller.WebListener.IdleTimeout = edge.DefaultHttpIdleTimeout
data.Controller.WebListener.ReadTimeout = edge.DefaultHttpReadTimeout
data.Controller.WebListener.WriteTimeout = edge.DefaultHttpWriteTimeout
data.Controller.WebListener.MinTLSVersion = fabXweb.ReverseTlsVersionMap[fabXweb.MinTLSVersion]
data.Controller.WebListener.MaxTLSVersion = fabXweb.ReverseTlsVersionMap[fabXweb.MaxTLSVersion]
data.Router.Wss.WriteTimeout = foundation.DefaultWsWriteTimeout
data.Router.Wss.ReadTimeout = foundation.DefaultWsReadTimeout
data.Router.Wss.IdleTimeout = foundation.DefaultWsIdleTimeout
data.Router.Wss.PongTimeout = foundation.DefaultWsPongTimeout
data.Router.Wss.PingInterval = foundation.DefaultWsPingInterval
data.Router.Wss.HandshakeTimeout = foundation.DefaultWsHandshakeTimeout
data.Router.Wss.ReadBufferSize = foundation.DefaultWsReadBufferSize
data.Router.Wss.WriteBufferSize = foundation.DefaultWsWriteBufferSize
data.Router.Wss.EnableCompression = foundation.DefaultWsEnableCompression
data.Router.Forwarder.LatencyProbeInterval = fabForwarder.DefaultLatencyProbeInterval
data.Router.Forwarder.XgressDialQueueLength = fabForwarder.DefaultXgressDialWorkerQueueLength
data.Router.Forwarder.XgressDialWorkerCount = fabForwarder.DefaultXgressDialWorkerCount
data.Router.Forwarder.LinkDialQueueLength = fabForwarder.DefaultLinkDialQueueLength
data.Router.Forwarder.LinkDialWorkerCount = fabForwarder.DefaultLinkDialWorkerCount
data.Router.Listener.OutQueueSize = channel.DefaultOutQueueSize
data.Router.Listener.ConnectTimeout = channel.DefaultConnectTimeout
}
func handleVariableError(err error, varName string) {
if err != nil {
logrus.Errorf("Unable to get %s: %v", varName, err)
}
}
|
[
"\"ZITI_HOME\""
] |
[] |
[
"ZITI_HOME"
] |
[]
|
["ZITI_HOME"]
|
go
| 1 | 0 | |
coverage/control.py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Core control stuff for coverage.py."""
import atexit
import os
import platform
import sys
import time
from coverage import env
from coverage.annotate import AnnotateReporter
from coverage.backward import string_class, iitems
from coverage.collector import Collector
from coverage.config import read_coverage_config
from coverage.data import CoverageData, CoverageDataFiles
from coverage.debug import DebugControl, write_formatted_info
from coverage.disposition import disposition_debug_msg
from coverage.files import PathAliases, set_relative_directory, abs_file
from coverage.html import HtmlReporter
from coverage.inorout import InOrOut
from coverage.misc import CoverageException, bool_or_none, join_regex
from coverage.misc import file_be_gone, isolate_module
from coverage.plugin import FileReporter
from coverage.plugin_support import Plugins
from coverage.python import PythonFileReporter
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
try:
from coverage.multiproc import patch_multiprocessing
except ImportError: # pragma: only jython
# Jython has no multiprocessing module.
patch_multiprocessing = None
os = isolate_module(os)
class Coverage(object):
"""Programmatic access to coverage.py.
To use::
from coverage import Coverage
cov = Coverage()
cov.start()
#.. call your code ..
cov.stop()
cov.html_report(directory='covhtml')
"""
def __init__(
self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, omit=None, include=None, debug=None,
concurrency=None, check_preimported=False,
):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
create the final file name. If `data_suffix` is simply True, then a
suffix is created with the machine and process identity included.
`cover_pylib` is a boolean determining whether Python code installed
with the Python interpreter is measured. This includes the Python
standard library and any packages installed with the interpreter.
If `auto_data` is true, then any existing data file will be read when
coverage measurement starts, and data will be saved automatically when
measurement stops.
If `timid` is true, then a slower and simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions breaks the faster trace function.
If `branch` is true, then branch coverage will be measured in addition
to the usual statement coverage.
`config_file` determines what configuration file to read:
* If it is ".coveragerc", it is interpreted as if it were True,
for backward compatibility.
* If it is a string, it is the name of the file to read. If the
file can't be read, it is an error.
* If it is True, then a few standard files names are tried
(".coveragerc", "setup.cfg", "tox.ini"). It is not an error for
these files to not be found.
* If it is False, then no configuration file is read.
`source` is a list of file paths or package names. Only code located
in the trees indicated by the file paths or package names will be
measured.
`include` and `omit` are lists of file name patterns. Files that match
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
`debug` is a list of strings indicating what debugging information is
desired.
`concurrency` is a string indicating the concurrency library being used
in the measured code. Without this, coverage.py will get incorrect
results if these libraries are in use. Valid strings are "greenlet",
"eventlet", "gevent", "multiprocessing", or "thread" (the default).
This can also be a list of these strings.
If `check_preimported` is true, then when coverage is started, the
        already-imported files will be checked to see if they should be measured
by coverage. Importing measured files before coverage is started can
mean that code is missed.
.. versionadded:: 4.0
The `concurrency` parameter.
.. versionadded:: 4.2
The `concurrency` parameter can now be a list of strings.
.. versionadded:: 4.6
The `check_preimported` parameter.
"""
# Build our configuration from a number of sources.
self.config = read_coverage_config(
config_file=config_file,
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
source=source, run_omit=omit, run_include=include, debug=debug,
report_omit=omit, report_include=include,
concurrency=concurrency,
)
# This is injectable by tests.
self._debug_file = None
self._auto_load = self._auto_save = auto_data
self._data_suffix_specified = data_suffix
# Is it ok for no data to be collected?
self._warn_no_data = True
self._warn_unimported_source = True
self._warn_preimported_source = check_preimported
# A record of all the warnings that have been issued.
self._warnings = []
# Other instance attributes, set later.
self._data = self._data_files = self._collector = None
self._plugins = None
self._inorout = None
self._inorout_class = InOrOut
self._data_suffix = self._run_suffix = None
self._exclude_re = None
self._debug = None
# State machine variables:
# Have we initialized everything?
self._inited = False
# Have we started collecting and not stopped it?
self._started = False
# If we have sub-process measurement happening automatically, then we
# want any explicit creation of a Coverage object to mean, this process
# is already coverage-aware, so don't auto-measure it. By now, the
# auto-creation of a Coverage object has already happened. But we can
# find it and tell it not to save its data.
if not env.METACOV:
_prevent_sub_process_measurement()
def _init(self):
"""Set all the initial state.
This is called by the public methods to initialize state. This lets us
construct a :class:`Coverage` object, then tweak its state before this
function is called.
"""
if self._inited:
return
self._inited = True
# Create and configure the debugging controller. COVERAGE_DEBUG_FILE
# is an environment variable, the name of a file to append debug logs
# to.
if self._debug_file is None:
debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE")
if debug_file_name:
self._debug_file = open(debug_file_name, "a")
else:
self._debug_file = sys.stderr
self._debug = DebugControl(self.config.debug, self._debug_file)
# _exclude_re is a dict that maps exclusion list names to compiled regexes.
self._exclude_re = {}
set_relative_directory()
# Load plugins
self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug)
# Run configuring plugins.
for plugin in self._plugins.configurers:
# We need an object with set_option and get_option. Either self or
# self.config will do. Choosing randomly stops people from doing
# other things with those objects, against the public API. Yes,
# this is a bit childish. :)
plugin.configure([self, self.config][int(time.time()) % 2])
concurrency = self.config.concurrency or []
if "multiprocessing" in concurrency:
if not patch_multiprocessing:
raise CoverageException( # pragma: only jython
"multiprocessing is not supported on this Python"
)
patch_multiprocessing(rcfile=self.config.config_file)
# Multi-processing uses parallel for the subprocesses, so also use
# it for the main process.
self.config.parallel = True
self._collector = Collector(
should_trace=self._should_trace,
check_include=self._check_include_omit_etc,
timid=self.config.timid,
branch=self.config.branch,
warn=self._warn,
concurrency=concurrency,
)
# Early warning if we aren't going to be able to support plugins.
if self._plugins.file_tracers and not self._collector.supports_plugins:
self._warn(
"Plugin file tracers (%s) aren't supported with %s" % (
", ".join(
plugin._coverage_plugin_name
for plugin in self._plugins.file_tracers
),
self._collector.tracer_name(),
)
)
for plugin in self._plugins.file_tracers:
plugin._coverage_enabled = False
# Create the file classifying substructure.
self._inorout = self._inorout_class(warn=self._warn)
self._inorout.configure(self.config)
self._inorout.plugins = self._plugins
self._inorout.disp_class = self._collector.file_disposition_class
# Suffixes are a bit tricky. We want to use the data suffix only when
# collecting data, not when combining data. So we save it as
# `self._run_suffix` now, and promote it to `self._data_suffix` if we
# find that we are collecting data later.
if self._data_suffix_specified or self.config.parallel:
if not isinstance(self._data_suffix_specified, string_class):
# if data_suffix=True, use .machinename.pid.random
self._data_suffix_specified = True
else:
self._data_suffix_specified = None
self._data_suffix = None
self._run_suffix = self._data_suffix_specified
# Create the data file. We do this at construction time so that the
# data file will be written into the directory where the process
# started rather than wherever the process eventually chdir'd to.
self._data = CoverageData(debug=self._debug)
self._data_files = CoverageDataFiles(
basename=self.config.data_file, warn=self._warn, debug=self._debug,
)
# Set the reporting precision.
Numbers.set_precision(self.config.precision)
atexit.register(self._atexit)
# The user may want to debug things, show info if desired.
self._write_startup_debug()
def _write_startup_debug(self):
"""Write out debug info at startup if needed."""
wrote_any = False
with self._debug.without_callers():
if self._debug.should('config'):
config_info = sorted(self.config.__dict__.items())
write_formatted_info(self._debug, "config", config_info)
wrote_any = True
if self._debug.should('sys'):
write_formatted_info(self._debug, "sys", self.sys_info())
for plugin in self._plugins:
header = "sys: " + plugin._coverage_plugin_name
info = plugin.sys_info()
write_formatted_info(self._debug, header, info)
wrote_any = True
if wrote_any:
write_formatted_info(self._debug, "end", ())
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`.
Calls `_should_trace_internal`, and returns the FileDisposition.
"""
disp = self._inorout.should_trace(filename, frame)
if self._debug.should('trace'):
self._debug.write(disposition_debug_msg(disp))
return disp
def _check_include_omit_etc(self, filename, frame):
"""Check a file name against the include/omit/etc, rules, verbosely.
Returns a boolean: True if the file should be traced, False if not.
"""
reason = self._inorout.check_include_omit_etc(filename, frame)
if self._debug.should('trace'):
if not reason:
msg = "Including %r" % (filename,)
else:
msg = "Not including %r: %s" % (filename, reason)
self._debug.write(msg)
return not reason
def _warn(self, msg, slug=None):
"""Use `msg` as a warning.
For warning suppression, use `slug` as the shorthand.
"""
if slug in self.config.disable_warnings:
# Don't issue the warning
return
self._warnings.append(msg)
if slug:
msg = "%s (%s)" % (msg, slug)
if self._debug.should('pid'):
msg = "[%d] %s" % (os.getpid(), msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
def get_option(self, option_name):
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.
Returns the value of the option.
.. versionadded:: 4.0
"""
return self.config.get_option(option_name)
def set_option(self, option_name, value):
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with ``"run:branch"``.
`value` is the new value for the option. This should be an
appropriate Python value. For example, use True for booleans, not the
string ``"True"``.
As an example, calling::
cov.set_option("run:branch", True)
has the same effect as this configuration file::
[run]
branch = True
.. versionadded:: 4.0
"""
self.config.set_option(option_name, value)
def load(self):
"""Load previously-collected coverage data from the data file."""
self._init()
self._collector.reset()
self._data_files.read(self._data)
def start(self):
"""Start measuring code coverage.
Coverage measurement only occurs in functions called after
:meth:`start` is invoked. Statements in the same scope as
:meth:`start` won't be measured.
Once you invoke :meth:`start`, you must also call :meth:`stop`
eventually, or your process might not shut down cleanly.
"""
self._init()
self._inorout.warn_conflicting_settings()
if self._run_suffix:
# Calling start() means we're running code, so use the run_suffix
# as the data_suffix when we eventually save the data.
self._data_suffix = self._run_suffix
if self._auto_load:
self.load()
# See if we think some code that would eventually be measured has already been imported.
if self._warn_preimported_source:
self._inorout.warn_already_imported_files()
self._collector.start()
self._started = True
def stop(self):
"""Stop measuring code coverage."""
if self._started:
self._collector.stop()
self._started = False
def _atexit(self):
"""Clean up on process shutdown."""
if self._debug.should("process"):
self._debug.write("atexit: {0!r}".format(self))
if self._started:
self.stop()
if self._auto_save:
self.save()
def erase(self):
"""Erase previously-collected coverage data.
This removes the in-memory data collected in this session as well as
discarding the data file.
"""
self._init()
self._collector.reset()
self._data.erase()
self._data_files.erase(parallel=self.config.parallel)
def clear_exclude(self, which='exclude'):
"""Clear the exclude list."""
self._init()
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
def exclude(self, regex, which='exclude'):
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
selects lines that are treated differently during reporting.
`which` determines which list is modified. The "exclude" list selects
lines that are not considered executable at all. The "partial" list
indicates lines with branches that are not taken.
`regex` is a regular expression. The regex is added to the specified
list. If any of the regexes in the list is found in a line, the line
is marked for special treatment during reporting.
"""
self._init()
excl_list = getattr(self.config, which + "_list")
excl_list.append(regex)
self._exclude_regex_stale()
def _exclude_regex_stale(self):
"""Drop all the compiled exclusion regexes, a list was modified."""
self._exclude_re.clear()
def _exclude_regex(self, which):
"""Return a compiled regex for the given exclusion list."""
if which not in self._exclude_re:
excl_list = getattr(self.config, which + "_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
def get_exclude_list(self, which='exclude'):
"""Return a list of excluded regex patterns.
`which` indicates which list is desired. See :meth:`exclude` for the
lists that are available, and their meaning.
"""
self._init()
return getattr(self.config, which + "_list")
def save(self):
"""Save the collected coverage data to the data file."""
self._init()
data = self.get_data()
self._data_files.write(data, suffix=self._data_suffix)
def combine(self, data_paths=None, strict=False):
"""Combine together a number of similarly-named coverage data files.
All coverage data files whose name starts with `data_file` (from the
coverage() constructor) will be read, and combined together into the
current measurements.
`data_paths` is a list of files or directories from which data should
be combined. If no list is passed, then the data files from the
directory indicated by the current data file (probably the current
directory) will be combined.
If `strict` is true, then it is an error to attempt to combine when
there are no data files to combine.
.. versionadded:: 4.0
The `data_paths` parameter.
.. versionadded:: 4.3
The `strict` parameter.
"""
self._init()
self.get_data()
aliases = None
if self.config.paths:
aliases = PathAliases()
for paths in self.config.paths.values():
result = paths[0]
for pattern in paths[1:]:
aliases.add(pattern, result)
self._data_files.combine_parallel_data(
self._data, aliases=aliases, data_paths=data_paths, strict=strict,
)
def get_data(self):
"""Get the collected data.
Also warn about various problems collecting data.
Returns a :class:`coverage.CoverageData`, the collected coverage data.
.. versionadded:: 4.0
"""
self._init()
if self._collector.save_data(self._data):
self._post_save_work()
return self._data
def _post_save_work(self):
"""After saving data, look for warnings, post-work, etc.
Warn about things that should have happened but didn't.
Look for unexecuted files.
"""
# If there are still entries in the source_pkgs_unmatched list,
# then we never encountered those packages.
if self._warn_unimported_source:
self._inorout.warn_unimported_source()
# Find out if we got any data.
if not self._data and self._warn_no_data:
self._warn("No data was collected.", slug="no-data-collected")
# Find files that were never executed at all.
for file_path, plugin_name in self._inorout.find_unexecuted_files():
self._data.touch_file(file_path, plugin_name)
if self.config.note:
self._data.add_run_info(note=self.config.note)
# Backward compatibility with version 1.
def analysis(self, morf):
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
"""Analyze a module.
`morf` is a module or a file name. It will be analyzed to determine
its coverage statistics. The return value is a 5-tuple:
* The file name for the module.
* A list of line numbers of executable statements.
* A list of line numbers of excluded statements.
* A list of line numbers of statements not run (missing from
execution).
* A readable formatted string of the missing line numbers.
The analysis uses the source file itself and the current measured
coverage data.
"""
self._init()
analysis = self._analyze(morf)
return (
analysis.filename,
sorted(analysis.statements),
sorted(analysis.excluded),
sorted(analysis.missing),
analysis.missing_formatted(),
)
def _analyze(self, it):
"""Analyze a single morf or code unit.
Returns an `Analysis` object.
"""
data = self.get_data()
if not isinstance(it, FileReporter):
it = self._get_file_reporter(it)
return Analysis(data, it)
def _get_file_reporter(self, morf):
"""Get a FileReporter for a module or file name."""
plugin = None
file_reporter = "python"
if isinstance(morf, string_class):
abs_morf = abs_file(morf)
plugin_name = self._data.file_tracer(abs_morf)
if plugin_name:
plugin = self._plugins.get(plugin_name)
if plugin:
file_reporter = plugin.file_reporter(abs_morf)
if file_reporter is None:
raise CoverageException(
"Plugin %r did not provide a file reporter for %r." % (
plugin._coverage_plugin_name, morf
)
)
if file_reporter == "python":
file_reporter = PythonFileReporter(morf, self)
return file_reporter
def _get_file_reporters(self, morfs=None):
"""Get a list of FileReporters for a list of modules or file names.
For each module or file name in `morfs`, find a FileReporter. Return
the list of FileReporters.
If `morfs` is a single module or file name, this returns a list of one
FileReporter. If `morfs` is empty or None, then the list of all files
measured is used to find the FileReporters.
"""
if not morfs:
morfs = self._data.measured_files()
# Be sure we have a list.
if not isinstance(morfs, (list, tuple)):
morfs = [morfs]
file_reporters = []
for morf in morfs:
file_reporter = self._get_file_reporter(morf)
file_reporters.append(file_reporter)
return file_reporters
def report(
self, morfs=None, show_missing=None, ignore_errors=None,
file=None, # pylint: disable=redefined-builtin
omit=None, include=None, skip_covered=None,
):
"""Write a summary report to `file`.
Each module in `morfs` is listed, with counts of statements, executed
statements, missing statements, and a list of lines missed.
`include` is a list of file name patterns. Files that match will be
included in the report. Files matching `omit` will not be included in
the report.
If `skip_covered` is True, don't report on files with 100% coverage.
Returns a float, the total percentage covered.
"""
self.config.from_args(
ignore_errors=ignore_errors, report_omit=omit, report_include=include,
show_missing=show_missing, skip_covered=skip_covered,
)
reporter = SummaryReporter(self, self.config)
return reporter.report(morfs, outfile=file)
def annotate(
self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None,
):
"""Annotate a list of modules.
Each module in `morfs` is annotated. The source is written to a new
file, named with a ",cover" suffix, with each line prefixed with a
marker to indicate the coverage of the line. Covered lines have ">",
excluded lines have "-", and missing lines have "!".
See :meth:`report` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, report_omit=omit, report_include=include
)
reporter = AnnotateReporter(self, self.config)
reporter.report(morfs, directory=directory)
def html_report(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None, extra_css=None, title=None,
skip_covered=None):
"""Generate an HTML report.
The HTML is written to `directory`. The file "index.html" is the
overview starting point, with links to more detailed pages for
individual modules.
`extra_css` is a path to a file of other CSS to apply on the page.
It will be copied into the HTML directory.
`title` is a text string (not HTML) to use as the title of the HTML
report.
See :meth:`report` for other arguments.
Returns a float, the total percentage covered.
"""
self.config.from_args(
ignore_errors=ignore_errors, report_omit=omit, report_include=include,
html_dir=directory, extra_css=extra_css, html_title=title,
skip_covered=skip_covered,
)
reporter = HtmlReporter(self, self.config)
return reporter.report(morfs)
def xml_report(
self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None,
):
"""Generate an XML report of coverage results.
The report is compatible with Cobertura reports.
Each module in `morfs` is included in the report. `outfile` is the
path to write the file to, "-" will write to stdout.
See :meth:`report` for other arguments.
Returns a float, the total percentage covered.
"""
self.config.from_args(
ignore_errors=ignore_errors, report_omit=omit, report_include=include,
xml_output=outfile,
)
file_to_close = None
delete_file = False
if self.config.xml_output:
if self.config.xml_output == '-':
outfile = sys.stdout
else:
# Ensure that the output directory is created; done here
# because this report pre-opens the output file.
# HTMLReport does this using the Report plumbing because
# its task is more complex, being multiple files.
output_dir = os.path.dirname(self.config.xml_output)
if output_dir and not os.path.isdir(output_dir):
os.makedirs(output_dir)
open_kwargs = {}
if env.PY3:
open_kwargs['encoding'] = 'utf8'
outfile = open(self.config.xml_output, "w", **open_kwargs)
file_to_close = outfile
try:
reporter = XmlReporter(self, self.config)
return reporter.report(morfs, outfile=outfile)
except CoverageException:
delete_file = True
raise
finally:
if file_to_close:
file_to_close.close()
if delete_file:
file_be_gone(self.config.xml_output)
def sys_info(self):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
self._init()
def plugin_info(plugins):
"""Make an entry for the sys_info from a list of plug-ins."""
entries = []
for plugin in plugins:
entry = plugin._coverage_plugin_name
if not plugin._coverage_enabled:
entry += " (disabled)"
entries.append(entry)
return entries
info = [
('version', covmod.__version__),
('coverage', covmod.__file__),
('tracer', self._collector.tracer_name()),
('plugins.file_tracers', plugin_info(self._plugins.file_tracers)),
('plugins.configurers', plugin_info(self._plugins.configurers)),
('configs_attempted', self.config.attempted_config_files),
('configs_read', self.config.config_files_read),
('config_file', self.config.config_file),
('data_path', self._data_files.filename),
('python', sys.version.replace('\n', '')),
('platform', platform.platform()),
('implementation', platform.python_implementation()),
('executable', sys.executable),
('cwd', os.getcwd()),
('path', sys.path),
('environment', sorted(
("%s = %s" % (k, v))
for k, v in iitems(os.environ)
if k.startswith(("COV", "PY"))
)),
('command_line', " ".join(getattr(sys, 'argv', ['???']))),
]
info.extend(self._inorout.sys_info())
return info
def process_startup():
"""Call this at Python start-up to perhaps measure coverage.
If the environment variable COVERAGE_PROCESS_START is defined, coverage
measurement is started. The value of the variable is the config file
to use.
There are two ways to configure your Python installation to invoke this
function when Python starts:
#. Create or append to sitecustomize.py to add these lines::
import coverage
coverage.process_startup()
#. Create a .pth file in your Python installation containing::
import coverage; coverage.process_startup()
Returns the :class:`Coverage` instance that was started, or None if it was
not started by this call.
"""
cps = os.environ.get("COVERAGE_PROCESS_START")
if not cps:
# No request for coverage, nothing to do.
return None
# This function can be called more than once in a process. This happens
# because some virtualenv configurations make the same directory visible
# twice in sys.path. This means that the .pth file will be found twice,
# and executed twice, executing this function twice. We set a global
# flag (an attribute on this function) to indicate that coverage.py has
# already been started, so we can avoid doing it twice.
#
# https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more
# details.
if hasattr(process_startup, "coverage"):
# We've annotated this function before, so we must have already
# started coverage.py in this process. Nothing to do.
return None
cov = Coverage(config_file=cps)
process_startup.coverage = cov
cov._warn_no_data = False
cov._warn_unimported_source = False
cov._warn_preimported_source = False
cov._auto_save = True
cov.start()
return cov
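# Illustrative setup (an assumption, not part of the original module): with
#   COVERAGE_PROCESS_START=/path/to/.coveragerc
# exported in the environment, any Python process whose sitecustomize.py or a
# .pth file calls coverage.process_startup() will start measurement automatically.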
def _prevent_sub_process_measurement():
"""Stop any subprocess auto-measurement from writing data."""
auto_created_coverage = getattr(process_startup, "coverage", None)
if auto_created_coverage is not None:
auto_created_coverage._auto_save = False
|
[] |
[] |
[
"COVERAGE_DEBUG_FILE",
"COVERAGE_PROCESS_START"
] |
[]
|
["COVERAGE_DEBUG_FILE", "COVERAGE_PROCESS_START"]
|
python
| 2 | 0 | |
test_data/samples/distlib_compat_output.py
|
# Taken from distlib
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;
# All Rights Reserved" are retained in Python alone or in any derivative version
# prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
# Copyright (C) 2013-2017 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
if sys.version_info[0] < 3: # pragma: no cover
from StringIO import StringIO
string_types = basestring,
text_type = unicode
from types import FileType as file_type
import __builtin__ as builtins
import ConfigParser as configparser
from ._backport import shutil
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
pathname2url, ContentTooShortError, splittype)
def quote(s):
if isinstance(s, unicode):
s = s.encode('utf-8')
return _quote(s)
import urllib2
from urllib2 import (Request, urlopen, URLError, HTTPError,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib2 import HTTPSHandler
import httplib
import xmlrpclib
import Queue as queue
from HTMLParser import HTMLParser
import htmlentitydefs
raw_input = raw_input
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
else: # pragma: no cover
from io import StringIO
string_types = str,
text_type = str
from io import TextIOWrapper as file_type
import builtins
import configparser
import shutil
from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
unquote, urlsplit, urlunsplit, splittype)
from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
pathname2url,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib.request import HTTPSHandler
from urllib.error import HTTPError, URLError, ContentTooShortError
import http.client as httplib
import urllib.request as urllib2
import xmlrpc.client as xmlrpclib
import queue
from html.parser import HTMLParser
import html.entities as htmlentitydefs
raw_input = input
from itertools import filterfalse
filter = filter
try:
from ssl import match_hostname, CertificateError
except ImportError: # pragma: no cover
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
parts = dn.split('.')
leftmost, remainder = parts[0], parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
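    # Illustrative behaviour (not part of the original module): per RFC 6125 the
    # wildcard spans only a single left-most label, e.g.
    #   _dnsname_match('*.example.com', 'www.example.com')  -> truthy match
    #   _dnsname_match('*.example.com', 'a.b.example.com')  -> None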
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
try:
from types import SimpleNamespace as Container
except ImportError: # pragma: no cover
class Container(object):
"""
A generic container for when multiple values need to be returned
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
try:
from shutil import which
except ImportError: # pragma: no cover
# Implementation from Python 3.3
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
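    # Illustrative usage sketch (not part of the original module):
    #   which("python")       -> e.g. "/usr/bin/python" if it is on PATH
    #   which("./script.sh")  -> "./script.sh" only if it exists and is executable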
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile
if hasattr(BaseZipFile, '__enter__'): # pragma: no cover
ZipFile = BaseZipFile
else: # pragma: no cover
from zipfile import ZipExtFile as BaseZipExtFile
class ZipExtFile(BaseZipExtFile):
def __init__(self, base):
self.__dict__.update(base.__dict__)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
class ZipFile(BaseZipFile):
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
def open(self, *args, **kwargs):
base = BaseZipFile.open(self, *args, **kwargs)
return ZipExtFile(base)
try:
from platform import python_implementation
except ImportError: # pragma: no cover
def python_implementation():
"""Return a string identifying the Python implementation."""
if 'PyPy' in sys.version:
return 'PyPy'
if os.name == 'java':
return 'Jython'
if sys.version.startswith('IronPython'):
return 'IronPython'
return 'CPython'
try:
import sysconfig
except ImportError: # pragma: no cover
from ._backport import sysconfig
try:
callable = callable
except NameError: # pragma: no cover
from collections.abc import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
# Issue #99: on some systems (e.g. containerised),
# sys.getfilesystemencoding() returns None, and we need a real value,
# so fall back to utf-8. From the CPython 2.7 docs relating to Unix and
    # sys.getfilesystemencoding(): the return value is "the user’s preference
# according to the result of nl_langinfo(CODESET), or None if the
# nl_langinfo(CODESET) failed."
_fsencoding = sys.getfilesystemencoding() or 'utf-8'
if _fsencoding == 'mbcs':
_fserrors = 'strict'
else:
_fserrors = 'surrogateescape'
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, text_type):
return filename.encode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
def fsdecode(filename):
if isinstance(filename, text_type):
return filename
elif isinstance(filename, bytes):
return filename.decode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
try:
from tokenize import detect_encoding
except ImportError: # pragma: no cover
from codecs import BOM_UTF8, lookup
import re
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode('utf-8')
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = '{} for {!r}'.format(msg, filename)
raise SyntaxError(msg)
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename,
encoding)
raise SyntaxError(msg)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
if filename is None:
msg = 'encoding problem: utf-8'
else:
msg = 'encoding problem for {!r}: utf-8'.format(filename)
raise SyntaxError(msg)
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
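# Illustrative usage sketch (not part of the original module): detect_encoding()
# reads at most two lines and reports the declared source encoding (PEP 263
# cookie or UTF-8 BOM), falling back to 'utf-8'.
def _detect_encoding_example():
    import io
    source = b"# -*- coding: latin-1 -*-\nx = 1\n"
    encoding, consumed = detect_encoding(io.BytesIO(source).readline)
    # encoding == 'iso-8859-1' (the normalised form of 'latin-1'); `consumed`
    # holds the raw line(s) already read, so a caller can replay them.
    return encoding, consumed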
# For converting &amp; <-> & etc.
try:
from html import escape
except ImportError:
from cgi import escape
if sys.version_info[:2] < (3, 4):
unescape = HTMLParser().unescape
else:
from html import unescape
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running=None):
'od.__repr__() <==> repr(od)'
if not _repr_running: _repr_running = {}
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
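# Illustrative usage sketch (not part of the original module): OrderedDict
# (stdlib or the backport above) remembers insertion order and supports
# order-aware removal from either end via popitem().
def _ordered_dict_example():
    od = OrderedDict()
    od['first'] = 1
    od['second'] = 2
    od['third'] = 3
    assert list(od.keys()) == ['first', 'second', 'third']
    assert od.popitem(last=False) == ('first', 1)   # FIFO removal
    assert od.popitem() == ('third', 3)             # LIFO removal (default)
    return od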
try:
from collections import ChainMap
except ImportError: # pragma: no cover
from collections import MutableMapping
try:
from reprlib import recursive_repr as _recursive_repr
except ImportError:
def _recursive_repr(fillvalue='...'):
'''
Decorator to make a repr function return fillvalue for a recursive
call
'''
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), _get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
        The underlying mappings are stored in a list. That list is public and can
        be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
            'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
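# Illustrative usage sketch (not part of the original module): ChainMap (stdlib
# or the backport above) layers mappings so that lookups fall through the chain
# while writes only touch the first mapping, a convenient pattern for
# defaults plus overrides.
def _chain_map_example():
    defaults = {'colour': 'blue', 'size': 'medium'}
    overrides = {'colour': 'red'}
    combined = ChainMap(overrides, defaults)
    assert combined['colour'] == 'red'       # found in the first mapping
    assert combined['size'] == 'medium'      # falls through to the defaults
    combined['size'] = 'large'               # writes land in `overrides`
    assert overrides == {'colour': 'red', 'size': 'large'}
    return combined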
try:
from importlib.util import cache_from_source # Python >= 3.4
except ImportError: # pragma: no cover
try:
from imp import cache_from_source
except ImportError: # pragma: no cover
def cache_from_source(path, debug_override=None):
assert path.endswith('.py')
if debug_override is None:
debug_override = __debug__
if debug_override:
suffix = 'c'
else:
suffix = 'o'
return path + suffix
try:
from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
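# Illustrative usage sketch (not part of the original module): BaseConfigurator
# (stdlib logging.config or the fallback above) resolves 'ext://' values to
# importable objects and 'cfg://' values to other entries of the same config;
# ConvertingDict performs these conversions lazily on item access. The keys
# below ('formatter', 'levels', 'handler_level') are made up for illustration.
def _configurator_example():
    cfg = BaseConfigurator({
        'formatter': 'ext://logging.Formatter',
        'levels': {'default': 'INFO'},
        'handler_level': 'cfg://levels.default',
    })
    formatter_cls = cfg.config['formatter']   # -> the logging.Formatter class
    level = cfg.config['handler_level']       # -> 'INFO'
    return formatter_cls, level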
|
[] |
[] |
[
"PATH",
"PATHEXT"
] |
[]
|
["PATH", "PATHEXT"]
|
python
| 2 | 0 | |
datastore/tests/system/utils/clear_datastore.py
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to populate datastore with system test data."""
from __future__ import print_function
import os
import sys
import six
from google.cloud import datastore
FETCH_MAX = 20
ALL_KINDS = (
"Character",
"Company",
"Kind",
"Person",
"Post",
"uuid_key",
"timestamp_key",
)
TRANSACTION_MAX_GROUPS = 5
MAX_DEL_ENTITIES = 500
def print_func(message):
if os.getenv("GOOGLE_CLOUD_NO_PRINT") != "true":
print(message)
def get_ancestors(entities):
# NOTE: A key will always have at least one path element.
key_roots = [entity.key.flat_path[:2] for entity in entities]
# Return the unique roots.
return list(set(key_roots))
def delete_chunks(client, results):
while results:
chunk, results = results[:MAX_DEL_ENTITIES], results[MAX_DEL_ENTITIES:]
client.delete_multi([result.key for result in chunk])
def remove_kind(kind, client):
query = client.query(kind=kind)
query.keys_only()
results = list(query.fetch())
if not results:
return
delete_outside_transaction = False
with client.transaction():
# Now that we have all results, we seek to delete.
print_func("Deleting keys:")
print_func(results)
ancestors = get_ancestors(results)
if len(ancestors) > TRANSACTION_MAX_GROUPS:
delete_outside_transaction = True
else:
delete_chunks(client, results)
if delete_outside_transaction:
delete_chunks(client, results)
def main():
client = datastore.Client()
kinds = sys.argv[1:]
if len(kinds) == 0:
kinds = ALL_KINDS
print_func("This command will remove all entities for " "the following kinds:")
print_func("\n".join("- " + val for val in kinds))
response = six.moves.input("Is this OK [y/n]? ")
if response.lower() == "y":
for kind in kinds:
remove_kind(kind, client)
else:
print_func("Doing nothing.")
if __name__ == "__main__":
main()
|
[] |
[] |
[
"GOOGLE_CLOUD_NO_PRINT"
] |
[]
|
["GOOGLE_CLOUD_NO_PRINT"]
|
python
| 1 | 0 | |
python/pyarrow/tests/test_fs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timezone, timedelta
import gzip
import os
import pathlib
import pickle
import sys
import pytest
import weakref
import pyarrow as pa
from pyarrow.tests.test_io import assert_file_not_found
from pyarrow.vendored.version import Version
from pyarrow.fs import (FileType, FileInfo, FileSelector, FileSystem,
LocalFileSystem, SubTreeFileSystem, _MockFileSystem,
FileSystemHandler, PyFileSystem, FSSpecHandler)
class DummyHandler(FileSystemHandler):
def __init__(self, value=42):
self._value = value
def __eq__(self, other):
if isinstance(other, FileSystemHandler):
return self._value == other._value
return NotImplemented
def __ne__(self, other):
if isinstance(other, FileSystemHandler):
return self._value != other._value
return NotImplemented
def get_type_name(self):
return "dummy"
def normalize_path(self, path):
return path
def get_file_info(self, paths):
info = []
for path in paths:
if "file" in path:
info.append(FileInfo(path, FileType.File))
elif "dir" in path:
info.append(FileInfo(path, FileType.Directory))
elif "notfound" in path:
info.append(FileInfo(path, FileType.NotFound))
elif "badtype" in path:
# Will raise when converting
info.append(object())
else:
raise IOError
return info
def get_file_info_selector(self, selector):
if selector.base_dir != "somedir":
if selector.allow_not_found:
return []
else:
raise FileNotFoundError(selector.base_dir)
infos = [
FileInfo("somedir/file1", FileType.File, size=123),
FileInfo("somedir/subdir1", FileType.Directory),
]
if selector.recursive:
infos += [
FileInfo("somedir/subdir1/file2", FileType.File, size=456),
]
return infos
def create_dir(self, path, recursive):
if path == "recursive":
assert recursive is True
elif path == "non-recursive":
assert recursive is False
else:
raise IOError
def delete_dir(self, path):
assert path == "delete_dir"
def delete_dir_contents(self, path):
if not path.strip("/"):
raise ValueError
assert path == "delete_dir_contents"
def delete_root_dir_contents(self):
pass
def delete_file(self, path):
assert path == "delete_file"
def move(self, src, dest):
assert src == "move_from"
assert dest == "move_to"
def copy_file(self, src, dest):
assert src == "copy_file_from"
assert dest == "copy_file_to"
def open_input_stream(self, path):
if "notfound" in path:
raise FileNotFoundError(path)
data = "{0}:input_stream".format(path).encode('utf8')
return pa.BufferReader(data)
def open_input_file(self, path):
if "notfound" in path:
raise FileNotFoundError(path)
data = "{0}:input_file".format(path).encode('utf8')
return pa.BufferReader(data)
def open_output_stream(self, path, metadata):
if "notfound" in path:
raise FileNotFoundError(path)
return pa.BufferOutputStream()
def open_append_stream(self, path, metadata):
if "notfound" in path:
raise FileNotFoundError(path)
return pa.BufferOutputStream()
class ProxyHandler(FileSystemHandler):
def __init__(self, fs):
self._fs = fs
def __eq__(self, other):
if isinstance(other, ProxyHandler):
return self._fs == other._fs
return NotImplemented
def __ne__(self, other):
if isinstance(other, ProxyHandler):
return self._fs != other._fs
return NotImplemented
def get_type_name(self):
return "proxy::" + self._fs.type_name
def normalize_path(self, path):
return self._fs.normalize_path(path)
def get_file_info(self, paths):
return self._fs.get_file_info(paths)
def get_file_info_selector(self, selector):
return self._fs.get_file_info(selector)
def create_dir(self, path, recursive):
return self._fs.create_dir(path, recursive=recursive)
def delete_dir(self, path):
return self._fs.delete_dir(path)
def delete_dir_contents(self, path):
return self._fs.delete_dir_contents(path)
def delete_root_dir_contents(self):
return self._fs.delete_dir_contents("", accept_root_dir=True)
def delete_file(self, path):
return self._fs.delete_file(path)
def move(self, src, dest):
return self._fs.move(src, dest)
def copy_file(self, src, dest):
return self._fs.copy_file(src, dest)
def open_input_stream(self, path):
return self._fs.open_input_stream(path)
def open_input_file(self, path):
return self._fs.open_input_file(path)
def open_output_stream(self, path, metadata):
return self._fs.open_output_stream(path, metadata=metadata)
def open_append_stream(self, path, metadata):
return self._fs.open_append_stream(path, metadata=metadata)
@pytest.fixture
def localfs(request, tempdir):
return dict(
fs=LocalFileSystem(),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_localfs(request, tempdir):
return dict(
fs=PyFileSystem(ProxyHandler(LocalFileSystem())),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def mockfs(request):
return dict(
fs=_MockFileSystem(),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_mockfs(request):
return dict(
fs=PyFileSystem(ProxyHandler(_MockFileSystem())),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def localfs_with_mmap(request, tempdir):
return dict(
fs=LocalFileSystem(use_mmap=True),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def subtree_localfs(request, tempdir, localfs):
return dict(
fs=SubTreeFileSystem(str(tempdir), localfs['fs']),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def s3fs(request, s3_connection, s3_server):
request.config.pyarrow.requires('s3')
from pyarrow.fs import S3FileSystem
host, port, access_key, secret_key = s3_connection
bucket = 'pyarrow-filesystem/'
fs = S3FileSystem(
access_key=access_key,
secret_key=secret_key,
endpoint_override='{}:{}'.format(host, port),
scheme='http'
)
fs.create_dir(bucket)
yield dict(
fs=fs,
pathfn=bucket.__add__,
allow_copy_file=True,
allow_move_dir=False,
allow_append_to_file=False,
)
fs.delete_dir(bucket)
@pytest.fixture
def subtree_s3fs(request, s3fs):
prefix = 'pyarrow-filesystem/prefix/'
return dict(
fs=SubTreeFileSystem(prefix, s3fs['fs']),
pathfn=prefix.__add__,
allow_copy_file=True,
allow_move_dir=False,
allow_append_to_file=False,
)
@pytest.fixture
def hdfs(request, hdfs_connection):
request.config.pyarrow.requires('hdfs')
if not pa.have_libhdfs():
pytest.skip('Cannot locate libhdfs')
from pyarrow.fs import HadoopFileSystem
host, port, user = hdfs_connection
fs = HadoopFileSystem(host, port=port, user=user)
return dict(
fs=fs,
pathfn=lambda p: p,
allow_copy_file=False,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_fsspec_localfs(request, tempdir):
fsspec = pytest.importorskip("fsspec")
fs = fsspec.filesystem('file')
return dict(
fs=PyFileSystem(FSSpecHandler(fs)),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_fsspec_memoryfs(request, tempdir):
fsspec = pytest.importorskip("fsspec", minversion="0.7.5")
if fsspec.__version__ == "0.8.5":
# see https://issues.apache.org/jira/browse/ARROW-10934
pytest.skip("Bug in fsspec 0.8.5 for in-memory filesystem")
fs = fsspec.filesystem('memory')
return dict(
fs=PyFileSystem(FSSpecHandler(fs)),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_fsspec_s3fs(request, s3_connection, s3_server):
s3fs = pytest.importorskip("s3fs")
if (sys.version_info < (3, 7) and
Version(s3fs.__version__) >= Version("0.5")):
pytest.skip("s3fs>=0.5 version is async and requires Python >= 3.7")
host, port, access_key, secret_key = s3_connection
bucket = 'pyarrow-filesystem/'
fs = s3fs.S3FileSystem(
key=access_key,
secret=secret_key,
client_kwargs=dict(endpoint_url='http://{}:{}'.format(host, port))
)
fs = PyFileSystem(FSSpecHandler(fs))
fs.create_dir(bucket)
yield dict(
fs=fs,
pathfn=bucket.__add__,
allow_copy_file=True,
allow_move_dir=False,
allow_append_to_file=True,
)
fs.delete_dir(bucket)
@pytest.fixture(params=[
pytest.param(
pytest.lazy_fixture('localfs'),
id='LocalFileSystem()'
),
pytest.param(
pytest.lazy_fixture('localfs_with_mmap'),
id='LocalFileSystem(use_mmap=True)'
),
pytest.param(
pytest.lazy_fixture('subtree_localfs'),
id='SubTreeFileSystem(LocalFileSystem())'
),
pytest.param(
pytest.lazy_fixture('s3fs'),
id='S3FileSystem'
),
pytest.param(
pytest.lazy_fixture('hdfs'),
id='HadoopFileSystem'
),
pytest.param(
pytest.lazy_fixture('mockfs'),
id='_MockFileSystem()'
),
pytest.param(
pytest.lazy_fixture('py_localfs'),
id='PyFileSystem(ProxyHandler(LocalFileSystem()))'
),
pytest.param(
pytest.lazy_fixture('py_mockfs'),
id='PyFileSystem(ProxyHandler(_MockFileSystem()))'
),
pytest.param(
pytest.lazy_fixture('py_fsspec_localfs'),
id='PyFileSystem(FSSpecHandler(fsspec.LocalFileSystem()))'
),
pytest.param(
pytest.lazy_fixture('py_fsspec_memoryfs'),
id='PyFileSystem(FSSpecHandler(fsspec.filesystem("memory")))'
),
pytest.param(
pytest.lazy_fixture('py_fsspec_s3fs'),
id='PyFileSystem(FSSpecHandler(s3fs.S3FileSystem()))'
),
])
def filesystem_config(request):
return request.param
@pytest.fixture
def fs(request, filesystem_config):
return filesystem_config['fs']
@pytest.fixture
def pathfn(request, filesystem_config):
return filesystem_config['pathfn']
@pytest.fixture
def allow_move_dir(request, filesystem_config):
return filesystem_config['allow_move_dir']
@pytest.fixture
def allow_copy_file(request, filesystem_config):
return filesystem_config['allow_copy_file']
@pytest.fixture
def allow_append_to_file(request, filesystem_config):
return filesystem_config['allow_append_to_file']
def check_mtime(file_info):
assert isinstance(file_info.mtime, datetime)
assert isinstance(file_info.mtime_ns, int)
assert file_info.mtime_ns >= 0
assert file_info.mtime_ns == pytest.approx(
file_info.mtime.timestamp() * 1e9)
# It's an aware UTC datetime
tzinfo = file_info.mtime.tzinfo
assert tzinfo is not None
assert tzinfo.utcoffset(None) == timedelta(0)
def check_mtime_absent(file_info):
assert file_info.mtime is None
assert file_info.mtime_ns is None
def check_mtime_or_absent(file_info):
if file_info.mtime is None:
check_mtime_absent(file_info)
else:
check_mtime(file_info)
def skip_fsspec_s3fs(fs):
if fs.type_name == "py::fsspec+s3":
pytest.xfail(reason="Not working with fsspec's s3fs")
def test_file_info_constructor():
dt = datetime.fromtimestamp(1568799826, timezone.utc)
info = FileInfo("foo/bar")
assert info.path == "foo/bar"
assert info.base_name == "bar"
assert info.type == FileType.Unknown
assert info.size is None
check_mtime_absent(info)
info = FileInfo("foo/baz.txt", type=FileType.File, size=123,
mtime=1568799826.5)
assert info.path == "foo/baz.txt"
assert info.base_name == "baz.txt"
assert info.type == FileType.File
assert info.size == 123
assert info.mtime_ns == 1568799826500000000
check_mtime(info)
info = FileInfo("foo", type=FileType.Directory, mtime=dt)
assert info.path == "foo"
assert info.base_name == "foo"
assert info.type == FileType.Directory
assert info.size is None
assert info.mtime == dt
assert info.mtime_ns == 1568799826000000000
check_mtime(info)
def test_cannot_instantiate_base_filesystem():
with pytest.raises(TypeError):
FileSystem()
def test_filesystem_equals():
fs0 = LocalFileSystem()
fs1 = LocalFileSystem()
fs2 = _MockFileSystem()
assert fs0.equals(fs0)
assert fs0.equals(fs1)
with pytest.raises(TypeError):
fs0.equals('string')
assert fs0 == fs0 == fs1
assert fs0 != 4
assert fs2 == fs2
assert fs2 != _MockFileSystem()
assert SubTreeFileSystem('/base', fs0) == SubTreeFileSystem('/base', fs0)
assert SubTreeFileSystem('/base', fs0) != SubTreeFileSystem('/base', fs2)
assert SubTreeFileSystem('/base', fs0) != SubTreeFileSystem('/other', fs0)
def test_subtree_filesystem():
localfs = LocalFileSystem()
subfs = SubTreeFileSystem('/base', localfs)
assert subfs.base_path == '/base/'
assert subfs.base_fs == localfs
subfs = SubTreeFileSystem('/another/base/', LocalFileSystem())
assert subfs.base_path == '/another/base/'
assert subfs.base_fs == localfs
def test_filesystem_pickling(fs):
if fs.type_name.split('::')[-1] == 'mock':
pytest.xfail(reason='MockFileSystem is not serializable')
serialized = pickle.dumps(fs)
restored = pickle.loads(serialized)
assert isinstance(restored, FileSystem)
assert restored.equals(fs)
def test_filesystem_is_functional_after_pickling(fs, pathfn):
if fs.type_name.split('::')[-1] == 'mock':
pytest.xfail(reason='MockFileSystem is not serializable')
skip_fsspec_s3fs(fs)
aaa = pathfn('a/aa/aaa/')
bb = pathfn('a/bb')
c = pathfn('c.txt')
fs.create_dir(aaa)
with fs.open_output_stream(bb):
pass # touch
with fs.open_output_stream(c) as fp:
fp.write(b'test')
restored = pickle.loads(pickle.dumps(fs))
aaa_info, bb_info, c_info = restored.get_file_info([aaa, bb, c])
assert aaa_info.type == FileType.Directory
assert bb_info.type == FileType.File
assert c_info.type == FileType.File
def test_type_name():
fs = LocalFileSystem()
assert fs.type_name == "local"
fs = _MockFileSystem()
assert fs.type_name == "mock"
def test_normalize_path(fs):
# Trivial path names (without separators) should generally be
# already normalized. Just a sanity check.
assert fs.normalize_path("foo") == "foo"
def test_non_path_like_input_raises(fs):
class Path:
pass
invalid_paths = [1, 1.1, Path(), tuple(), {}, [], lambda: 1,
pathlib.Path()]
for path in invalid_paths:
with pytest.raises(TypeError):
fs.create_dir(path)
def test_get_file_info(fs, pathfn):
aaa = pathfn('a/aa/aaa/')
bb = pathfn('a/bb')
c = pathfn('c.txt')
zzz = pathfn('zzz')
fs.create_dir(aaa)
with fs.open_output_stream(bb):
pass # touch
with fs.open_output_stream(c) as fp:
fp.write(b'test')
aaa_info, bb_info, c_info, zzz_info = fs.get_file_info([aaa, bb, c, zzz])
assert aaa_info.path == aaa
assert 'aaa' in repr(aaa_info)
assert aaa_info.extension == ''
if fs.type_name == "py::fsspec+s3":
# s3fs doesn't create empty directories
assert aaa_info.type == FileType.NotFound
else:
assert aaa_info.type == FileType.Directory
assert 'FileType.Directory' in repr(aaa_info)
assert aaa_info.size is None
check_mtime_or_absent(aaa_info)
assert bb_info.path == str(bb)
assert bb_info.base_name == 'bb'
assert bb_info.extension == ''
assert bb_info.type == FileType.File
assert 'FileType.File' in repr(bb_info)
assert bb_info.size == 0
if fs.type_name not in ["py::fsspec+memory", "py::fsspec+s3"]:
check_mtime(bb_info)
assert c_info.path == str(c)
assert c_info.base_name == 'c.txt'
assert c_info.extension == 'txt'
assert c_info.type == FileType.File
assert 'FileType.File' in repr(c_info)
assert c_info.size == 4
if fs.type_name not in ["py::fsspec+memory", "py::fsspec+s3"]:
check_mtime(c_info)
assert zzz_info.path == str(zzz)
assert zzz_info.base_name == 'zzz'
assert zzz_info.extension == ''
assert zzz_info.type == FileType.NotFound
assert zzz_info.size is None
assert zzz_info.mtime is None
assert 'FileType.NotFound' in repr(zzz_info)
check_mtime_absent(zzz_info)
# with single path
aaa_info2 = fs.get_file_info(aaa)
assert aaa_info.path == aaa_info2.path
assert aaa_info.type == aaa_info2.type
def test_get_file_info_with_selector(fs, pathfn):
base_dir = pathfn('selector-dir/')
file_a = pathfn('selector-dir/test_file_a')
file_b = pathfn('selector-dir/test_file_b')
dir_a = pathfn('selector-dir/test_dir_a')
file_c = pathfn('selector-dir/test_dir_a/test_file_c')
dir_b = pathfn('selector-dir/test_dir_b')
try:
fs.create_dir(base_dir)
with fs.open_output_stream(file_a):
pass
with fs.open_output_stream(file_b):
pass
fs.create_dir(dir_a)
with fs.open_output_stream(file_c):
pass
fs.create_dir(dir_b)
# recursive selector
selector = FileSelector(base_dir, allow_not_found=False,
recursive=True)
assert selector.base_dir == base_dir
infos = fs.get_file_info(selector)
if fs.type_name == "py::fsspec+s3":
# s3fs only lists directories if they are not empty, but depending
# on the s3fs/fsspec version combo, it includes the base_dir
# (https://github.com/dask/s3fs/issues/393)
assert (len(infos) == 4) or (len(infos) == 5)
else:
assert len(infos) == 5
for info in infos:
if (info.path.endswith(file_a) or info.path.endswith(file_b) or
info.path.endswith(file_c)):
assert info.type == FileType.File
elif (info.path.rstrip("/").endswith(dir_a) or
info.path.rstrip("/").endswith(dir_b)):
assert info.type == FileType.Directory
elif (fs.type_name == "py::fsspec+s3" and
info.path.rstrip("/").endswith("selector-dir")):
# s3fs can include base dir, see above
assert info.type == FileType.Directory
else:
raise ValueError('unexpected path {}'.format(info.path))
check_mtime_or_absent(info)
# non-recursive selector -> not selecting the nested file_c
selector = FileSelector(base_dir, recursive=False)
infos = fs.get_file_info(selector)
if fs.type_name == "py::fsspec+s3":
# s3fs only lists directories if they are not empty
# + for s3fs 0.5.2 all directories are dropped because of buggy
# side-effect of previous find() call
# (https://github.com/dask/s3fs/issues/410)
assert (len(infos) == 3) or (len(infos) == 2)
else:
assert len(infos) == 4
finally:
fs.delete_dir(base_dir)
def test_create_dir(fs, pathfn):
    # s3fs fails to delete a dir if it is empty
# (https://github.com/dask/s3fs/issues/317)
skip_fsspec_s3fs(fs)
d = pathfn('test-directory/')
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
fs.create_dir(d)
fs.delete_dir(d)
d = pathfn('deeply/nested/test-directory/')
fs.create_dir(d, recursive=True)
fs.delete_dir(d)
def test_delete_dir(fs, pathfn):
skip_fsspec_s3fs(fs)
d = pathfn('directory/')
nd = pathfn('directory/nested/')
fs.create_dir(nd)
fs.delete_dir(d)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(nd)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
def test_delete_dir_contents(fs, pathfn):
skip_fsspec_s3fs(fs)
d = pathfn('directory/')
nd = pathfn('directory/nested/')
fs.create_dir(nd)
fs.delete_dir_contents(d)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(nd)
fs.delete_dir(d)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
def _check_root_dir_contents(config):
fs = config['fs']
pathfn = config['pathfn']
d = pathfn('directory/')
nd = pathfn('directory/nested/')
fs.create_dir(nd)
with pytest.raises(pa.ArrowInvalid):
fs.delete_dir_contents("")
with pytest.raises(pa.ArrowInvalid):
fs.delete_dir_contents("/")
with pytest.raises(pa.ArrowInvalid):
fs.delete_dir_contents("//")
fs.delete_dir_contents("", accept_root_dir=True)
fs.delete_dir_contents("/", accept_root_dir=True)
fs.delete_dir_contents("//", accept_root_dir=True)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
def test_delete_root_dir_contents(mockfs, py_mockfs):
_check_root_dir_contents(mockfs)
_check_root_dir_contents(py_mockfs)
def test_copy_file(fs, pathfn, allow_copy_file):
s = pathfn('test-copy-source-file')
t = pathfn('test-copy-target-file')
with fs.open_output_stream(s):
pass
if allow_copy_file:
fs.copy_file(s, t)
fs.delete_file(s)
fs.delete_file(t)
else:
with pytest.raises(pa.ArrowNotImplementedError):
fs.copy_file(s, t)
def test_move_directory(fs, pathfn, allow_move_dir):
# move directory (doesn't work with S3)
s = pathfn('source-dir/')
t = pathfn('target-dir/')
fs.create_dir(s)
if allow_move_dir:
fs.move(s, t)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(s)
fs.delete_dir(t)
else:
with pytest.raises(pa.ArrowIOError):
fs.move(s, t)
def test_move_file(fs, pathfn):
    # s3fs fails moving a file with recursive=True on the latest 0.5 version
# (https://github.com/dask/s3fs/issues/394)
skip_fsspec_s3fs(fs)
s = pathfn('test-move-source-file')
t = pathfn('test-move-target-file')
with fs.open_output_stream(s):
pass
fs.move(s, t)
with pytest.raises(pa.ArrowIOError):
fs.delete_file(s)
fs.delete_file(t)
def test_delete_file(fs, pathfn):
p = pathfn('test-delete-target-file')
with fs.open_output_stream(p):
pass
fs.delete_file(p)
with pytest.raises(pa.ArrowIOError):
fs.delete_file(p)
d = pathfn('test-delete-nested')
fs.create_dir(d)
f = pathfn('test-delete-nested/target-file')
with fs.open_output_stream(f) as s:
s.write(b'data')
fs.delete_dir(d)
def identity(v):
return v
@pytest.mark.parametrize(
('compression', 'buffer_size', 'compressor'),
[
(None, None, identity),
(None, 64, identity),
('gzip', None, gzip.compress),
('gzip', 256, gzip.compress),
]
)
def test_open_input_stream(fs, pathfn, compression, buffer_size, compressor):
p = pathfn('open-input-stream')
data = b'some data for reading\n' * 512
with fs.open_output_stream(p) as s:
s.write(compressor(data))
with fs.open_input_stream(p, compression, buffer_size) as s:
result = s.read()
assert result == data
def test_open_input_file(fs, pathfn):
p = pathfn('open-input-file')
data = b'some data' * 1024
with fs.open_output_stream(p) as s:
s.write(data)
read_from = len(b'some data') * 512
with fs.open_input_file(p) as f:
f.seek(read_from)
result = f.read()
assert result == data[read_from:]
@pytest.mark.parametrize(
('compression', 'buffer_size', 'decompressor'),
[
(None, None, identity),
(None, 64, identity),
('gzip', None, gzip.decompress),
('gzip', 256, gzip.decompress),
]
)
def test_open_output_stream(fs, pathfn, compression, buffer_size,
decompressor):
p = pathfn('open-output-stream')
data = b'some data for writing' * 1024
with fs.open_output_stream(p, compression, buffer_size) as f:
f.write(data)
with fs.open_input_stream(p, compression, buffer_size) as f:
assert f.read(len(data)) == data
@pytest.mark.parametrize(
('compression', 'buffer_size', 'compressor', 'decompressor'),
[
(None, None, identity, identity),
(None, 64, identity, identity),
('gzip', None, gzip.compress, gzip.decompress),
('gzip', 256, gzip.compress, gzip.decompress),
]
)
def test_open_append_stream(fs, pathfn, compression, buffer_size, compressor,
decompressor, allow_append_to_file):
p = pathfn('open-append-stream')
initial = compressor(b'already existing')
with fs.open_output_stream(p) as s:
s.write(initial)
if allow_append_to_file:
with fs.open_append_stream(p, compression=compression,
buffer_size=buffer_size) as f:
f.write(b'\nnewly added')
with fs.open_input_stream(p) as f:
result = f.read()
result = decompressor(result)
assert result == b'already existing\nnewly added'
else:
with pytest.raises(pa.ArrowNotImplementedError):
fs.open_append_stream(p, compression=compression,
buffer_size=buffer_size)
def test_open_output_stream_metadata(fs, pathfn):
p = pathfn('open-output-stream-metadata')
metadata = {'Content-Type': 'x-pyarrow/test'}
data = b'some data'
with fs.open_output_stream(p, metadata=metadata) as f:
f.write(data)
with fs.open_input_stream(p) as f:
assert f.read() == data
got_metadata = f.metadata()
if fs.type_name == 's3' or 'mock' in fs.type_name:
for k, v in metadata.items():
assert got_metadata[k] == v.encode()
else:
assert got_metadata == {}
def test_localfs_options():
# LocalFileSystem instantiation
LocalFileSystem(use_mmap=False)
with pytest.raises(TypeError):
LocalFileSystem(xxx=False)
def test_localfs_errors(localfs):
# Local filesystem errors should raise the right Python exceptions
# (e.g. FileNotFoundError)
fs = localfs['fs']
with assert_file_not_found():
fs.open_input_stream('/non/existent/file')
with assert_file_not_found():
fs.open_output_stream('/non/existent/file')
with assert_file_not_found():
fs.create_dir('/non/existent/dir', recursive=False)
with assert_file_not_found():
fs.delete_dir('/non/existent/dir')
with assert_file_not_found():
fs.delete_file('/non/existent/dir')
with assert_file_not_found():
fs.move('/non/existent', '/xxx')
with assert_file_not_found():
fs.copy_file('/non/existent', '/xxx')
def test_localfs_file_info(localfs):
fs = localfs['fs']
file_path = pathlib.Path(__file__)
dir_path = file_path.parent
[file_info, dir_info] = fs.get_file_info([file_path.as_posix(),
dir_path.as_posix()])
assert file_info.size == file_path.stat().st_size
assert file_info.mtime_ns == file_path.stat().st_mtime_ns
check_mtime(file_info)
assert dir_info.mtime_ns == dir_path.stat().st_mtime_ns
check_mtime(dir_info)
def test_mockfs_mtime_roundtrip(mockfs):
dt = datetime.fromtimestamp(1568799826, timezone.utc)
fs = _MockFileSystem(dt)
with fs.open_output_stream('foo'):
pass
[info] = fs.get_file_info(['foo'])
assert info.mtime == dt
@pytest.mark.s3
def test_s3_options():
from pyarrow.fs import S3FileSystem
fs = S3FileSystem(access_key='access', secret_key='secret',
session_token='token', region='us-east-2',
scheme='https', endpoint_override='localhost:8999')
assert isinstance(fs, S3FileSystem)
assert fs.region == 'us-east-2'
assert pickle.loads(pickle.dumps(fs)) == fs
fs = S3FileSystem(role_arn='role', session_name='session',
external_id='id', load_frequency=100)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
fs = S3FileSystem(anonymous=True)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
with pytest.raises(ValueError):
S3FileSystem(access_key='access')
with pytest.raises(ValueError):
S3FileSystem(secret_key='secret')
with pytest.raises(ValueError):
S3FileSystem(access_key='access', session_token='token')
with pytest.raises(ValueError):
S3FileSystem(secret_key='secret', session_token='token')
with pytest.raises(ValueError):
S3FileSystem(
access_key='access', secret_key='secret', role_arn='arn'
)
with pytest.raises(ValueError):
S3FileSystem(
access_key='access', secret_key='secret', anonymous=True
)
with pytest.raises(ValueError):
S3FileSystem(role_arn="arn", anonymous=True)
@pytest.mark.s3
def test_s3_proxy_options(monkeypatch):
from pyarrow.fs import S3FileSystem
# The following two are equivalent:
proxy_opts_1_dict = {'scheme': 'http', 'host': 'localhost', 'port': 8999}
proxy_opts_1_str = 'http://localhost:8999'
# The following two are equivalent:
proxy_opts_2_dict = {'scheme': 'https', 'host': 'localhost', 'port': 8080}
proxy_opts_2_str = 'https://localhost:8080'
# Check dict case for 'proxy_options'
fs = S3FileSystem(proxy_options=proxy_opts_1_dict)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
fs = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
# Check str case for 'proxy_options'
fs = S3FileSystem(proxy_options=proxy_opts_1_str)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
fs = S3FileSystem(proxy_options=proxy_opts_2_str)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
# Check that two FSs using the same proxy_options dict are equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_1_dict)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
# Check that two FSs using the same proxy_options str are equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_1_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
# Check that two FSs using equivalent proxy_options
# (one dict, one str) are equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_1_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
# Check that two FSs using nonequivalent proxy_options are not equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
# Check that two FSs (one using proxy_options and the other not)
# are not equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_str)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
# Only dict and str are supported
with pytest.raises(TypeError):
S3FileSystem(proxy_options=('http', 'localhost', 9090))
# Missing scheme
with pytest.raises(KeyError):
S3FileSystem(proxy_options={'host': 'localhost', 'port': 9090})
# Missing host
with pytest.raises(KeyError):
S3FileSystem(proxy_options={'scheme': 'https', 'port': 9090})
# Missing port
with pytest.raises(KeyError):
S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost'})
# Invalid proxy URI (invalid scheme htttps)
with pytest.raises(pa.ArrowInvalid):
S3FileSystem(proxy_options='htttps://localhost:9000')
    # Invalid proxy_options dict (invalid scheme htttp)
with pytest.raises(pa.ArrowInvalid):
S3FileSystem(proxy_options={'scheme': 'htttp', 'host': 'localhost',
'port': 8999})
@pytest.mark.hdfs
def test_hdfs_options(hdfs_connection):
from pyarrow.fs import HadoopFileSystem
if not pa.have_libhdfs():
pytest.skip('Cannot locate libhdfs')
host, port, user = hdfs_connection
replication = 2
buffer_size = 64*1024
default_block_size = 128*1024**2
uri = ('hdfs://{}:{}/?user={}&replication={}&buffer_size={}'
'&default_block_size={}')
hdfs1 = HadoopFileSystem(host, port, user='libhdfs',
replication=replication, buffer_size=buffer_size,
default_block_size=default_block_size)
hdfs2 = HadoopFileSystem.from_uri(uri.format(
host, port, 'libhdfs', replication, buffer_size, default_block_size
))
hdfs3 = HadoopFileSystem.from_uri(uri.format(
host, port, 'me', replication, buffer_size, default_block_size
))
hdfs4 = HadoopFileSystem.from_uri(uri.format(
host, port, 'me', replication + 1, buffer_size, default_block_size
))
hdfs5 = HadoopFileSystem(host, port)
hdfs6 = HadoopFileSystem.from_uri('hdfs://{}:{}'.format(host, port))
hdfs7 = HadoopFileSystem(host, port, user='localuser')
hdfs8 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket="cache_path")
hdfs9 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket=pathlib.Path("cache_path"))
hdfs10 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket="cache_path2")
hdfs11 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket="cache_path",
extra_conf={'hdfs_token': 'abcd'})
assert hdfs1 == hdfs2
assert hdfs5 == hdfs6
assert hdfs6 != hdfs7
assert hdfs2 != hdfs3
assert hdfs3 != hdfs4
assert hdfs7 != hdfs5
assert hdfs2 != hdfs3
assert hdfs3 != hdfs4
assert hdfs7 != hdfs8
assert hdfs8 == hdfs9
assert hdfs10 != hdfs9
assert hdfs11 != hdfs8
with pytest.raises(TypeError):
HadoopFileSystem()
with pytest.raises(TypeError):
HadoopFileSystem.from_uri(3)
for fs in [hdfs1, hdfs2, hdfs3, hdfs4, hdfs5, hdfs6, hdfs7, hdfs8,
hdfs9, hdfs10, hdfs11]:
assert pickle.loads(pickle.dumps(fs)) == fs
host, port, user = hdfs_connection
hdfs = HadoopFileSystem(host, port, user=user)
assert hdfs.get_file_info(FileSelector('/'))
hdfs = HadoopFileSystem.from_uri(
"hdfs://{}:{}/?user={}".format(host, port, user)
)
assert hdfs.get_file_info(FileSelector('/'))
@pytest.mark.parametrize(('uri', 'expected_klass', 'expected_path'), [
# leading slashes are removed intentionally, because MockFileSystem doesn't
# have a distinction between relative and absolute paths
('mock:', _MockFileSystem, ''),
('mock:foo/bar', _MockFileSystem, 'foo/bar'),
('mock:/foo/bar', _MockFileSystem, 'foo/bar'),
('mock:///foo/bar', _MockFileSystem, 'foo/bar'),
('file:/', LocalFileSystem, '/'),
('file:///', LocalFileSystem, '/'),
('file:/foo/bar', LocalFileSystem, '/foo/bar'),
('file:///foo/bar', LocalFileSystem, '/foo/bar'),
('/', LocalFileSystem, '/'),
('/foo/bar', LocalFileSystem, '/foo/bar'),
])
def test_filesystem_from_uri(uri, expected_klass, expected_path):
fs, path = FileSystem.from_uri(uri)
assert isinstance(fs, expected_klass)
assert path == expected_path
@pytest.mark.parametrize(
'path',
['', '/', 'foo/bar', '/foo/bar', __file__]
)
def test_filesystem_from_path_object(path):
p = pathlib.Path(path)
fs, path = FileSystem.from_uri(p)
assert isinstance(fs, LocalFileSystem)
assert path == p.resolve().absolute().as_posix()
@pytest.mark.s3
def test_filesystem_from_uri_s3(s3_connection, s3_server):
from pyarrow.fs import S3FileSystem
host, port, access_key, secret_key = s3_connection
uri = "s3://{}:{}@mybucket/foo/bar?scheme=http&endpoint_override={}:{}" \
.format(access_key, secret_key, host, port)
fs, path = FileSystem.from_uri(uri)
assert isinstance(fs, S3FileSystem)
assert path == "mybucket/foo/bar"
fs.create_dir(path)
[info] = fs.get_file_info([path])
assert info.path == path
assert info.type == FileType.Directory
def test_py_filesystem():
handler = DummyHandler()
fs = PyFileSystem(handler)
assert isinstance(fs, PyFileSystem)
assert fs.type_name == "py::dummy"
assert fs.handler is handler
with pytest.raises(TypeError):
PyFileSystem(None)
def test_py_filesystem_equality():
handler1 = DummyHandler(1)
handler2 = DummyHandler(2)
handler3 = DummyHandler(2)
fs1 = PyFileSystem(handler1)
fs2 = PyFileSystem(handler1)
fs3 = PyFileSystem(handler2)
fs4 = PyFileSystem(handler3)
assert fs2 is not fs1
assert fs3 is not fs2
assert fs4 is not fs3
assert fs2 == fs1 # Same handler
assert fs3 != fs2 # Unequal handlers
assert fs4 == fs3 # Equal handlers
assert fs1 != LocalFileSystem()
assert fs1 != object()
def test_py_filesystem_pickling():
handler = DummyHandler()
fs = PyFileSystem(handler)
serialized = pickle.dumps(fs)
restored = pickle.loads(serialized)
assert isinstance(restored, FileSystem)
assert restored == fs
assert restored.handler == handler
assert restored.type_name == "py::dummy"
def test_py_filesystem_lifetime():
handler = DummyHandler()
fs = PyFileSystem(handler)
assert isinstance(fs, PyFileSystem)
wr = weakref.ref(handler)
handler = None
assert wr() is not None
fs = None
assert wr() is None
# Taking the .handler attribute doesn't wreck reference counts
handler = DummyHandler()
fs = PyFileSystem(handler)
wr = weakref.ref(handler)
handler = None
assert wr() is fs.handler
assert wr() is not None
fs = None
assert wr() is None
def test_py_filesystem_get_file_info():
handler = DummyHandler()
fs = PyFileSystem(handler)
[info] = fs.get_file_info(['some/dir'])
assert info.path == 'some/dir'
assert info.type == FileType.Directory
[info] = fs.get_file_info(['some/file'])
assert info.path == 'some/file'
assert info.type == FileType.File
[info] = fs.get_file_info(['notfound'])
assert info.path == 'notfound'
assert info.type == FileType.NotFound
with pytest.raises(TypeError):
fs.get_file_info(['badtype'])
with pytest.raises(IOError):
fs.get_file_info(['xxx'])
def test_py_filesystem_get_file_info_selector():
handler = DummyHandler()
fs = PyFileSystem(handler)
selector = FileSelector(base_dir="somedir")
infos = fs.get_file_info(selector)
assert len(infos) == 2
assert infos[0].path == "somedir/file1"
assert infos[0].type == FileType.File
assert infos[0].size == 123
assert infos[1].path == "somedir/subdir1"
assert infos[1].type == FileType.Directory
assert infos[1].size is None
selector = FileSelector(base_dir="somedir", recursive=True)
infos = fs.get_file_info(selector)
assert len(infos) == 3
assert infos[0].path == "somedir/file1"
assert infos[1].path == "somedir/subdir1"
assert infos[2].path == "somedir/subdir1/file2"
selector = FileSelector(base_dir="notfound")
with pytest.raises(FileNotFoundError):
fs.get_file_info(selector)
selector = FileSelector(base_dir="notfound", allow_not_found=True)
assert fs.get_file_info(selector) == []
def test_py_filesystem_ops():
handler = DummyHandler()
fs = PyFileSystem(handler)
fs.create_dir("recursive", recursive=True)
fs.create_dir("non-recursive", recursive=False)
with pytest.raises(IOError):
fs.create_dir("foobar")
fs.delete_dir("delete_dir")
fs.delete_dir_contents("delete_dir_contents")
for path in ("", "/", "//"):
with pytest.raises(ValueError):
fs.delete_dir_contents(path)
fs.delete_dir_contents(path, accept_root_dir=True)
fs.delete_file("delete_file")
fs.move("move_from", "move_to")
fs.copy_file("copy_file_from", "copy_file_to")
def test_py_open_input_stream():
fs = PyFileSystem(DummyHandler())
with fs.open_input_stream("somefile") as f:
assert f.read() == b"somefile:input_stream"
with pytest.raises(FileNotFoundError):
fs.open_input_stream("notfound")
def test_py_open_input_file():
fs = PyFileSystem(DummyHandler())
with fs.open_input_file("somefile") as f:
assert f.read() == b"somefile:input_file"
with pytest.raises(FileNotFoundError):
fs.open_input_file("notfound")
def test_py_open_output_stream():
fs = PyFileSystem(DummyHandler())
with fs.open_output_stream("somefile") as f:
f.write(b"data")
def test_py_open_append_stream():
fs = PyFileSystem(DummyHandler())
with fs.open_append_stream("somefile") as f:
f.write(b"data")
@pytest.mark.s3
def test_s3_real_aws():
# Exercise connection code with an AWS-backed S3 bucket.
# This is a minimal integration check for ARROW-9261 and similar issues.
from pyarrow.fs import S3FileSystem
default_region = (os.environ.get('PYARROW_TEST_S3_REGION') or
'us-east-1')
fs = S3FileSystem(anonymous=True)
assert fs.region == default_region
fs = S3FileSystem(anonymous=True, region='us-east-2')
entries = fs.get_file_info(FileSelector('ursa-labs-taxi-data'))
assert len(entries) > 0
with fs.open_input_stream('ursa-labs-taxi-data/2019/06/data.parquet') as f:
md = f.metadata()
assert 'Content-Type' in md
assert md['Last-Modified'] == b'2020-01-17T16:26:28Z'
# For some reason, the header value is quoted
# (both with AWS and Minio)
assert md['ETag'] == b'"f1efd5d76cb82861e1542117bfa52b90-8"'
@pytest.mark.s3
def test_s3_real_aws_region_selection():
# Taken from a registry of open S3-hosted datasets
# at https://github.com/awslabs/open-data-registry
fs, path = FileSystem.from_uri('s3://mf-nwp-models/README.txt')
assert fs.region == 'eu-west-1'
with fs.open_input_stream(path) as f:
assert b"Meteo-France Atmospheric models on AWS" in f.read(50)
# Passing an explicit region disables auto-selection
fs, path = FileSystem.from_uri(
's3://mf-nwp-models/README.txt?region=us-east-2')
assert fs.region == 'us-east-2'
# Reading from the wrong region may still work for public buckets...
# Non-existent bucket (hopefully, otherwise need to fix this test)
with pytest.raises(IOError, match="Bucket '.*' not found"):
FileSystem.from_uri('s3://x-arrow-non-existent-bucket')
fs, path = FileSystem.from_uri(
's3://x-arrow-non-existent-bucket?region=us-east-3')
assert fs.region == 'us-east-3'
|
[] |
[] |
[
"PYARROW_TEST_S3_REGION"
] |
[]
|
["PYARROW_TEST_S3_REGION"]
|
python
| 1 | 0 | |
2019/day09/main.go
|
package main
import (
"fmt"
"time"
"github.com/bogosj/advent-of-code/2019/computer"
)
func test() {
for _, i := range []int{1, 2, 3} {
c := computer.New(fmt.Sprintf("test%v.txt", i))
in := make(chan int, 1)
in <- 0
for out := range c.Compute(in) {
fmt.Printf("%v", out)
}
fmt.Println()
}
}
func part1() {
c := computer.New("input.txt")
in := make(chan int, 1)
in <- 1
out := <-c.Compute(in)
fmt.Println("TESTS:", out)
}
func part2() {
c := computer.New("input.txt")
in := make(chan int, 1)
in <- 2
out := <-c.Compute(in)
fmt.Println("Result:", out)
}
func main() {
start := time.Now()
part1()
fmt.Println("Part 1 done in:", time.Since(start))
start = time.Now()
part2()
fmt.Println("Part 2 done in:", time.Since(start))
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
tools/analice/resolver_license_file.go
|
package main
import (
"fmt"
"go/build"
"os"
"path/filepath"
"runtime"
"strings"
)
var licenseFiles = []string{"LICENSE", "LICENSE.md", "LICENSE.txt", "LICENSE.rst", "COPYING", "License", "MIT-LICENSE.txt"}
type licenceFileResolver struct {
}
func (r licenceFileResolver) resolve(ps ...pkg) ([]pkgWithLicenseFile, []error) {
pswlf := make([]pkgWithLicenseFile, len(ps))
errs := make([]error, len(ps))
for i, p := range ps {
ptmp := p
withLic := pkgWithLicenseFile{}
withLic.pkg = &ptmp
withLic.licFilePath, errs[i] = r.resolveOne(p)
pswlf[i] = withLic
}
return pswlf, errs
}
func (r licenceFileResolver) resolveOne(p pkg) (string, error) {
if len(p.GoFiles) == 0 {
return "", fmt.Errorf("no go files for package '%v', don't know where to search for license", p.PkgPath)
}
startingDir := filepath.Dir(p.GoFiles[0])
for dir := startingDir; !strings.HasSuffix(dir, string(os.PathSeparator)) && r.stillInScope(dir, p.PkgPath); dir = filepath.Dir(dir) {
path := r.searchLicenseFile(dir)
if path != "" {
return path, nil
}
}
return "", fmt.Errorf("no license file found, starting from '%s' (have tried %v)", p.GoFiles[0], licenseFiles)
}
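// A rough illustration of the walk above (hypothetical paths, not taken from
// any real package): for a package whose first Go file is
// $GOPATH/src/github.com/foo/bar/baz/baz.go, resolveOne probes baz/, then bar/,
// then foo/, looking for one of the names in licenseFiles, and stops once the
// directory is outside GOPATH, GOROOT, and the package's import path.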
func (r licenceFileResolver) stillInScope(dir string, pkgPath string) bool {
gp := strings.Index(dir, os.Getenv("GOPATH"))
dgp := strings.Index(dir, build.Default.GOPATH)
gr := strings.Index(dir, runtime.GOROOT())
if gp >= 0 || dgp >= 0 || gr >= 0 {
return true
}
return strings.Contains(dir, pkgPath)
}
func (r licenceFileResolver) searchLicenseFile(dir string) string {
for _, name := range licenseFiles {
path := filepath.Join(dir, name)
_, err := os.Stat(path)
if err == nil {
return path
}
}
return ""
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
e2e_samples/parking_sensors_synapse/src/ddo_transform/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import os
from setuptools import setup, find_packages
version = os.environ['PACKAGE_VERSION']
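# Note: the version is read from the environment, so PACKAGE_VERSION must be
# set before building, e.g. (hypothetical value):
#   PACKAGE_VERSION=1.0.0 python setup.py bdist_wheel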
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=6.0', ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Lace Lofranco",
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
entry_points={
'console_scripts': [
'ddo_transform=ddo_transform.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='ddo_transform',
name='ddo_transform',
packages=find_packages(include=['ddo_transform']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/devlace/datadevops',
version=version,
zip_safe=False,
)
|
[] |
[] |
[
"PACKAGE_VERSION"
] |
[]
|
["PACKAGE_VERSION"]
|
python
| 1 | 0 | |
vendor/github.com/gophercloud/utils/terraform/auth/config.go
|
package auth
import (
"context"
"fmt"
"log"
"net/http"
"os"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth"
osClient "github.com/gophercloud/utils/client"
"github.com/gophercloud/utils/internal"
"github.com/gophercloud/utils/openstack/clientconfig"
"github.com/gophercloud/utils/terraform/mutexkv"
)
type Config struct {
CACertFile string
ClientCertFile string
ClientKeyFile string
Cloud string
DefaultDomain string
DomainID string
DomainName string
EndpointOverrides map[string]interface{}
EndpointType string
IdentityEndpoint string
Insecure *bool
Password string
ProjectDomainName string
ProjectDomainID string
Region string
Swauth bool
TenantID string
TenantName string
Token string
UserDomainName string
UserDomainID string
Username string
UserID string
ApplicationCredentialID string
ApplicationCredentialName string
ApplicationCredentialSecret string
UseOctavia bool
MaxRetries int
DisableNoCacheHeader bool
Context context.Context
DelayedAuth bool
AllowReauth bool
OsClient *gophercloud.ProviderClient
authOpts *gophercloud.AuthOptions
authenticated bool
authFailed error
swClient *gophercloud.ServiceClient
swAuthFailed error
TerraformVersion string
SDKVersion string
*mutexkv.MutexKV
}
// LoadAndValidate performs the authentication and initial configuration
// of an OpenStack Provider Client. This sets up the HTTP client and
// authenticates to an OpenStack cloud.
//
// Individual Service Clients are created later in this file.
func (c *Config) LoadAndValidate() error {
// Make sure at least one of auth_url or cloud was specified.
if c.IdentityEndpoint == "" && c.Cloud == "" {
return fmt.Errorf("One of 'auth_url' or 'cloud' must be specified")
}
validEndpoint := false
validEndpoints := []string{
"internal", "internalURL",
"admin", "adminURL",
"public", "publicURL",
"",
}
for _, endpoint := range validEndpoints {
if c.EndpointType == endpoint {
validEndpoint = true
}
}
if !validEndpoint {
return fmt.Errorf("Invalid endpoint type provided")
}
if c.MaxRetries < 0 {
return fmt.Errorf("max_retries should be a positive value")
}
clientOpts := new(clientconfig.ClientOpts)
// If a cloud entry was given, base AuthOptions on a clouds.yaml file.
if c.Cloud != "" {
clientOpts.Cloud = c.Cloud
// Passing region allows GetCloudFromYAML to apply per-region overrides
clientOpts.RegionName = c.Region
cloud, err := clientconfig.GetCloudFromYAML(clientOpts)
if err != nil {
return err
}
if c.Region == "" && cloud.RegionName != "" {
c.Region = cloud.RegionName
}
if c.CACertFile == "" && cloud.CACertFile != "" {
c.CACertFile = cloud.CACertFile
}
if c.ClientCertFile == "" && cloud.ClientCertFile != "" {
c.ClientCertFile = cloud.ClientCertFile
}
if c.ClientKeyFile == "" && cloud.ClientKeyFile != "" {
c.ClientKeyFile = cloud.ClientKeyFile
}
if c.Insecure == nil && cloud.Verify != nil {
v := (!*cloud.Verify)
c.Insecure = &v
}
} else {
authInfo := &clientconfig.AuthInfo{
AuthURL: c.IdentityEndpoint,
DefaultDomain: c.DefaultDomain,
DomainID: c.DomainID,
DomainName: c.DomainName,
Password: c.Password,
ProjectDomainID: c.ProjectDomainID,
ProjectDomainName: c.ProjectDomainName,
ProjectID: c.TenantID,
ProjectName: c.TenantName,
Token: c.Token,
UserDomainID: c.UserDomainID,
UserDomainName: c.UserDomainName,
Username: c.Username,
UserID: c.UserID,
ApplicationCredentialID: c.ApplicationCredentialID,
ApplicationCredentialName: c.ApplicationCredentialName,
ApplicationCredentialSecret: c.ApplicationCredentialSecret,
}
clientOpts.AuthInfo = authInfo
}
ao, err := clientconfig.AuthOptions(clientOpts)
if err != nil {
return err
}
log.Printf("[DEBUG] OpenStack allowReauth: %t", c.AllowReauth)
ao.AllowReauth = c.AllowReauth
client, err := openstack.NewClient(ao.IdentityEndpoint)
if err != nil {
return err
}
client.Context = c.Context
// Set UserAgent
client.UserAgent.Prepend(terraformUserAgent(c.TerraformVersion, c.SDKVersion))
config, err := internal.PrepareTLSConfig(c.CACertFile, c.ClientCertFile, c.ClientKeyFile, c.Insecure)
if err != nil {
return err
}
var logger osClient.Logger
// if OS_DEBUG is set, log the requests and responses
if os.Getenv("OS_DEBUG") != "" {
logger = &osClient.DefaultLogger{}
}
transport := &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: config}
client.HTTPClient = http.Client{
Transport: &osClient.RoundTripper{
Rt: transport,
MaxRetries: c.MaxRetries,
Logger: logger,
},
}
if !c.DisableNoCacheHeader {
extraHeaders := map[string][]string{
"Cache-Control": {"no-cache"},
}
client.HTTPClient.Transport.(*osClient.RoundTripper).SetHeaders(extraHeaders)
}
if c.MaxRetries > 0 {
client.MaxBackoffRetries = uint(c.MaxRetries)
client.RetryBackoffFunc = osClient.RetryBackoffFunc(logger)
}
if !c.DelayedAuth && !c.Swauth {
err = openstack.Authenticate(client, *ao)
if err != nil {
return err
}
}
c.authOpts = ao
c.OsClient = client
return nil
}
func (c *Config) Authenticate() error {
if !c.DelayedAuth {
return nil
}
c.MutexKV.Lock("auth")
defer c.MutexKV.Unlock("auth")
if c.authFailed != nil {
return c.authFailed
}
if !c.authenticated {
if err := openstack.Authenticate(c.OsClient, *c.authOpts); err != nil {
c.authFailed = err
return err
}
c.authenticated = true
}
return nil
}
// DetermineEndpoint is a helper method to determine if the user wants to
// override an endpoint returned from the catalog.
func (c *Config) DetermineEndpoint(client *gophercloud.ServiceClient, service string) *gophercloud.ServiceClient {
finalEndpoint := client.ResourceBaseURL()
if v, ok := c.EndpointOverrides[service]; ok {
if endpoint, ok := v.(string); ok && endpoint != "" {
finalEndpoint = endpoint
client.Endpoint = endpoint
client.ResourceBase = ""
}
}
log.Printf("[DEBUG] OpenStack Endpoint for %s: %s", service, finalEndpoint)
return client
}
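// Sketch of how an override is expected to flow through (hypothetical URL):
// with EndpointOverrides set to map[string]interface{}{"volume": "https://cinder.example.com/v2/"},
// a client created for the "volume" service is re-pointed at that URL instead
// of the endpoint returned by the service catalog.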
// DetermineRegion is a helper method to determine the region based on
// the user's settings.
func (c *Config) DetermineRegion(region string) string {
// If a resource-level region was not specified, and a provider-level region was set,
// use the provider-level region.
if region == "" && c.Region != "" {
region = c.Region
}
log.Printf("[DEBUG] OpenStack Region is: %s", region)
return region
}
// The following methods assist with the creation of individual Service Clients
// which interact with the various OpenStack services.
type commonCommonServiceClientInitFunc func(*gophercloud.ProviderClient, gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error)
func (c *Config) CommonServiceClientInit(newClient commonCommonServiceClientInitFunc, region, service string) (*gophercloud.ServiceClient, error) {
if err := c.Authenticate(); err != nil {
return nil, err
}
client, err := newClient(c.OsClient, gophercloud.EndpointOpts{
Region: c.DetermineRegion(region),
Availability: clientconfig.GetEndpointType(c.EndpointType),
})
if err != nil {
return client, err
}
// Check if an endpoint override was specified for this service.
client = c.DetermineEndpoint(client, service)
return client, nil
}
func (c *Config) BlockStorageV1Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewBlockStorageV1, region, "volume")
}
func (c *Config) BlockStorageV2Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewBlockStorageV2, region, "volumev2")
}
func (c *Config) BlockStorageV3Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewBlockStorageV3, region, "volumev3")
}
func (c *Config) ComputeV2Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewComputeV2, region, "compute")
}
func (c *Config) DNSV2Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewDNSV2, region, "dns")
}
func (c *Config) IdentityV3Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewIdentityV3, region, "identity")
}
func (c *Config) ImageV2Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewImageServiceV2, region, "image")
}
func (c *Config) MessagingV2Client(region string) (*gophercloud.ServiceClient, error) {
if err := c.Authenticate(); err != nil {
return nil, err
}
client, err := openstack.NewMessagingV2(c.OsClient, "", gophercloud.EndpointOpts{
Region: c.DetermineRegion(region),
Availability: clientconfig.GetEndpointType(c.EndpointType),
})
if err != nil {
return client, err
}
// Check if an endpoint override was specified for the messaging service.
client = c.DetermineEndpoint(client, "message")
return client, nil
}
func (c *Config) NetworkingV2Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewNetworkV2, region, "network")
}
func (c *Config) ObjectStorageV1Client(region string) (*gophercloud.ServiceClient, error) {
var client *gophercloud.ServiceClient
var err error
// If Swift Authentication is being used, return a swauth client.
// Otherwise, use a Keystone-based client.
if c.Swauth {
if !c.DelayedAuth {
client, err = swauth.NewObjectStorageV1(c.OsClient, swauth.AuthOpts{
User: c.Username,
Key: c.Password,
})
} else {
c.MutexKV.Lock("SwAuth")
defer c.MutexKV.Unlock("SwAuth")
if c.swAuthFailed != nil {
return nil, c.swAuthFailed
}
if c.swClient == nil {
c.swClient, err = swauth.NewObjectStorageV1(c.OsClient, swauth.AuthOpts{
User: c.Username,
Key: c.Password,
})
if err != nil {
c.swAuthFailed = err
return nil, err
}
}
client = c.swClient
}
} else {
if err := c.Authenticate(); err != nil {
return nil, err
}
client, err = openstack.NewObjectStorageV1(c.OsClient, gophercloud.EndpointOpts{
Region: c.DetermineRegion(region),
Availability: clientconfig.GetEndpointType(c.EndpointType),
})
if err != nil {
return client, err
}
}
// Check if an endpoint override was specified for the object-store service.
client = c.DetermineEndpoint(client, "object-store")
return client, nil
}
func (c *Config) OrchestrationV1Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewOrchestrationV1, region, "orchestration")
}
func (c *Config) LoadBalancerV2Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewLoadBalancerV2, region, "octavia")
}
func (c *Config) DatabaseV1Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewDBV1, region, "database")
}
func (c *Config) ContainerInfraV1Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewContainerInfraV1, region, "container-infra")
}
func (c *Config) SharedfilesystemV2Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewSharedFileSystemV2, region, "sharev2")
}
func (c *Config) KeyManagerV1Client(region string) (*gophercloud.ServiceClient, error) {
return c.CommonServiceClientInit(openstack.NewKeyManagerV1, region, "key-manager")
}
|
[
"\"OS_DEBUG\""
] |
[] |
[
"OS_DEBUG"
] |
[]
|
["OS_DEBUG"]
|
go
| 1 | 0 | |
backends/memcache/memcache_test.go
|
package memcache_test
import (
"os"
"testing"
"time"
"github.com/garenwen/millstone/backends/memcache"
"github.com/garenwen/millstone/config"
"github.com/garenwen/millstone/tasks"
"github.com/stretchr/testify/assert"
)
func TestGroupCompleted(t *testing.T) {
memcacheURL := os.Getenv("MEMCACHE_URL")
if memcacheURL == "" {
t.Skip("MEMCACHE_URL is not defined")
}
groupUUID := "testGroupUUID"
task1 := &tasks.Signature{
UUID: "testTaskUUID1",
GroupUUID: groupUUID,
}
task2 := &tasks.Signature{
UUID: "testTaskUUID2",
GroupUUID: groupUUID,
}
backend := memcache.New(new(config.Config), []string{memcacheURL})
// Cleanup before the test
backend.PurgeState(task1.UUID)
backend.PurgeState(task2.UUID)
backend.PurgeGroupMeta(groupUUID)
groupCompleted, err := backend.GroupCompleted(groupUUID, 2)
if assert.Error(t, err) {
assert.False(t, groupCompleted)
assert.Equal(t, "memcache: cache miss", err.Error())
}
backend.InitGroup(groupUUID, []string{task1.UUID, task2.UUID})
groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
if assert.Error(t, err) {
assert.False(t, groupCompleted)
assert.Equal(t, "memcache: cache miss", err.Error())
}
backend.SetStatePending(task1)
backend.SetStateStarted(task2)
groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
if assert.NoError(t, err) {
assert.False(t, groupCompleted)
}
taskResults := []*tasks.TaskResult{new(tasks.TaskResult)}
backend.SetStateStarted(task1)
backend.SetStateSuccess(task2, taskResults)
groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
if assert.NoError(t, err) {
assert.False(t, groupCompleted)
}
backend.SetStateFailure(task1, "Some error")
groupCompleted, err = backend.GroupCompleted(groupUUID, 2)
if assert.NoError(t, err) {
assert.True(t, groupCompleted)
}
}
func TestGetState(t *testing.T) {
memcacheURL := os.Getenv("MEMCACHE_URL")
if memcacheURL == "" {
t.Skip("MEMCACHE_URL is not defined")
}
signature := &tasks.Signature{
UUID: "testTaskUUID",
GroupUUID: "testGroupUUID",
}
backend := memcache.New(new(config.Config), []string{memcacheURL})
go func() {
backend.SetStatePending(signature)
time.Sleep(2 * time.Millisecond)
backend.SetStateReceived(signature)
time.Sleep(2 * time.Millisecond)
backend.SetStateStarted(signature)
time.Sleep(2 * time.Millisecond)
taskResults := []*tasks.TaskResult{
{
Type: "float64",
Value: 2,
},
}
backend.SetStateSuccess(signature, taskResults)
}()
var (
taskState *tasks.TaskState
err error
)
for {
taskState, err = backend.GetState(signature.UUID)
if taskState == nil {
assert.Equal(t, "memcache: cache miss", err.Error())
continue
}
assert.NoError(t, err)
if taskState.IsCompleted() {
break
}
}
}
func TestPurgeState(t *testing.T) {
memcacheURL := os.Getenv("MEMCACHE_URL")
if memcacheURL == "" {
t.Skip("MEMCACHE_URL is not defined")
}
signature := &tasks.Signature{
UUID: "testTaskUUID",
GroupUUID: "testGroupUUID",
}
backend := memcache.New(new(config.Config), []string{memcacheURL})
backend.SetStatePending(signature)
taskState, err := backend.GetState(signature.UUID)
assert.NotNil(t, taskState)
assert.NoError(t, err)
backend.PurgeState(taskState.TaskUUID)
taskState, err = backend.GetState(signature.UUID)
assert.Nil(t, taskState)
assert.Error(t, err)
}
|
[
"\"MEMCACHE_URL\"",
"\"MEMCACHE_URL\"",
"\"MEMCACHE_URL\""
] |
[] |
[
"MEMCACHE_URL"
] |
[]
|
["MEMCACHE_URL"]
|
go
| 1 | 0 | |
wandb/apis/public.py
|
import logging
import requests
import time
import sys
import os
import json
import re
import six
import yaml
import tempfile
import datetime
from gql import Client, gql
from gql.client import RetryError
from gql.transport.requests import RequestsHTTPTransport
from six.moves import urllib
import wandb
from wandb import Error, __version__
from wandb import util
from wandb.retry import retriable
from wandb.summary import HTTPSummary
from wandb import env
from wandb.apis import normalize_exceptions
logger = logging.getLogger(__name__)
PROJECT_FRAGMENT = '''fragment ProjectFragment on Project {
id
name
createdAt
isBenchmark
}'''
RUN_FRAGMENT = '''fragment RunFragment on Run {
id
tags
name
displayName
sweepName
state
config
readOnly
createdAt
heartbeatAt
description
notes
systemMetrics
summaryMetrics
historyLineCount
user {
name
username
}
historyKeys
}'''
FILE_FRAGMENT = '''fragment RunFilesFragment on Run {
files(names: $fileNames, after: $fileCursor, first: $fileLimit) {
edges {
node {
id
name
url(upload: $upload)
sizeBytes
mimetype
updatedAt
md5
}
cursor
}
pageInfo {
endCursor
hasNextPage
}
}
}'''
class RetryingClient(object):
def __init__(self, client):
self._client = client
@retriable(retry_timedelta=datetime.timedelta(
seconds=20),
check_retry_fn=util.no_retry_auth,
retryable_exceptions=(RetryError, requests.RequestException))
def execute(self, *args, **kwargs):
return self._client.execute(*args, **kwargs)
class Api(object):
"""
Used for querying the wandb server.
Examples:
Most common way to initialize
```
wandb.Api()
```
Args:
overrides (dict): You can set `base_url` if you are using a wandb server
other than https://api.wandb.ai.
You can also set defaults for `entity`, `project`, and `run`.
"""
_HTTP_TIMEOUT = env.get_http_timeout(9)
def __init__(self, overrides={}):
self.settings = {
'entity': None,
'project': None,
'run': "latest",
'base_url': env.get_base_url("https://api.wandb.ai")
}
self.settings.update(overrides)
if 'username' in overrides and 'entity' not in overrides:
wandb.termwarn('Passing "username" to Api is deprecated. please use "entity" instead.')
self.settings['entity'] = overrides['username']
self._projects = {}
self._runs = {}
self._sweeps = {}
self._base_client = Client(
transport=RequestsHTTPTransport(
headers={'User-Agent': self.user_agent, 'Use-Admin-Privileges': "true"},
use_json=True,
# this timeout won't apply when the DNS lookup fails. in that case, it will be 60s
# https://bugs.python.org/issue22889
timeout=self._HTTP_TIMEOUT,
auth=("api", self.api_key),
url='%s/graphql' % self.settings['base_url']
)
)
self._client = RetryingClient(self._base_client)
def create_run(self, **kwargs):
return Run.create(self, **kwargs)
@property
def client(self):
return self._client
@property
def user_agent(self):
return 'W&B Public Client %s' % __version__
@property
def api_key(self):
auth = requests.utils.get_netrc_auth(self.settings['base_url'])
key = None
if auth:
key = auth[-1]
# Environment should take precedence
if os.getenv("WANDB_API_KEY"):
key = os.environ["WANDB_API_KEY"]
return key
def flush(self):
"""
The api object keeps a local cache of runs, so if the state of a run may
change while your script is executing, you must clear the local cache with `api.flush()`
to get the latest values associated with the run."""
self._runs = {}
def _parse_path(self, path):
"""Parses paths in the following formats:
url: entity/project/runs/run_id
path: entity/project/run_id
docker: entity/project:run_id
entity is optional and will fall back to the currently logged-in user.
"""
run = self.settings['run']
project = self.settings['project']
entity = self.settings['entity']
parts = path.replace("/runs/", "/").strip("/ ").split("/")
if ":" in parts[-1]:
run = parts[-1].split(":")[-1]
parts[-1] = parts[-1].split(":")[0]
elif parts[-1]:
run = parts[-1]
if len(parts) > 1:
project = parts[1]
if entity and run == project:
project = parts[0]
else:
entity = parts[0]
else:
project = parts[0]
return (entity, project, run)
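# Rough examples of the accepted forms (entity/project/run ids are
# placeholders, and the exact result also depends on any defaults in
# self.settings):
#   "my_entity/my_project/runs/abc123" -> ("my_entity", "my_project", "abc123")
#   "my_entity/my_project/abc123"      -> ("my_entity", "my_project", "abc123")
#   "my_entity/my_project:abc123"      -> ("my_entity", "my_project", "abc123")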
def projects(self, entity=None, per_page=None):
"""Get projects for a given entity.
Args:
entity (str): Name of the entity requested. If None, will fall back to the
default entity passed to :obj:`Api`. If there is no default entity, a `ValueError` is raised.
per_page (int): Sets the page size for query pagination. None will use the default size.
Usually there is no reason to change this.
Returns:
A :obj:`Projects` object which is an iterable collection of :obj:`Project` objects.
"""
if entity is None:
entity = self.settings['entity']
if entity is None:
raise ValueError('entity must be passed as a parameter, or set in settings')
if entity not in self._projects:
self._projects[entity] = Projects(self.client, entity, per_page=per_page)
return self._projects[entity]
def runs(self, path="", filters={}, order="-created_at", per_page=None):
"""Return a set of runs from a project that match the filters provided.
You can filter by `config.*`, `summary.*`, `state`, `entity`, `createdAt`, etc.
Examples:
Find runs in my_project where config.experiment_name has been set to "foo"
```
api.runs(path="my_entity/my_project", {"config.experiment_name": "foo"})
```
Find runs in my_project where config.experiment_name has been set to "foo" or "bar"
```
api.runs(path="my_entity/my_project",
{"$or": [{"config.experiment_name": "foo"}, {"config.experiment_name": "bar"}]})
```
Find runs in my_project sorted by ascending loss
```
api.runs(path="my_entity/my_project", {"order": "+summary.loss"})
```
Args:
path (str): path to project, should be in the form: "entity/project"
filters (dict): queries for specific runs using the MongoDB query language.
You can filter by run properties such as config.key, summary.key, state, entity, createdAt, etc.
For example: {"config.experiment_name": "foo"} would find runs with a config entry
of experiment name set to "foo"
You can compose operations to make more complicated queries;
see the reference for the query language at https://docs.mongodb.com/manual/reference/operator/query
order (str): Order can be `created_at`, `heartbeat_at`, `config.*.value`, or `summary.*`.
If you prepend order with a +, the order is ascending.
If you prepend order with a -, the order is descending (default).
The default order is run.created_at from newest to oldest.
Returns:
A :obj:`Runs` object, which is an iterable collection of :obj:`Run` objects.
"""
entity, project, run = self._parse_path(path)
key = path + str(filters) + str(order)
if not self._runs.get(key):
self._runs[key] = Runs(self.client, entity, project,
filters=filters, order=order, per_page=per_page)
return self._runs[key]
@normalize_exceptions
def run(self, path=""):
"""Returns a single run by parsing path in the form entity/project/run_id.
Args:
path (str): path to run in the form entity/project/run_id.
If api.entity is set, this can be in the form project/run_id
and if api.project is set this can just be the run_id.
Returns:
A :obj:`Run` object.
"""
entity, project, run = self._parse_path(path)
if not self._runs.get(path):
self._runs[path] = Run(self.client, entity, project, run)
return self._runs[path]
@normalize_exceptions
def sweep(self, path=""):
"""
Returns a sweep by parsing path in the form entity/project/sweep_id.
Args:
path (str, optional): path to sweep in the form entity/project/sweep_id. If api.entity
is set, this can be in the form project/sweep_id and if api.project is set
this can just be the sweep_id.
Returns:
A :obj:`Sweep` object.
"""
entity, project, sweep_id = self._parse_path(path)
if not self._sweeps.get(path):
self._sweeps[path] = Sweep(self.client, entity, project, sweep_id)
return self._sweeps[path]
class Attrs(object):
def __init__(self, attrs):
self._attrs = attrs
def snake_to_camel(self, string):
camel = "".join([i.title() for i in string.split("_")])
return camel[0].lower() + camel[1:]
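# For example (illustrative only): snake_to_camel("summary_metrics") returns
# "summaryMetrics", which is how __getattr__ below maps snake_case attribute
# names onto the camelCase keys returned by the GraphQL API.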
def __getattr__(self, name):
key = self.snake_to_camel(name)
if key == "user":
raise AttributeError()
if key in self._attrs.keys():
return self._attrs[key]
elif name in self._attrs.keys():
return self._attrs[name]
else:
raise AttributeError(
"'{}' object has no attribute '{}'".format(repr(self), name))
class Paginator(object):
QUERY = None
def __init__(self, client, variables, per_page=50):
self.client = client
self.variables = variables
self.per_page = per_page
self.objects = []
self.index = -1
self.last_response = None
def __iter__(self):
self.index = -1
return self
def __len__(self):
if self.length is None:
self._load_page()
if self.length is None:
raise ValueError('Object doesn\'t provide length')
return self.length
@property
def length(self):
raise NotImplementedError()
@property
def more(self):
raise NotImplementedError()
@property
def cursor(self):
raise NotImplementedError()
def convert_objects(self):
raise NotImplementedError()
def update_variables(self):
self.variables.update(
{'perPage': self.per_page, 'cursor': self.cursor})
def _load_page(self):
if not self.more:
return False
self.update_variables()
self.last_response = self.client.execute(
self.QUERY, variable_values=self.variables)
self.objects.extend(self.convert_objects())
return True
def __getitem__(self, index):
loaded = True
while loaded and index > len(self.objects) - 1:
loaded = self._load_page()
return self.objects[index]
def __next__(self):
self.index += 1
if len(self.objects) <= self.index:
if not self._load_page():
raise StopIteration
return self.objects[self.index]
next = __next__
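# Subclasses below (Projects, Runs, Files) fill in the Paginator contract by
# overriding `length`, `more`, `cursor` and `convert_objects`; iteration then
# lazily fetches pages of `per_page` results with the class-level QUERY.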
class User(Attrs):
def __init__(self, attrs):
super(User, self).__init__(attrs)
class Projects(Paginator):
"""
An iterable collection of :obj:`Project` objects.
"""
QUERY = gql('''
query Projects($entity: String, $cursor: String, $perPage: Int = 50) {
models(entityName: $entity, after: $cursor, first: $perPage) {
edges {
node {
...ProjectFragment
}
cursor
}
pageInfo {
endCursor
hasNextPage
}
}
}
%s
''' % PROJECT_FRAGMENT)
def __init__(self, client, entity, per_page=50):
self.entity = entity
variables = {
'entity': self.entity,
}
super(Projects, self).__init__(client, variables, per_page)
@property
def length(self):
return None
@property
def more(self):
if self.last_response:
return self.last_response['models']['pageInfo']['hasNextPage']
else:
return True
@property
def cursor(self):
if self.last_response:
return self.last_response['models']['edges'][-1]['cursor']
else:
return None
def convert_objects(self):
return [Project(self.entity, p["node"]["name"], p["node"])
for p in self.last_response['models']['edges']]
def __repr__(self):
return "<Projects {}>".format(self.entity)
class Project(Attrs):
"""A project is a namespace for runs"""
def __init__(self, entity, project, attrs):
super(Project, self).__init__(dict(attrs))
self.entity = entity
def __repr__(self):
return "<Project {}/{}>".format(self.entity, self.name)
class Runs(Paginator):
"""An iterable collection of runs associated with a project and optional filter.
"""
QUERY = gql('''
query Runs($project: String!, $entity: String!, $cursor: String, $perPage: Int = 50, $order: String, $filters: JSONString) {
project(name: $project, entityName: $entity) {
runCount(filters: $filters)
readOnly
runs(filters: $filters, after: $cursor, first: $perPage, order: $order) {
edges {
node {
...RunFragment
}
cursor
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
%s
''' % RUN_FRAGMENT)
def __init__(self, client, entity, project, filters={}, order=None, per_page=50):
self.entity = entity
self.project = project
self.filters = filters
self.order = order
self._sweeps = {}
variables = {
'project': self.project, 'entity': self.entity, 'order': self.order,
'filters': json.dumps(self.filters)
}
super(Runs, self).__init__(client, variables, per_page)
@property
def length(self):
if self.last_response:
return self.last_response['project']['runCount']
else:
return None
@property
def more(self):
if self.last_response:
return self.last_response['project']['runs']['pageInfo']['hasNextPage']
else:
return True
@property
def cursor(self):
if self.last_response:
return self.last_response['project']['runs']['edges'][-1]['cursor']
else:
return None
def convert_objects(self):
objs = []
for run_response in self.last_response['project']['runs']['edges']:
run = Run(self.client, self.entity, self.project, run_response["node"]["name"], run_response["node"])
objs.append(run)
if run.sweep_name:
if run.sweep_name in self._sweeps:
sweep = self._sweeps[run.sweep_name]
else:
sweep = Sweep.get(self.client, self.entity, self.project,
run.sweep_name, withRuns=False)
self._sweeps[run.sweep_name] = sweep
run.sweep = sweep
if run.id not in sweep.runs_by_id:
sweep.runs_by_id[run.id] = run
sweep.runs.append(run)
return objs
def __repr__(self):
return "<Runs {}/{} ({})>".format(self.entity, self.project, len(self))
class Run(Attrs):
"""
A single run associated with an entity and project.
Attributes:
tags ([str]): a list of tags associated with the run
url (str): the url of this run
id (str): unique identifier for the run (defaults to eight characters)
name (str): the name of the run
state (str): one of: running, finished, crashed, aborted
config (dict): a dict of hyperparameters associated with the run
created_at (str): ISO timestamp when the run was started
system_metrics (dict): the latest system metrics recorded for the run
summary (dict): A mutable dict-like property that holds the current summary.
Calling update will persist any changes.
project (str): the project associated with the run
entity (str): the name of the entity associated with the run
user (str): the name of the user who created the run
path (str): Unique identifier [entity]/[project]/[run_id]
notes (str): Notes about the run
read_only (boolean): Whether the run is editable
history_keys (str): Keys of the history metrics that have been logged
with `wandb.log({key: value})`
"""
def __init__(self, client, entity, project, run_id, attrs={}):
"""
Run is always initialized by calling api.runs() where api is an instance of wandb.Api
"""
super(Run, self).__init__(dict(attrs))
self.client = client
self._entity = entity
self.project = project
self._files = {}
self._base_dir = env.get_dir(tempfile.gettempdir())
self.id = run_id
self.sweep = None
self.dir = os.path.join(self._base_dir, *self.path)
try:
os.makedirs(self.dir)
except OSError:
pass
self._summary = None
self.state = attrs.get("state", "not found")
self.load(force=not attrs)
@property
def entity(self):
return self._entity
@property
def username(self):
wandb.termwarn('Run.username is deprecated. Please use Run.entity instead.')
return self._entity
@property
def storage_id(self):
# For compatibility with wandb.Run, which has storage IDs
# in self.storage_id and names in self.id.
return self._attrs.get('id')
@property
def id(self):
return self._attrs.get('name')
@id.setter
def id(self, new_id):
attrs = self._attrs
attrs['name'] = new_id
return new_id
@property
def name(self):
return self._attrs.get('displayName')
@name.setter
def name(self, new_name):
self._attrs['displayName'] = new_name
return new_name
@classmethod
def create(cls, api, run_id=None, project=None, entity=None):
"""Create a run for the given project"""
run_id = run_id or util.generate_id()
project = project or api.settings.get("project")
mutation = gql('''
mutation upsertRun($project: String, $entity: String, $name: String!) {
upsertBucket(input: {modelName: $project, entityName: $entity, name: $name}) {
bucket {
project {
name
entity { name }
}
id
name
}
inserted
}
}
''')
variables = {'entity': entity,
'project': project, 'name': run_id}
res = api.client.execute(mutation, variable_values=variables)
res = res['upsertBucket']['bucket']
return Run(api.client, res["project"]["entity"]["name"], res["project"]["name"], res["name"], {
"id": res["id"],
"config": "{}",
"systemMetrics": "{}",
"summaryMetrics": "{}",
"tags": [],
"description": None,
"notes": None,
"state": "running"
})
def load(self, force=False):
query = gql('''
query Run($project: String!, $entity: String!, $name: String!) {
project(name: $project, entityName: $entity) {
run(name: $name) {
...RunFragment
}
}
}
%s
''' % RUN_FRAGMENT)
if force or not self._attrs:
response = self._exec(query)
if response is None or response.get('project') is None \
or response['project'].get('run') is None:
raise ValueError("Could not find run %s" % self)
self._attrs = response['project']['run']
self.state = self._attrs['state']
if self.sweep_name and not self.sweep:
# There may be a lot of runs. Don't bother pulling them all
# just for the sake of this one.
self.sweep = Sweep.get(self.client, self.entity, self.project,
self.sweep_name, withRuns=False)
self.sweep.runs.append(self)
self.sweep.runs_by_id[self.id] = self
self._attrs['summaryMetrics'] = json.loads(
self._attrs['summaryMetrics']) if self._attrs.get('summaryMetrics') else {}
self._attrs['systemMetrics'] = json.loads(
self._attrs['systemMetrics']) if self._attrs.get('systemMetrics') else {}
if self._attrs.get('user'):
self.user = User(self._attrs["user"])
config = {}
for key, value in six.iteritems(json.loads(self._attrs.get('config') or "{}")):
if isinstance(value, dict) and "value" in value:
config[key] = value["value"]
else:
config[key] = value
self._attrs['config'] = config
return self._attrs
@normalize_exceptions
def update(self):
"""
Persists changes to the run object to the wandb backend.
"""
mutation = gql('''
mutation upsertRun($id: String!, $description: String, $display_name: String, $notes: String, $tags: [String!], $config: JSONString!) {
upsertBucket(input: {id: $id, description: $description, displayName: $display_name, notes: $notes, tags: $tags, config: $config}) {
bucket {
...RunFragment
}
}
}
%s
''' % RUN_FRAGMENT)
res = self._exec(mutation, id=self.storage_id, tags=self.tags,
description=self.description, notes=self.notes, display_name=self.display_name, config=self.json_config)
self.summary.update()
@property
def json_config(self):
config = {}
for k, v in six.iteritems(self.config):
config[k] = {"value": v, "desc": None}
return json.dumps(config)
def _exec(self, query, **kwargs):
"""Execute a query against the cloud backend"""
variables = {'entity': self.entity,
'project': self.project, 'name': self.id}
variables.update(kwargs)
return self.client.execute(query, variable_values=variables)
def _sampled_history(self, keys, x_axis="_step", samples=500):
spec = {"keys": [x_axis] + keys, "samples": samples}
query = gql('''
query Run($project: String!, $entity: String!, $name: String!, $specs: [JSONString!]!) {
project(name: $project, entityName: $entity) {
run(name: $name) { sampledHistory(specs: $specs) }
}
}
''')
response = self._exec(query, specs=[json.dumps(spec)])
return [line for line in response['project']['run']['sampledHistory']]
def _full_history(self, samples=500, stream="default"):
node = "history" if stream == "default" else "events"
query = gql('''
query Run($project: String!, $entity: String!, $name: String!, $samples: Int) {
project(name: $project, entityName: $entity) {
run(name: $name) { %s(samples: $samples) }
}
}
''' % node)
response = self._exec(query, samples=samples)
return [json.loads(line) for line in response['project']['run'][node]]
@normalize_exceptions
def files(self, names=[], per_page=50):
"""
Args:
names (list): names of the requested files, if empty returns all files
per_page (int): number of results per page
Returns:
A :obj:`Files` object, which is an iterator over :obj:`File` objects.
"""
return Files(self.client, self, names, per_page)
@normalize_exceptions
def file(self, name):
"""
Args:
name (str): name of requested file.
Returns:
A :obj:`File` matching the name argument.
"""
return Files(self.client, self, [name])[0]
@normalize_exceptions
def history(self, samples=500, keys=None, x_axis="_step", pandas=True, stream="default"):
"""
Returns sampled history metrics for a run. This is simpler and faster if you are ok with
the history records being sampled.
Args:
samples (int, optional): The number of samples to return
pandas (bool, optional): Return a pandas dataframe
keys (list, optional): Only return metrics for specific keys
x_axis (str, optional): Use this metric as the xAxis defaults to _step
stream (str, optional): "default" for metrics, "system" for machine metrics
Returns:
If pandas=True returns a `pandas.DataFrame` of history metrics.
If pandas=False returns a list of dicts of history metrics.
"""
if keys and stream != "default":
wandb.termerror("stream must be default when specifying keys")
return []
elif keys:
lines = self._sampled_history(keys=keys, x_axis=x_axis, samples=samples)
else:
lines = self._full_history(samples=samples, stream=stream)
if pandas:
pandas = util.get_module("pandas")
if pandas:
lines = pandas.DataFrame.from_records(lines)
else:
print("Unable to load pandas, call history with pandas=False")
return lines
@normalize_exceptions
def scan_history(self, keys=None, page_size=1000):
"""
Returns an iterable collection of all history records for a run.
Example:
Export all the loss values for an example run
```python
run = api.run("l2k2/examples-numpy-boston/i0wt6xua")
history = run.scan_history(keys=["Loss"])
losses = [row["Loss"] for row in history]
```
Args:
keys ([str], optional): only fetch these keys, and only fetch rows that have all of the given keys defined.
page_size (int, optional): size of pages to fetch from the api
Returns:
An iterable collection over history records (dict).
"""
if keys is None:
return HistoryScan(run=self, client=self.client, page_size=page_size)
else:
return SampledHistoryScan(run=self, client=self.client, keys=keys, page_size=page_size)
@property
def summary(self):
if self._summary is None:
# TODO: fix the outdir issue
self._summary = HTTPSummary(
self, self.client, summary=self.summary_metrics)
return self._summary
@property
def path(self):
return [urllib.parse.quote_plus(str(self.entity)), urllib.parse.quote_plus(str(self.project)), urllib.parse.quote_plus(str(self.id))]
@property
def url(self):
path = self.path
path.insert(2, "runs")
return "https://app.wandb.ai/" + "/".join(path)
@property
def lastHistoryStep(self):
history_keys = self._attrs['historyKeys']
return history_keys['lastStep'] if 'lastStep' in history_keys else -1
def __repr__(self):
return "<Run {} ({})>".format("/".join(self.path), self.state)
class Sweep(Attrs):
"""A set of runs associated with a sweep
Instantiate with:
api.sweep(sweep_path)
Attributes:
runs (:obj:`Runs`): list of runs
id (str): sweep id
project (str): name of project
config (str): dictionary of sweep configuration
"""
QUERY = gql('''
query Sweep($project: String!, $entity: String, $name: String!, $withRuns: Boolean!) {
project(name: $project, entityName: $entity) {
sweep(sweepName: $name) {
id
name
bestLoss
config
runs @include(if: $withRuns) {
edges {
node {
...RunFragment
}
}
}
}
}
}
%s
''' % RUN_FRAGMENT)
def __init__(self, client, entity, project, sweep_id, attrs={}):
# TODO: Add agents / flesh this out.
super(Sweep, self).__init__(dict(attrs))
self.client = client
self._entity = entity
self.project = project
self.id = sweep_id
self.runs = []
self.runs_by_id = {}
self.load(force=not attrs)
@property
def entity(self):
return self._entity
@property
def username(self):
wandb.termwarn('Sweep.username is deprecated. Please use Sweep.entity instead.')
return self._entity
@property
def config(self):
return yaml.load(self._attrs["config"])
def load(self, force=False):
if force or not self._attrs:
sweep = self.get(self.client, self.entity, self.project, self.id)
if sweep is None:
raise ValueError("Could not find sweep %s" % self)
self._attrs = sweep._attrs
self.runs = sweep.runs
self.runs_by_id = sweep.runs_by_id
return self._attrs
@property
def path(self):
return [urllib.parse.quote_plus(str(self.entity)), urllib.parse.quote_plus(str(self.project)), urllib.parse.quote_plus(str(self.id))]
@classmethod
def get(cls, client, entity=None, project=None, sid=None, withRuns=True, query=None, **kwargs):
"""Execute a query against the cloud backend"""
if query is None:
query = cls.QUERY
variables = {'entity': entity, 'project': project, 'name': sid, 'withRuns': withRuns}
variables.update(kwargs)
response = client.execute(query, variable_values=variables)
if response.get('project') is None:
return None
elif response['project'].get('sweep') is None:
return None
sweep_response = response['project']['sweep']
# TODO: make this paginate
runs_response = sweep_response.get('runs')
runs = []
if runs_response:
for r in runs_response['edges']:
run = Run(client, entity, project, r["node"]["name"], r["node"])
runs.append(run)
del sweep_response['runs']
sweep = cls(client, entity, project, sid, attrs=sweep_response)
sweep.runs = runs
for run in runs:
sweep.runs_by_id[run.id] = run
run.sweep = sweep
return sweep
def __repr__(self):
return "<Sweep {}>".format("/".join(self.path))
class Files(Paginator):
"""Files is an iterable collection of :obj:`File` objects."""
QUERY = gql('''
query Run($project: String!, $entity: String!, $name: String!, $fileCursor: String,
$fileLimit: Int = 50, $fileNames: [String] = [], $upload: Boolean = false) {
project(name: $project, entityName: $entity) {
run(name: $name) {
fileCount
...RunFilesFragment
}
}
}
%s
''' % FILE_FRAGMENT)
def __init__(self, client, run, names=[], per_page=50, upload=False):
self.run = run
variables = {
'project': run.project, 'entity': run.entity, 'name': run.id,
'fileNames': names, 'upload': upload
}
super(Files, self).__init__(client, variables, per_page)
@property
def length(self):
if self.last_response:
return self.last_response['project']['run']['fileCount']
else:
return None
@property
def more(self):
if self.last_response:
return self.last_response['project']['run']['files']['pageInfo']['hasNextPage']
else:
return True
@property
def cursor(self):
if self.last_response:
return self.last_response['project']['run']['files']['edges'][-1]['cursor']
else:
return None
def update_variables(self):
self.variables.update({'fileLimit': self.per_page, 'fileCursor': self.cursor})
def convert_objects(self):
return [File(self.client, r["node"])
for r in self.last_response['project']['run']['files']['edges']]
def __repr__(self):
return "<Files {} ({})>".format("/".join(self.run.path), len(self))
class File(object):
"""File is a class associated with a file saved by wandb.
Attributes:
name (string): filename
url (string): path to file
md5 (string): md5 of file
mimetype (string): mimetype of file
updated_at (string): timestamp of last update
size (int): size of file in bytes
"""
def __init__(self, client, attrs):
self.client = client
self._attrs = attrs
if self.size == 0:
raise AttributeError(
"File {} does not exist.".format(self._attrs["name"]))
@property
def name(self):
return self._attrs["name"]
@property
def url(self):
return self._attrs["url"]
@property
def md5(self):
return self._attrs["md5"]
@property
def mimetype(self):
return self._attrs["mimetype"]
@property
def updated_at(self):
return self._attrs["updatedAt"]
@property
def size(self):
return int(self._attrs["sizeBytes"])
@normalize_exceptions
@retriable(retry_timedelta=datetime.timedelta(
seconds=10),
check_retry_fn=util.no_retry_auth,
retryable_exceptions=(RetryError, requests.RequestException))
def download(self, replace=False, root="."):
"""Downloads a file previously saved by a run from the wandb server.
Args:
replace (boolean): If `True`, download will overwrite a local file
if it exists. Defaults to `False`.
root (str): Local directory to save the file. Defaults to ".".
Raises:
`ValueError` if file already exists and replace=False
"""
response = requests.get(self._attrs["url"], auth=(
"api", Api().api_key), stream=True, timeout=5)
response.raise_for_status()
path = os.path.join(root, self._attrs["name"])
if os.path.exists(path) and not replace:
raise ValueError(
"File already exists, pass replace=True to overwrite")
if "/" in path:
dir = "/".join(path.split("/")[0:-1])
util.mkdir_exists_ok(dir)
with open(path, "wb") as file:
for data in response.iter_content(chunk_size=1024):
file.write(data)
return open(path, "r")
def __repr__(self):
return "<File {} ({})>".format(self.name, self.mimetype)
class HistoryScan(object):
QUERY = gql('''
query HistoryPage($entity: String!, $project: String!, $run: String!, $minStep: Int64!, $maxStep: Int64!, $pageSize: Int!) {
project(name: $project, entityName: $entity) {
run(name: $run) {
history(minStep: $minStep, maxStep: $maxStep, samples: $pageSize)
}
}
}
''')
def __init__(self, client, run, page_size=1000):
self.client = client
self.run = run
self.page_size = page_size
self.page_offset = 0 # minStep for next page
self.scan_offset = 0 # index within current page of rows
self.rows = [] # current page of rows
def __iter__(self):
self.page_offset = 0
self.scan_offset = 0
self.rows = []
return self
def __next__(self):
while True:
if self.scan_offset < len(self.rows):
row = self.rows[self.scan_offset]
self.scan_offset += 1
return row
if self.page_offset > self.run.lastHistoryStep:
raise StopIteration()
self._load_next()
@normalize_exceptions
@retriable(
check_retry_fn=util.no_retry_auth,
retryable_exceptions=(RetryError, requests.RequestException))
def _load_next(self):
variables = {
"entity": self.run.entity,
"project": self.run.project,
"run": self.run.id,
"minStep": int(self.page_offset),
"maxStep": int(self.page_offset + self.page_size),
"pageSize": int(self.page_size)
}
res = self.client.execute(self.QUERY, variable_values=variables)
res = res['project']['run']['history']
self.rows = [json.loads(row) for row in res]
self.page_offset += self.page_size
self.scan_offset = 0
class SampledHistoryScan(object):
QUERY = gql('''
query SampledHistoryPage($entity: String!, $project: String!, $run: String!, $spec: JSONString!) {
project(name: $project, entityName: $entity) {
run(name: $run) {
sampledHistory(specs: [$spec])
}
}
}
''')
def __init__(self, client, run, keys, page_size=1000):
self.client = client
self.run = run
self.keys = keys
self.page_size = page_size
self.page_offset = 0 # minStep for next page
self.scan_offset = 0 # index within current page of rows
self.rows = [] # current page of rows
def __iter__(self):
self.page_offset = 0
self.scan_offset = 0
self.rows = []
return self
def __next__(self):
while True:
if self.scan_offset < len(self.rows):
row = self.rows[self.scan_offset]
self.scan_offset += 1
return row
if self.page_offset >= self.run.lastHistoryStep:
raise StopIteration()
self._load_next()
@normalize_exceptions
@retriable(
check_retry_fn=util.no_retry_auth,
retryable_exceptions=(RetryError, requests.RequestException))
def _load_next(self):
variables = {
"entity": self.run.entity,
"project": self.run.project,
"run": self.run.id,
"spec": json.dumps({
"keys": self.keys,
"minStep": int(self.page_offset),
"maxStep": int(self.page_offset + self.page_size),
"samples": int(self.page_size)
})
}
res = self.client.execute(self.QUERY, variable_values=variables)
res = res['project']['run']['sampledHistory']
self.rows = res[0]
self.page_offset += self.page_size
self.scan_offset = 0
|
[] |
[] |
[
"WANDB_API_KEY"
] |
[]
|
["WANDB_API_KEY"]
|
python
| 1 | 0 | |
mosaik_powerfactory/powerfactory_tools.py
|
# Imports
import sys
import os
POWERFACTORY_PATH = "C:\\Program Files\\DIgSILENT\\PowerFactory 2016"
os.environ["PATH"] = POWERFACTORY_PATH + ";" + os.environ["PATH"]
sys.path.append(POWERFACTORY_PATH + "\\python\\3.5")
import powerfactory
import re
import json
# Constants
ATTRIBUTES_FILE = os.path.dirname(os.path.realpath(__file__)) +"\\elements_attributes.json"
def parse_attributes_list(source_file,element):
"""Parses a PowerFactory attributes file and add it to the
elements_attributes.json
Currently there is no method to get all attributes of and element in
PowerFactory. This module add the method attributes_for_model to the
PowerFactory.Application class, which returns the attributes saved in
elements_attributes.json.
Adding attributes to the file is done by this mehtod. It parses a saved
attributes list of one element form PowerFactory and add the attributes to
elements_attributes.json.
Args:
source_file: The path of a file containing the raw printed attributes
list form PowerFactory
element: The name of the element classs
Returns:
None
"""
attr = None
with open (source_file, "r") as sourceFile:
data = sourceFile.read()
attr = re.findall(r'[a-z]:[\w:]+',data)
attr = list(set(attr))
attr.sort()
data = None
with open (ATTRIBUTES_FILE, 'r') as destFile:
data = json.load(destFile)
with open (ATTRIBUTES_FILE, 'w') as destFile:
data[element]=attr
json.dump(data,destFile)
def attributes_for_model(model,attr_type=None):
"""Returns the attributes of a modell
It returns the attributes of a modell saved in elemets_attributes.json.
To add attributes to this file use the method
powerfactory_tools.parse_attributes_list.
Args:
model: The name of the element class e.g. ElmLod
attr_type: Optional parameter, to get only parameter of a special type,
e.g. 'm'
Returns:
A list of attributes names of the element class
"""
with open (ATTRIBUTES_FILE, 'r') as f:
data = json.load(f)
try:
attrs = data[model]
except KeyError:
attrs = []
if attr_type is not None:
attrs = [attr for attr in attrs if attr.startswith(attr_type)]
return attrs
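# Example usage (hypothetical attribute values; the result depends on what has
# already been parsed into elements_attributes.json):
#   attributes_for_model("ElmLod")                 # all known ElmLod attributes
#   attributes_for_model("ElmLod", attr_type="m")  # only attributes starting with "m"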
def elements_of_model(self, model, name="*"):
"""Returns all elements of the model class
This method extends the powerfactory.Application class and returns all
calculation-relevant elements of the given class in the active project.
Args:
model: The name of the element class e.g. ElmLod
name: Optional parameter, to filter the names of the elements. It may
contain "*"
Returns:
A list of powerfactory.DataObject matching the input parameter
"""
if self.GetActiveProject() is None:
raise Exception("You have first to activate a project")
return self.GetCalcRelevantObjects('%s.%s' % (name, model),1,1)
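# Example (hypothetical names): app.elements_of_model("ElmLod", name="Load*")
# returns every calculation-relevant load in the active project whose name
# starts with "Load".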
def relevant_models(self, model="Elm*"):
"""Returns all model classes in the current project
This method extends the powerfactory.Application class. It returns all
calculation-relevant model classes of the active project.
Args:
model: Optional parameter, to filter the model classes. It may contain a
"*"
Returns:
A list of the model class names
"""
elements = self.elements_of_model(model)
models = [elem.GetClassName() for elem in elements]
return list(set(models))
def get_grid(self,name):
"""Returns the grid with the given name.
This method extends the powerfactory.Application class. It returns the grid
with the given name.
Args:
name: The name of the grid
Returns:
A powerfactory.DataObject of the grid with the given name
"""
grids = self.elements_of_model("ElmNet",name)
if not grids:
raise Exception("No grid with name: %s" % name)
if len(grids) > 1:
raise Exception("Found more of one gird with name: %s" % name)
return grids[0]
def element_with_unique_name(self,name):
"""Returns the element with unique_name
This method extends the powerfactory.Application class. It returns the
element with the given unique_name.
Args:
name: The unique name of the element e.g. 'Netz\\Last.ElmLod'
Returns:
A powerfactory.DataObject with the element of unique_name
"""
elements_names = name.split('\\')
parent = self.get_grid(elements_names[0])
for e in elements_names[1:]:
parent = parent.GetContents(e)[0]
return parent
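# Illustrative sketch (assumed usage; the grid and element names are examples
# taken from the docstring above):
# load = app.element_with_unique_name("Netz\\Last.ElmLod")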
powerfactory.Application.elements_of_model = elements_of_model
powerfactory.Application.relevant_models = relevant_models
powerfactory.Application.get_grid = get_grid
powerfactory.Application.element_with_unique_name = element_with_unique_name
# Additional Methods for the Data Object Class
def unique_name(self):
"""Returns the unique_name of the element
This method extends the powerfactory.DataObject class. It returns the unique
name of the element.
Returns:
The unique_name of the element
"""
name = self.loc_name + "." + self.GetClassName()
parent = self.GetParent()
while parent.GetClassName() != 'IntPrjfolder':
name = parent.loc_name + "\\" + name
parent = parent.GetParent()
return name
def children_elements(self, name="*", model="Elm*"):
"""Returns all children elements of the object (Rekursiv)
This method extends the powerfactor.DataObject class. It returns all
rekursiv children of the object.
Args:
name: Optional parameter, it filters the name of the children, it may
contain "*"
mdoel: Optional parameter it filters teh model class of the children
Retruns:
List of powerfactory.DataObject with the children of the object
"""
return self.GetContents("%s.%s" % (name, model),True)
def attributes(self,attr_type=None):
"""Return the attributes of the object
Returns all attributes of the object from the elements_attributes.json
Args:
attr_type: Optional parameter, to get only parameter of a special type,
e.g. 'm'
Returns:
A list of attributes of the object
"""
class_name = self.GetClassName()
return attributes_for_model(class_name,attr_type)
powerfactory.DataObject.unique_name = unique_name
powerfactory.DataObject.children_elements = children_elements
powerfactory.DataObject.attributes = attributes
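# Illustrative sketch (assumed usage; the grid name "Netz" is an example):
# combine the patched helpers to list the unique names and 'm' attributes of
# all loads below a grid.
# grid = app.get_grid("Netz")
# for load in grid.children_elements(model="ElmLod"):
#     print(load.unique_name(), load.attributes("m"))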
|
[] |
[] |
["PATH"] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
internal/configs/configurator.go
|
package configs
import (
"bytes"
"crypto"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"os"
"strings"
"github.com/nginxinc/kubernetes-ingress/internal/k8s/secrets"
"github.com/nginxinc/nginx-prometheus-exporter/collector"
"github.com/spiffe/go-spiffe/workload"
"github.com/nginxinc/kubernetes-ingress/internal/configs/version2"
conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1"
"github.com/golang/glog"
api_v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1beta1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/nginxinc/kubernetes-ingress/internal/configs/version1"
"github.com/nginxinc/kubernetes-ingress/internal/nginx"
conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
latCollector "github.com/nginxinc/kubernetes-ingress/internal/metrics/collectors"
)
const (
pemFileNameForWildcardTLSSecret = "/etc/nginx/secrets/wildcard" // #nosec G101
appProtectPolicyFolder = "/etc/nginx/waf/nac-policies/"
appProtectLogConfFolder = "/etc/nginx/waf/nac-logconfs/"
appProtectUserSigFolder = "/etc/nginx/waf/nac-usersigs/"
appProtectUserSigIndex = "/etc/nginx/waf/nac-usersigs/index.conf"
)
// DefaultServerSecretPath is the full path to the Secret with a TLS cert and a key for the default server. #nosec G101
const DefaultServerSecretPath = "/etc/nginx/secrets/default"
// DefaultServerSecretName is the filename of the Secret with a TLS cert and a key for the default server.
const DefaultServerSecretName = "default"
// WildcardSecretName is the filename of the Secret with a TLS cert and a key for the ingress resources with TLS termination enabled but not secret defined.
const WildcardSecretName = "wildcard"
// JWTKeyKey is the key of the data field of a Secret where the JWK must be stored.
const JWTKeyKey = "jwk"
// CAKey is the key of the data field of a Secret where the cert must be stored.
const CAKey = "ca.crt"
// ClientSecretKey is the key of the data field of a Secret where the OIDC client secret must be stored.
const ClientSecretKey = "client-secret"
// SPIFFE filenames and modes
const (
spiffeCertFileName = "spiffe_cert.pem"
spiffeKeyFileName = "spiffe_key.pem"
spiffeBundleFileName = "spiffe_rootca.pem"
spiffeCertsFileMode = os.FileMode(0644)
spiffeKeyFileMode = os.FileMode(0600)
)
// ExtendedResources holds all extended configuration resources, for which Configurator configures NGINX.
type ExtendedResources struct {
IngressExes []*IngressEx
MergeableIngresses []*MergeableIngresses
VirtualServerExes []*VirtualServerEx
TransportServerExes []*TransportServerEx
}
type tlsPassthroughPair struct {
Host string
UnixSocket string
}
// metricLabelsIndex keeps the relations between Ingress Controller resources and NGINX configuration.
// Used to be able to add Prometheus Metrics variable labels grouped by resource key.
type metricLabelsIndex struct {
ingressUpstreams map[string][]string
virtualServerUpstreams map[string][]string
transportServerUpstreams map[string][]string
ingressServerZones map[string][]string
virtualServerServerZones map[string][]string
transportServerServerZones map[string][]string
ingressUpstreamPeers map[string][]string
virtualServerUpstreamPeers map[string][]string
transportServerUpstreamPeers map[string][]string
}
// Configurator configures NGINX.
type Configurator struct {
nginxManager nginx.Manager
staticCfgParams *StaticConfigParams
cfgParams *ConfigParams
templateExecutor *version1.TemplateExecutor
templateExecutorV2 *version2.TemplateExecutor
ingresses map[string]*IngressEx
minions map[string]map[string]bool
virtualServers map[string]*VirtualServerEx
tlsPassthroughPairs map[string]tlsPassthroughPair
isWildcardEnabled bool
isPlus bool
labelUpdater collector.LabelUpdater
metricLabelsIndex *metricLabelsIndex
isPrometheusEnabled bool
latencyCollector latCollector.LatencyCollector
isLatencyMetricsEnabled bool
}
// NewConfigurator creates a new Configurator.
func NewConfigurator(nginxManager nginx.Manager, staticCfgParams *StaticConfigParams, config *ConfigParams,
templateExecutor *version1.TemplateExecutor, templateExecutorV2 *version2.TemplateExecutor, isPlus bool, isWildcardEnabled bool,
labelUpdater collector.LabelUpdater, isPrometheusEnabled bool, latencyCollector latCollector.LatencyCollector, isLatencyMetricsEnabled bool) *Configurator {
metricLabelsIndex := &metricLabelsIndex{
ingressUpstreams: make(map[string][]string),
virtualServerUpstreams: make(map[string][]string),
transportServerUpstreams: make(map[string][]string),
ingressServerZones: make(map[string][]string),
virtualServerServerZones: make(map[string][]string),
transportServerServerZones: make(map[string][]string),
ingressUpstreamPeers: make(map[string][]string),
virtualServerUpstreamPeers: make(map[string][]string),
transportServerUpstreamPeers: make(map[string][]string),
}
cnf := Configurator{
nginxManager: nginxManager,
staticCfgParams: staticCfgParams,
cfgParams: config,
ingresses: make(map[string]*IngressEx),
virtualServers: make(map[string]*VirtualServerEx),
templateExecutor: templateExecutor,
templateExecutorV2: templateExecutorV2,
minions: make(map[string]map[string]bool),
tlsPassthroughPairs: make(map[string]tlsPassthroughPair),
isPlus: isPlus,
isWildcardEnabled: isWildcardEnabled,
labelUpdater: labelUpdater,
metricLabelsIndex: metricLabelsIndex,
isPrometheusEnabled: isPrometheusEnabled,
latencyCollector: latencyCollector,
isLatencyMetricsEnabled: isLatencyMetricsEnabled,
}
return &cnf
}
// AddOrUpdateDHParam creates a dhparam file with the content of the string.
func (cnf *Configurator) AddOrUpdateDHParam(content string) (string, error) {
return cnf.nginxManager.CreateDHParam(content)
}
func findRemovedKeys(currentKeys []string, newKeys map[string]bool) []string {
var removedKeys []string
for _, name := range currentKeys {
if _, exists := newKeys[name]; !exists {
removedKeys = append(removedKeys, name)
}
}
return removedKeys
}
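// Illustrative sketch (not part of the original file): findRemovedKeys returns
// the entries of the old index that no longer appear in the new one, e.g.
// removed := findRemovedKeys([]string{"a", "b"}, map[string]bool{"b": true})
// // removed == []string{"a"}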
func (cnf *Configurator) updateIngressMetricsLabels(ingEx *IngressEx, upstreams []version1.Upstream) {
upstreamServerLabels := make(map[string][]string)
newUpstreams := make(map[string]bool)
var newUpstreamsNames []string
upstreamServerPeerLabels := make(map[string][]string)
newPeers := make(map[string]bool)
var newPeersIPs []string
for _, u := range upstreams {
upstreamServerLabels[u.Name] = []string{u.UpstreamLabels.Service, u.UpstreamLabels.ResourceType, u.UpstreamLabels.ResourceName, u.UpstreamLabels.ResourceNamespace}
newUpstreams[u.Name] = true
newUpstreamsNames = append(newUpstreamsNames, u.Name)
for _, server := range u.UpstreamServers {
s := fmt.Sprintf("%v:%v", server.Address, server.Port)
podInfo := ingEx.PodsByIP[s]
labelKey := fmt.Sprintf("%v/%v", u.Name, s)
upstreamServerPeerLabels[labelKey] = []string{podInfo.Name}
if cnf.staticCfgParams.NginxServiceMesh {
ownerLabelVal := fmt.Sprintf("%s/%s", podInfo.OwnerType, podInfo.OwnerName)
upstreamServerPeerLabels[labelKey] = append(upstreamServerPeerLabels[labelKey], ownerLabelVal)
}
newPeers[labelKey] = true
newPeersIPs = append(newPeersIPs, labelKey)
}
}
key := fmt.Sprintf("%v/%v", ingEx.Ingress.Namespace, ingEx.Ingress.Name)
removedUpstreams := findRemovedKeys(cnf.metricLabelsIndex.ingressUpstreams[key], newUpstreams)
cnf.metricLabelsIndex.ingressUpstreams[key] = newUpstreamsNames
cnf.latencyCollector.UpdateUpstreamServerLabels(upstreamServerLabels)
cnf.latencyCollector.DeleteUpstreamServerLabels(removedUpstreams)
removedPeers := findRemovedKeys(cnf.metricLabelsIndex.ingressUpstreamPeers[key], newPeers)
cnf.metricLabelsIndex.ingressUpstreamPeers[key] = newPeersIPs
cnf.latencyCollector.UpdateUpstreamServerPeerLabels(upstreamServerPeerLabels)
cnf.latencyCollector.DeleteUpstreamServerPeerLabels(removedPeers)
cnf.latencyCollector.DeleteMetrics(removedPeers)
if cnf.isPlus {
cnf.labelUpdater.UpdateUpstreamServerLabels(upstreamServerLabels)
cnf.labelUpdater.DeleteUpstreamServerLabels(removedUpstreams)
cnf.labelUpdater.UpdateUpstreamServerPeerLabels(upstreamServerPeerLabels)
cnf.labelUpdater.DeleteUpstreamServerPeerLabels(removedPeers)
serverZoneLabels := make(map[string][]string)
newZones := make(map[string]bool)
var newZonesNames []string
for _, rule := range ingEx.Ingress.Spec.Rules {
serverZoneLabels[rule.Host] = []string{"ingress", ingEx.Ingress.Name, ingEx.Ingress.Namespace}
newZones[rule.Host] = true
newZonesNames = append(newZonesNames, rule.Host)
}
removedZones := findRemovedKeys(cnf.metricLabelsIndex.ingressServerZones[key], newZones)
cnf.metricLabelsIndex.ingressServerZones[key] = newZonesNames
cnf.labelUpdater.UpdateServerZoneLabels(serverZoneLabels)
cnf.labelUpdater.DeleteServerZoneLabels(removedZones)
}
}
func (cnf *Configurator) deleteIngressMetricsLabels(key string) {
cnf.latencyCollector.DeleteUpstreamServerLabels(cnf.metricLabelsIndex.ingressUpstreams[key])
cnf.latencyCollector.DeleteUpstreamServerPeerLabels(cnf.metricLabelsIndex.ingressUpstreamPeers[key])
cnf.latencyCollector.DeleteMetrics(cnf.metricLabelsIndex.ingressUpstreamPeers[key])
if cnf.isPlus {
cnf.labelUpdater.DeleteUpstreamServerLabels(cnf.metricLabelsIndex.ingressUpstreams[key])
cnf.labelUpdater.DeleteServerZoneLabels(cnf.metricLabelsIndex.ingressServerZones[key])
cnf.labelUpdater.DeleteUpstreamServerPeerLabels(cnf.metricLabelsIndex.ingressUpstreamPeers[key])
}
delete(cnf.metricLabelsIndex.ingressUpstreams, key)
delete(cnf.metricLabelsIndex.ingressServerZones, key)
delete(cnf.metricLabelsIndex.ingressUpstreamPeers, key)
}
// AddOrUpdateIngress adds or updates NGINX configuration for the Ingress resource.
func (cnf *Configurator) AddOrUpdateIngress(ingEx *IngressEx) (Warnings, error) {
warnings, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return warnings, fmt.Errorf("Error adding or updating ingress %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return warnings, fmt.Errorf("Error reloading NGINX for %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
return warnings, nil
}
func (cnf *Configurator) addOrUpdateIngress(ingEx *IngressEx) (Warnings, error) {
apResources := cnf.updateApResources(ingEx)
if jwtKey, exists := ingEx.Ingress.Annotations[JWTKeyAnnotation]; exists {
// LocalSecretStore will not set Path if the secret is not on the filesystem.
// However, NGINX configuration for an Ingress resource, to handle the case of a missing secret,
// relies on the path to be always configured.
ingEx.SecretRefs[jwtKey].Path = cnf.nginxManager.GetFilenameForSecret(ingEx.Ingress.Namespace + "-" + jwtKey)
}
isMinion := false
nginxCfg, warnings := generateNginxCfg(ingEx, apResources, isMinion, cnf.cfgParams, cnf.isPlus, cnf.IsResolverConfigured(),
cnf.staticCfgParams, cnf.isWildcardEnabled)
name := objectMetaToFileName(&ingEx.Ingress.ObjectMeta)
content, err := cnf.templateExecutor.ExecuteIngressConfigTemplate(&nginxCfg)
if err != nil {
return warnings, fmt.Errorf("Error generating Ingress Config %v: %w", name, err)
}
cnf.nginxManager.CreateConfig(name, content)
cnf.ingresses[name] = ingEx
if (cnf.isPlus && cnf.isPrometheusEnabled) || cnf.isLatencyMetricsEnabled {
cnf.updateIngressMetricsLabels(ingEx, nginxCfg.Upstreams)
}
return warnings, nil
}
// AddOrUpdateMergeableIngress adds or updates NGINX configuration for the Ingress resources with Mergeable Types.
func (cnf *Configurator) AddOrUpdateMergeableIngress(mergeableIngs *MergeableIngresses) (Warnings, error) {
warnings, err := cnf.addOrUpdateMergeableIngress(mergeableIngs)
if err != nil {
return warnings, fmt.Errorf("Error when adding or updating ingress %v/%v: %w", mergeableIngs.Master.Ingress.Namespace, mergeableIngs.Master.Ingress.Name, err)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return warnings, fmt.Errorf("Error reloading NGINX for %v/%v: %w", mergeableIngs.Master.Ingress.Namespace, mergeableIngs.Master.Ingress.Name, err)
}
return warnings, nil
}
func (cnf *Configurator) addOrUpdateMergeableIngress(mergeableIngs *MergeableIngresses) (Warnings, error) {
masterApResources := cnf.updateApResources(mergeableIngs.Master)
// LocalSecretStore will not set Path if the secret is not on the filesystem.
// However, NGINX configuration for an Ingress resource, to handle the case of a missing secret,
// relies on the path to be always configured.
if jwtKey, exists := mergeableIngs.Master.Ingress.Annotations[JWTKeyAnnotation]; exists {
mergeableIngs.Master.SecretRefs[jwtKey].Path = cnf.nginxManager.GetFilenameForSecret(mergeableIngs.Master.Ingress.Namespace + "-" + jwtKey)
}
for _, minion := range mergeableIngs.Minions {
if jwtKey, exists := minion.Ingress.Annotations[JWTKeyAnnotation]; exists {
minion.SecretRefs[jwtKey].Path = cnf.nginxManager.GetFilenameForSecret(minion.Ingress.Namespace + "-" + jwtKey)
}
}
nginxCfg, warnings := generateNginxCfgForMergeableIngresses(mergeableIngs, masterApResources, cnf.cfgParams, cnf.isPlus,
cnf.IsResolverConfigured(), cnf.staticCfgParams, cnf.isWildcardEnabled)
name := objectMetaToFileName(&mergeableIngs.Master.Ingress.ObjectMeta)
content, err := cnf.templateExecutor.ExecuteIngressConfigTemplate(&nginxCfg)
if err != nil {
return warnings, fmt.Errorf("Error generating Ingress Config %v: %w", name, err)
}
cnf.nginxManager.CreateConfig(name, content)
cnf.ingresses[name] = mergeableIngs.Master
cnf.minions[name] = make(map[string]bool)
for _, minion := range mergeableIngs.Minions {
minionName := objectMetaToFileName(&minion.Ingress.ObjectMeta)
cnf.minions[name][minionName] = true
}
if (cnf.isPlus && cnf.isPrometheusEnabled) || cnf.isLatencyMetricsEnabled {
cnf.updateIngressMetricsLabels(mergeableIngs.Master, nginxCfg.Upstreams)
}
return warnings, nil
}
func (cnf *Configurator) updateVirtualServerMetricsLabels(virtualServerEx *VirtualServerEx, upstreams []version2.Upstream) {
labels := make(map[string][]string)
newUpstreams := make(map[string]bool)
var newUpstreamsNames []string
upstreamServerPeerLabels := make(map[string][]string)
newPeers := make(map[string]bool)
var newPeersIPs []string
for _, u := range upstreams {
labels[u.Name] = []string{u.UpstreamLabels.Service, u.UpstreamLabels.ResourceType, u.UpstreamLabels.ResourceName, u.UpstreamLabels.ResourceNamespace}
newUpstreams[u.Name] = true
newUpstreamsNames = append(newUpstreamsNames, u.Name)
for _, server := range u.Servers {
podInfo := virtualServerEx.PodsByIP[server.Address]
labelKey := fmt.Sprintf("%v/%v", u.Name, server.Address)
upstreamServerPeerLabels[labelKey] = []string{podInfo.Name}
if cnf.staticCfgParams.NginxServiceMesh {
ownerLabelVal := fmt.Sprintf("%s/%s", podInfo.OwnerType, podInfo.OwnerName)
upstreamServerPeerLabels[labelKey] = append(upstreamServerPeerLabels[labelKey], ownerLabelVal)
}
newPeers[labelKey] = true
newPeersIPs = append(newPeersIPs, labelKey)
}
}
key := fmt.Sprintf("%v/%v", virtualServerEx.VirtualServer.Namespace, virtualServerEx.VirtualServer.Name)
removedPeers := findRemovedKeys(cnf.metricLabelsIndex.virtualServerUpstreamPeers[key], newPeers)
cnf.metricLabelsIndex.virtualServerUpstreamPeers[key] = newPeersIPs
cnf.latencyCollector.UpdateUpstreamServerPeerLabels(upstreamServerPeerLabels)
cnf.latencyCollector.DeleteUpstreamServerPeerLabels(removedPeers)
removedUpstreams := findRemovedKeys(cnf.metricLabelsIndex.virtualServerUpstreams[key], newUpstreams)
cnf.latencyCollector.UpdateUpstreamServerLabels(labels)
cnf.metricLabelsIndex.virtualServerUpstreams[key] = newUpstreamsNames
cnf.latencyCollector.DeleteUpstreamServerLabels(removedUpstreams)
cnf.latencyCollector.DeleteMetrics(removedPeers)
if cnf.isPlus {
cnf.labelUpdater.UpdateUpstreamServerPeerLabels(upstreamServerPeerLabels)
cnf.labelUpdater.DeleteUpstreamServerPeerLabels(removedPeers)
cnf.labelUpdater.UpdateUpstreamServerLabels(labels)
cnf.labelUpdater.DeleteUpstreamServerLabels(removedUpstreams)
serverZoneLabels := make(map[string][]string)
newZones := make(map[string]bool)
newZonesNames := []string{virtualServerEx.VirtualServer.Spec.Host}
serverZoneLabels[virtualServerEx.VirtualServer.Spec.Host] = []string{
"virtualserver", virtualServerEx.VirtualServer.Name, virtualServerEx.VirtualServer.Namespace,
}
newZones[virtualServerEx.VirtualServer.Spec.Host] = true
removedZones := findRemovedKeys(cnf.metricLabelsIndex.virtualServerServerZones[key], newZones)
cnf.metricLabelsIndex.virtualServerServerZones[key] = newZonesNames
cnf.labelUpdater.UpdateServerZoneLabels(serverZoneLabels)
cnf.labelUpdater.DeleteServerZoneLabels(removedZones)
}
}
func (cnf *Configurator) deleteVirtualServerMetricsLabels(key string) {
cnf.latencyCollector.DeleteUpstreamServerLabels(cnf.metricLabelsIndex.virtualServerUpstreams[key])
cnf.latencyCollector.DeleteUpstreamServerPeerLabels(cnf.metricLabelsIndex.virtualServerUpstreamPeers[key])
cnf.latencyCollector.DeleteMetrics(cnf.metricLabelsIndex.virtualServerUpstreamPeers[key])
if cnf.isPlus {
cnf.labelUpdater.DeleteUpstreamServerLabels(cnf.metricLabelsIndex.virtualServerUpstreams[key])
cnf.labelUpdater.DeleteServerZoneLabels(cnf.metricLabelsIndex.virtualServerServerZones[key])
cnf.labelUpdater.DeleteUpstreamServerPeerLabels(cnf.metricLabelsIndex.virtualServerUpstreamPeers[key])
}
delete(cnf.metricLabelsIndex.virtualServerUpstreams, key)
delete(cnf.metricLabelsIndex.virtualServerServerZones, key)
delete(cnf.metricLabelsIndex.virtualServerUpstreamPeers, key)
}
// AddOrUpdateVirtualServer adds or updates NGINX configuration for the VirtualServer resource.
func (cnf *Configurator) AddOrUpdateVirtualServer(virtualServerEx *VirtualServerEx) (Warnings, error) {
warnings, err := cnf.addOrUpdateVirtualServer(virtualServerEx)
if err != nil {
return warnings, fmt.Errorf("Error adding or updating VirtualServer %v/%v: %w", virtualServerEx.VirtualServer.Namespace, virtualServerEx.VirtualServer.Name, err)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return warnings, fmt.Errorf("Error reloading NGINX for VirtualServer %v/%v: %w", virtualServerEx.VirtualServer.Namespace, virtualServerEx.VirtualServer.Name, err)
}
return warnings, nil
}
func (cnf *Configurator) addOrUpdateOpenTracingTracerConfig(content string) error {
err := cnf.nginxManager.CreateOpenTracingTracerConfig(content)
return err
}
func (cnf *Configurator) addOrUpdateVirtualServer(virtualServerEx *VirtualServerEx) (Warnings, error) {
apResources := cnf.updateApResourcesForVs(virtualServerEx)
name := getFileNameForVirtualServer(virtualServerEx.VirtualServer)
vsc := newVirtualServerConfigurator(cnf.cfgParams, cnf.isPlus, cnf.IsResolverConfigured(), cnf.staticCfgParams)
vsCfg, warnings := vsc.GenerateVirtualServerConfig(virtualServerEx, apResources)
content, err := cnf.templateExecutorV2.ExecuteVirtualServerTemplate(&vsCfg)
if err != nil {
return warnings, fmt.Errorf("Error generating VirtualServer config: %v: %w", name, err)
}
cnf.nginxManager.CreateConfig(name, content)
cnf.virtualServers[name] = virtualServerEx
if (cnf.isPlus && cnf.isPrometheusEnabled) || cnf.isLatencyMetricsEnabled {
cnf.updateVirtualServerMetricsLabels(virtualServerEx, vsCfg.Upstreams)
}
return warnings, nil
}
// AddOrUpdateVirtualServers adds or updates NGINX configuration for multiple VirtualServer resources.
func (cnf *Configurator) AddOrUpdateVirtualServers(virtualServerExes []*VirtualServerEx) (Warnings, error) {
allWarnings := newWarnings()
for _, vsEx := range virtualServerExes {
warnings, err := cnf.addOrUpdateVirtualServer(vsEx)
if err != nil {
return allWarnings, err
}
allWarnings.Add(warnings)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return allWarnings, fmt.Errorf("Error when reloading NGINX when updating Policy: %w", err)
}
return allWarnings, nil
}
func (cnf *Configurator) updateTransportServerMetricsLabels(transportServerEx *TransportServerEx, upstreams []version2.StreamUpstream) {
labels := make(map[string][]string)
newUpstreams := make(map[string]bool)
var newUpstreamsNames []string
upstreamServerPeerLabels := make(map[string][]string)
newPeers := make(map[string]bool)
var newPeersIPs []string
for _, u := range upstreams {
labels[u.Name] = []string{u.UpstreamLabels.Service, u.UpstreamLabels.ResourceType, u.UpstreamLabels.ResourceName, u.UpstreamLabels.ResourceNamespace}
newUpstreams[u.Name] = true
newUpstreamsNames = append(newUpstreamsNames, u.Name)
for _, server := range u.Servers {
podName := transportServerEx.PodsByIP[server.Address]
labelKey := fmt.Sprintf("%v/%v", u.Name, server.Address)
upstreamServerPeerLabels[labelKey] = []string{podName}
newPeers[labelKey] = true
newPeersIPs = append(newPeersIPs, labelKey)
}
}
key := fmt.Sprintf("%v/%v", transportServerEx.TransportServer.Namespace, transportServerEx.TransportServer.Name)
removedPeers := findRemovedKeys(cnf.metricLabelsIndex.transportServerUpstreamPeers[key], newPeers)
cnf.metricLabelsIndex.transportServerUpstreamPeers[key] = newPeersIPs
removedUpstreams := findRemovedKeys(cnf.metricLabelsIndex.transportServerUpstreams[key], newUpstreams)
cnf.metricLabelsIndex.transportServerUpstreams[key] = newUpstreamsNames
cnf.labelUpdater.UpdateStreamUpstreamServerPeerLabels(upstreamServerPeerLabels)
cnf.labelUpdater.DeleteStreamUpstreamServerPeerLabels(removedPeers)
cnf.labelUpdater.UpdateStreamUpstreamServerLabels(labels)
cnf.labelUpdater.DeleteStreamUpstreamServerLabels(removedUpstreams)
streamServerZoneLabels := make(map[string][]string)
newZones := make(map[string]bool)
zoneName := transportServerEx.TransportServer.Spec.Listener.Name
if transportServerEx.TransportServer.Spec.Host != "" {
zoneName = transportServerEx.TransportServer.Spec.Host
}
newZonesNames := []string{zoneName}
streamServerZoneLabels[zoneName] = []string{
"transportserver", transportServerEx.TransportServer.Name, transportServerEx.TransportServer.Namespace,
}
newZones[zoneName] = true
removedZones := findRemovedKeys(cnf.metricLabelsIndex.transportServerServerZones[key], newZones)
cnf.metricLabelsIndex.transportServerServerZones[key] = newZonesNames
cnf.labelUpdater.UpdateStreamServerZoneLabels(streamServerZoneLabels)
cnf.labelUpdater.DeleteStreamServerZoneLabels(removedZones)
}
func (cnf *Configurator) deleteTransportServerMetricsLabels(key string) {
cnf.labelUpdater.DeleteStreamUpstreamServerLabels(cnf.metricLabelsIndex.transportServerUpstreams[key])
cnf.labelUpdater.DeleteStreamServerZoneLabels(cnf.metricLabelsIndex.transportServerServerZones[key])
cnf.labelUpdater.DeleteStreamUpstreamServerPeerLabels(cnf.metricLabelsIndex.transportServerUpstreamPeers[key])
delete(cnf.metricLabelsIndex.transportServerUpstreams, key)
delete(cnf.metricLabelsIndex.transportServerServerZones, key)
delete(cnf.metricLabelsIndex.transportServerUpstreamPeers, key)
}
// AddOrUpdateTransportServer adds or updates NGINX configuration for the TransportServer resource.
// It is the responsibility of the caller to check that the TransportServer references an existing listener.
func (cnf *Configurator) AddOrUpdateTransportServer(transportServerEx *TransportServerEx) error {
err := cnf.addOrUpdateTransportServer(transportServerEx)
if err != nil {
return fmt.Errorf("Error adding or updating TransportServer %v/%v: %w", transportServerEx.TransportServer.Namespace, transportServerEx.TransportServer.Name, err)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return fmt.Errorf("Error reloading NGINX for TransportServer %v/%v: %w", transportServerEx.TransportServer.Namespace, transportServerEx.TransportServer.Name, err)
}
return nil
}
func (cnf *Configurator) addOrUpdateTransportServer(transportServerEx *TransportServerEx) error {
name := getFileNameForTransportServer(transportServerEx.TransportServer)
tsCfg := generateTransportServerConfig(transportServerEx, transportServerEx.ListenerPort, cnf.isPlus)
content, err := cnf.templateExecutorV2.ExecuteTransportServerTemplate(tsCfg)
if err != nil {
return fmt.Errorf("Error generating TransportServer config %v: %w", name, err)
}
if cnf.isPlus && cnf.isPrometheusEnabled {
cnf.updateTransportServerMetricsLabels(transportServerEx, tsCfg.Upstreams)
}
cnf.nginxManager.CreateStreamConfig(name, content)
// update TLS Passthrough Hosts config in case we have a TLS Passthrough TransportServer
// only TLS Passthrough TransportServers have non-empty hosts
if transportServerEx.TransportServer.Spec.Host != "" {
key := generateNamespaceNameKey(&transportServerEx.TransportServer.ObjectMeta)
cnf.tlsPassthroughPairs[key] = tlsPassthroughPair{
Host: transportServerEx.TransportServer.Spec.Host,
UnixSocket: generateUnixSocket(transportServerEx),
}
return cnf.updateTLSPassthroughHostsConfig()
}
return nil
}
// GetVirtualServerRoutesForVirtualServer returns the virtualServerRoutes that a virtualServer
// references, if that virtualServer exists
func (cnf *Configurator) GetVirtualServerRoutesForVirtualServer(key string) []*conf_v1.VirtualServerRoute {
vsFileName := getFileNameForVirtualServerFromKey(key)
if cnf.virtualServers[vsFileName] != nil {
return cnf.virtualServers[vsFileName].VirtualServerRoutes
}
return nil
}
func (cnf *Configurator) updateTLSPassthroughHostsConfig() error {
cfg := generateTLSPassthroughHostsConfig(cnf.tlsPassthroughPairs)
content, err := cnf.templateExecutorV2.ExecuteTLSPassthroughHostsTemplate(cfg)
if err != nil {
return fmt.Errorf("Error generating config for TLS Passthrough Unix Sockets map: %w", err)
}
cnf.nginxManager.CreateTLSPassthroughHostsConfig(content)
return nil
}
func generateTLSPassthroughHostsConfig(tlsPassthroughPairs map[string]tlsPassthroughPair) *version2.TLSPassthroughHostsConfig {
cfg := version2.TLSPassthroughHostsConfig{}
for _, pair := range tlsPassthroughPairs {
cfg[pair.Host] = pair.UnixSocket
}
return &cfg
}
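// Illustrative sketch (assumption; the host and socket values below are made-up
// examples): the generated config simply maps each passthrough host to its
// unix socket.
// pairs := map[string]tlsPassthroughPair{
// "default/app": {Host: "app.example.com", UnixSocket: "unix:/var/lib/nginx/passthrough-default_app.sock"},
// }
// cfg := generateTLSPassthroughHostsConfig(pairs)
// // (*cfg)["app.example.com"] == "unix:/var/lib/nginx/passthrough-default_app.sock"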
func (cnf *Configurator) addOrUpdateCASecret(secret *api_v1.Secret) string {
name := objectMetaToFileName(&secret.ObjectMeta)
data := GenerateCAFileContent(secret)
return cnf.nginxManager.CreateSecret(name, data, nginx.TLSSecretFileMode)
}
func (cnf *Configurator) addOrUpdateJWKSecret(secret *api_v1.Secret) string {
name := objectMetaToFileName(&secret.ObjectMeta)
data := secret.Data[JWTKeyKey]
return cnf.nginxManager.CreateSecret(name, data, nginx.JWKSecretFileMode)
}
// AddOrUpdateResources adds or updates configuration for resources.
func (cnf *Configurator) AddOrUpdateResources(resources ExtendedResources) (Warnings, error) {
allWarnings := newWarnings()
for _, ingEx := range resources.IngressExes {
warnings, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating ingress %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, m := range resources.MergeableIngresses {
warnings, err := cnf.addOrUpdateMergeableIngress(m)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating mergeableIngress %v/%v: %w", m.Master.Ingress.Namespace, m.Master.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, vsEx := range resources.VirtualServerExes {
warnings, err := cnf.addOrUpdateVirtualServer(vsEx)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating VirtualServer %v/%v: %w", vsEx.VirtualServer.Namespace, vsEx.VirtualServer.Name, err)
}
allWarnings.Add(warnings)
}
for _, tsEx := range resources.TransportServerExes {
err := cnf.addOrUpdateTransportServer(tsEx)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating TransportServer %v/%v: %w", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name, err)
}
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return allWarnings, fmt.Errorf("Error when reloading NGINX when updating resources: %w", err)
}
return allWarnings, nil
}
func (cnf *Configurator) addOrUpdateTLSSecret(secret *api_v1.Secret) string {
name := objectMetaToFileName(&secret.ObjectMeta)
data := GenerateCertAndKeyFileContent(secret)
return cnf.nginxManager.CreateSecret(name, data, nginx.TLSSecretFileMode)
}
// AddOrUpdateSpecialTLSSecrets adds or updates a file with a TLS cert and a key from a Special TLS Secret (eg. DefaultServerSecret, WildcardTLSSecret).
func (cnf *Configurator) AddOrUpdateSpecialTLSSecrets(secret *api_v1.Secret, secretNames []string) error {
data := GenerateCertAndKeyFileContent(secret)
for _, secretName := range secretNames {
cnf.nginxManager.CreateSecret(secretName, data, nginx.TLSSecretFileMode)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return fmt.Errorf("Error when reloading NGINX when updating the special Secrets: %w", err)
}
return nil
}
// GenerateCertAndKeyFileContent generates a pem file content from the TLS secret.
func GenerateCertAndKeyFileContent(secret *api_v1.Secret) []byte {
var res bytes.Buffer
res.Write(secret.Data[api_v1.TLSCertKey])
res.WriteString("\n")
res.Write(secret.Data[api_v1.TLSPrivateKeyKey])
return res.Bytes()
}
// GenerateCAFileContent generates a pem file content from the TLS secret.
func GenerateCAFileContent(secret *api_v1.Secret) []byte {
var res bytes.Buffer
res.Write(secret.Data[CAKey])
return res.Bytes()
}
// DeleteIngress deletes NGINX configuration for the Ingress resource.
func (cnf *Configurator) DeleteIngress(key string) error {
name := keyToFileName(key)
cnf.nginxManager.DeleteConfig(name)
delete(cnf.ingresses, name)
delete(cnf.minions, name)
if (cnf.isPlus && cnf.isPrometheusEnabled) || cnf.isLatencyMetricsEnabled {
cnf.deleteIngressMetricsLabels(key)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return fmt.Errorf("Error when removing ingress %v: %w", key, err)
}
return nil
}
// DeleteVirtualServer deletes NGINX configuration for the VirtualServer resource.
func (cnf *Configurator) DeleteVirtualServer(key string) error {
name := getFileNameForVirtualServerFromKey(key)
cnf.nginxManager.DeleteConfig(name)
delete(cnf.virtualServers, name)
if (cnf.isPlus && cnf.isPrometheusEnabled) || cnf.isLatencyMetricsEnabled {
cnf.deleteVirtualServerMetricsLabels(key)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return fmt.Errorf("Error when removing VirtualServer %v: %w", key, err)
}
return nil
}
// DeleteTransportServer deletes NGINX configuration for the TransportServer resource.
func (cnf *Configurator) DeleteTransportServer(key string) error {
if cnf.isPlus && cnf.isPrometheusEnabled {
cnf.deleteTransportServerMetricsLabels(key)
}
err := cnf.deleteTransportServer(key)
if err != nil {
return fmt.Errorf("Error when removing TransportServer %v: %w", key, err)
}
err = cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate)
if err != nil {
return fmt.Errorf("Error when removing TransportServer %v: %w", key, err)
}
return nil
}
func (cnf *Configurator) deleteTransportServer(key string) error {
name := getFileNameForTransportServerFromKey(key)
cnf.nginxManager.DeleteStreamConfig(name)
// update TLS Passthrough Hosts config in case we have a TLS Passthrough TransportServer
if _, exists := cnf.tlsPassthroughPairs[key]; exists {
delete(cnf.tlsPassthroughPairs, key)
return cnf.updateTLSPassthroughHostsConfig()
}
return nil
}
// UpdateEndpoints updates endpoints in NGINX configuration for the Ingress resources.
func (cnf *Configurator) UpdateEndpoints(ingExes []*IngressEx) error {
reloadPlus := false
for _, ingEx := range ingExes {
// It is safe to ignore warnings here as no new warnings should appear when updating Endpoints for Ingresses
_, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return fmt.Errorf("Error adding or updating ingress %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
if cnf.isPlus {
err := cnf.updatePlusEndpoints(ingEx)
if err != nil {
glog.Warningf("Couldn't update the endpoints via the API: %v; reloading configuration instead", err)
reloadPlus = true
}
}
}
if cnf.isPlus && !reloadPlus {
glog.V(3).Info("No need to reload nginx")
return nil
}
if err := cnf.nginxManager.Reload(nginx.ReloadForEndpointsUpdate); err != nil {
return fmt.Errorf("Error reloading NGINX when updating endpoints: %w", err)
}
return nil
}
// UpdateEndpointsMergeableIngress updates endpoints in NGINX configuration for a mergeable Ingress resource.
func (cnf *Configurator) UpdateEndpointsMergeableIngress(mergeableIngresses []*MergeableIngresses) error {
reloadPlus := false
for i := range mergeableIngresses {
// It is safe to ignore warnings here as no new warnings should appear when updating Endpoints for Ingresses
_, err := cnf.addOrUpdateMergeableIngress(mergeableIngresses[i])
if err != nil {
return fmt.Errorf("Error adding or updating mergeableIngress %v/%v: %w", mergeableIngresses[i].Master.Ingress.Namespace, mergeableIngresses[i].Master.Ingress.Name, err)
}
if cnf.isPlus {
for _, ing := range mergeableIngresses[i].Minions {
err = cnf.updatePlusEndpoints(ing)
if err != nil {
glog.Warningf("Couldn't update the endpoints via the API: %v; reloading configuration instead", err)
reloadPlus = true
}
}
}
}
if cnf.isPlus && !reloadPlus {
glog.V(3).Info("No need to reload nginx")
return nil
}
if err := cnf.nginxManager.Reload(nginx.ReloadForEndpointsUpdate); err != nil {
return fmt.Errorf("Error reloading NGINX when updating endpoints for %v: %w", mergeableIngresses, err)
}
return nil
}
// UpdateEndpointsForVirtualServers updates endpoints in NGINX configuration for the VirtualServer resources.
func (cnf *Configurator) UpdateEndpointsForVirtualServers(virtualServerExes []*VirtualServerEx) error {
reloadPlus := false
for _, vs := range virtualServerExes {
// It is safe to ignore warnings here as no new warnings should appear when updating Endpoints for VirtualServers
_, err := cnf.addOrUpdateVirtualServer(vs)
if err != nil {
return fmt.Errorf("Error adding or updating VirtualServer %v/%v: %w", vs.VirtualServer.Namespace, vs.VirtualServer.Name, err)
}
if cnf.isPlus {
err := cnf.updatePlusEndpointsForVirtualServer(vs)
if err != nil {
glog.Warningf("Couldn't update the endpoints via the API: %v; reloading configuration instead", err)
reloadPlus = true
}
}
}
if cnf.isPlus && !reloadPlus {
glog.V(3).Info("No need to reload nginx")
return nil
}
if err := cnf.nginxManager.Reload(nginx.ReloadForEndpointsUpdate); err != nil {
return fmt.Errorf("Error reloading NGINX when updating endpoints: %w", err)
}
return nil
}
func (cnf *Configurator) updatePlusEndpointsForVirtualServer(virtualServerEx *VirtualServerEx) error {
upstreams := createUpstreamsForPlus(virtualServerEx, cnf.cfgParams, cnf.staticCfgParams)
for _, upstream := range upstreams {
serverCfg := createUpstreamServersConfigForPlus(upstream)
endpoints := createEndpointsFromUpstream(upstream)
err := cnf.nginxManager.UpdateServersInPlus(upstream.Name, endpoints, serverCfg)
if err != nil {
return fmt.Errorf("Couldn't update the endpoints for %v: %w", upstream.Name, err)
}
}
return nil
}
// UpdateEndpointsForTransportServers updates endpoints in NGINX configuration for the TransportServer resources.
func (cnf *Configurator) UpdateEndpointsForTransportServers(transportServerExes []*TransportServerEx) error {
reloadPlus := false
for _, tsEx := range transportServerExes {
err := cnf.addOrUpdateTransportServer(tsEx)
if err != nil {
return fmt.Errorf("Error adding or updating TransportServer %v/%v: %w", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name, err)
}
if cnf.isPlus {
err := cnf.updatePlusEndpointsForTransportServer(tsEx)
if err != nil {
glog.Warningf("Couldn't update the endpoints via the API: %v; reloading configuration instead", err)
reloadPlus = true
}
}
}
if cnf.isPlus && !reloadPlus {
glog.V(3).Info("No need to reload nginx")
return nil
}
if err := cnf.nginxManager.Reload(nginx.ReloadForEndpointsUpdate); err != nil {
return fmt.Errorf("Error reloading NGINX when updating endpoints: %w", err)
}
return nil
}
func (cnf *Configurator) updatePlusEndpointsForTransportServer(transportServerEx *TransportServerEx) error {
upstreamNamer := newUpstreamNamerForTransportServer(transportServerEx.TransportServer)
for _, u := range transportServerEx.TransportServer.Spec.Upstreams {
name := upstreamNamer.GetNameForUpstream(u.Name)
// subselector is not supported yet in TransportServer upstreams. That's why we pass "nil" here
endpointsKey := GenerateEndpointsKey(transportServerEx.TransportServer.Namespace, u.Service, nil, uint16(u.Port))
endpoints := transportServerEx.Endpoints[endpointsKey]
err := cnf.nginxManager.UpdateStreamServersInPlus(name, endpoints)
if err != nil {
return fmt.Errorf("Couldn't update the endpoints for %v: %w", u.Name, err)
}
}
return nil
}
func (cnf *Configurator) updatePlusEndpoints(ingEx *IngressEx) error {
ingCfg := parseAnnotations(ingEx, cnf.cfgParams, cnf.isPlus, cnf.staticCfgParams.MainAppProtectLoadModule, cnf.staticCfgParams.EnableInternalRoutes)
cfg := nginx.ServerConfig{
MaxFails: ingCfg.MaxFails,
MaxConns: ingCfg.MaxConns,
FailTimeout: ingCfg.FailTimeout,
SlowStart: ingCfg.SlowStart,
}
if ingEx.Ingress.Spec.Backend != nil {
endps, exists := ingEx.Endpoints[ingEx.Ingress.Spec.Backend.ServiceName+ingEx.Ingress.Spec.Backend.ServicePort.String()]
if exists {
if _, isExternalName := ingEx.ExternalNameSvcs[ingEx.Ingress.Spec.Backend.ServiceName]; isExternalName {
glog.V(3).Infof("Service %s is Type ExternalName, skipping NGINX Plus endpoints update via API", ingEx.Ingress.Spec.Backend.ServiceName)
} else {
name := getNameForUpstream(ingEx.Ingress, emptyHost, ingEx.Ingress.Spec.Backend)
err := cnf.nginxManager.UpdateServersInPlus(name, endps, cfg)
if err != nil {
return fmt.Errorf("Couldn't update the endpoints for %v: %w", name, err)
}
}
}
}
for _, rule := range ingEx.Ingress.Spec.Rules {
if rule.IngressRuleValue.HTTP == nil {
continue
}
for _, path := range rule.HTTP.Paths {
endps, exists := ingEx.Endpoints[path.Backend.ServiceName+path.Backend.ServicePort.String()]
if exists {
if _, isExternalName := ingEx.ExternalNameSvcs[path.Backend.ServiceName]; isExternalName {
glog.V(3).Infof("Service %s is Type ExternalName, skipping NGINX Plus endpoints update via API", path.Backend.ServiceName)
continue
}
name := getNameForUpstream(ingEx.Ingress, rule.Host, &path.Backend)
err := cnf.nginxManager.UpdateServersInPlus(name, endps, cfg)
if err != nil {
return fmt.Errorf("Couldn't update the endpoints for %v: %w", name, err)
}
}
}
}
return nil
}
// UpdateConfig updates NGINX configuration parameters.
func (cnf *Configurator) UpdateConfig(cfgParams *ConfigParams, ingExes []*IngressEx, mergeableIngs []*MergeableIngresses, virtualServerExes []*VirtualServerEx) (Warnings, error) {
cnf.cfgParams = cfgParams
allWarnings := newWarnings()
if cnf.cfgParams.MainServerSSLDHParamFileContent != nil {
fileName, err := cnf.nginxManager.CreateDHParam(*cnf.cfgParams.MainServerSSLDHParamFileContent)
if err != nil {
return allWarnings, fmt.Errorf("Error when updating dhparams: %w", err)
}
cfgParams.MainServerSSLDHParam = fileName
}
if cfgParams.MainTemplate != nil {
err := cnf.templateExecutor.UpdateMainTemplate(cfgParams.MainTemplate)
if err != nil {
return allWarnings, fmt.Errorf("Error when parsing the main template: %w", err)
}
}
if cfgParams.IngressTemplate != nil {
err := cnf.templateExecutor.UpdateIngressTemplate(cfgParams.IngressTemplate)
if err != nil {
return allWarnings, fmt.Errorf("Error when parsing the ingress template: %w", err)
}
}
if cfgParams.VirtualServerTemplate != nil {
err := cnf.templateExecutorV2.UpdateVirtualServerTemplate(cfgParams.VirtualServerTemplate)
if err != nil {
return allWarnings, fmt.Errorf("Error when parsing the VirtualServer template: %w", err)
}
}
mainCfg := GenerateNginxMainConfig(cnf.staticCfgParams, cfgParams)
mainCfgContent, err := cnf.templateExecutor.ExecuteMainConfigTemplate(mainCfg)
if err != nil {
return allWarnings, fmt.Errorf("Error when writing main Config")
}
cnf.nginxManager.CreateMainConfig(mainCfgContent)
for _, ingEx := range ingExes {
warnings, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return allWarnings, err
}
allWarnings.Add(warnings)
}
for _, mergeableIng := range mergeableIngs {
warnings, err := cnf.addOrUpdateMergeableIngress(mergeableIng)
if err != nil {
return allWarnings, err
}
allWarnings.Add(warnings)
}
for _, vsEx := range virtualServerExes {
warnings, err := cnf.addOrUpdateVirtualServer(vsEx)
if err != nil {
return allWarnings, err
}
allWarnings.Add(warnings)
}
if mainCfg.OpenTracingLoadModule {
if err := cnf.addOrUpdateOpenTracingTracerConfig(mainCfg.OpenTracingTracerConfig); err != nil {
return allWarnings, fmt.Errorf("Error when updating OpenTracing tracer config: %w", err)
}
}
cnf.nginxManager.SetOpenTracing(mainCfg.OpenTracingLoadModule)
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return allWarnings, fmt.Errorf("Error when updating config from ConfigMap: %w", err)
}
return allWarnings, nil
}
// UpdateTransportServers updates TransportServers.
func (cnf *Configurator) UpdateTransportServers(updatedTSExes []*TransportServerEx, deletedKeys []string) error {
for _, tsEx := range updatedTSExes {
err := cnf.addOrUpdateTransportServer(tsEx)
if err != nil {
return fmt.Errorf("Error adding or updating TransportServer %v/%v: %w", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name, err)
}
}
for _, key := range deletedKeys {
err := cnf.deleteTransportServer(key)
if err != nil {
return fmt.Errorf("Error when removing TransportServer %v: %w", key, err)
}
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return fmt.Errorf("Error when updating TransportServers: %w", err)
}
return nil
}
func keyToFileName(key string) string {
return strings.Replace(key, "/", "-", -1)
}
func objectMetaToFileName(meta *meta_v1.ObjectMeta) string {
return meta.Namespace + "-" + meta.Name
}
func generateNamespaceNameKey(objectMeta *meta_v1.ObjectMeta) string {
return fmt.Sprintf("%s/%s", objectMeta.Namespace, objectMeta.Name)
}
func getFileNameForVirtualServer(virtualServer *conf_v1.VirtualServer) string {
return fmt.Sprintf("vs_%s_%s", virtualServer.Namespace, virtualServer.Name)
}
func getFileNameForTransportServer(transportServer *conf_v1alpha1.TransportServer) string {
return fmt.Sprintf("ts_%s_%s", transportServer.Namespace, transportServer.Name)
}
func getFileNameForVirtualServerFromKey(key string) string {
replaced := strings.Replace(key, "/", "_", -1)
return fmt.Sprintf("vs_%s", replaced)
}
func getFileNameForTransportServerFromKey(key string) string {
replaced := strings.Replace(key, "/", "_", -1)
return fmt.Sprintf("ts_%s", replaced)
}
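// Illustrative sketch (assumption; "default/cafe" is an example key): the
// helpers above map a namespace/name key to the on-disk config file names.
// keyToFileName("default/cafe")                        // "default-cafe"
// getFileNameForVirtualServerFromKey("default/cafe")   // "vs_default_cafe"
// getFileNameForTransportServerFromKey("default/cafe") // "ts_default_cafe"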
// HasIngress checks if the Ingress resource is present in NGINX configuration.
func (cnf *Configurator) HasIngress(ing *networking.Ingress) bool {
name := objectMetaToFileName(&ing.ObjectMeta)
_, exists := cnf.ingresses[name]
return exists
}
// HasMinion checks if the minion Ingress resource of the master is present in NGINX configuration.
func (cnf *Configurator) HasMinion(master *networking.Ingress, minion *networking.Ingress) bool {
masterName := objectMetaToFileName(&master.ObjectMeta)
if _, exists := cnf.minions[masterName]; !exists {
return false
}
return cnf.minions[masterName][objectMetaToFileName(&minion.ObjectMeta)]
}
// IsResolverConfigured checks if a DNS resolver is present in NGINX configuration.
func (cnf *Configurator) IsResolverConfigured() bool {
return len(cnf.cfgParams.ResolverAddresses) != 0
}
// GetIngressCounts returns the total count of Ingress resources that are handled by the Ingress Controller grouped by their type
func (cnf *Configurator) GetIngressCounts() map[string]int {
counters := map[string]int{
"master": 0,
"regular": 0,
"minion": 0,
}
// cnf.ingresses contains only master and regular Ingress Resources
for _, ing := range cnf.ingresses {
if ing.Ingress.Annotations["nginx.org/mergeable-ingress-type"] == "master" {
counters["master"]++
} else {
counters["regular"]++
}
}
for _, min := range cnf.minions {
counters["minion"] += len(min)
}
return counters
}
// GetVirtualServerCounts returns the total count of VS/VSR resources that are handled by the Ingress Controller
func (cnf *Configurator) GetVirtualServerCounts() (vsCount int, vsrCount int) {
vsCount = len(cnf.virtualServers)
for _, vs := range cnf.virtualServers {
vsrCount += len(vs.VirtualServerRoutes)
}
return vsCount, vsrCount
}
// AddOrUpdateSpiffeCerts writes Spiffe certs and keys to disk and reloads NGINX
func (cnf *Configurator) AddOrUpdateSpiffeCerts(svidResponse *workload.X509SVIDs) error {
svid := svidResponse.Default()
privateKeyBytes, err := x509.MarshalPKCS8PrivateKey(svid.PrivateKey.(crypto.PrivateKey))
if err != nil {
return fmt.Errorf("error when marshaling private key: %w", err)
}
cnf.nginxManager.CreateSecret(spiffeKeyFileName, createSpiffeKey(privateKeyBytes), spiffeKeyFileMode)
cnf.nginxManager.CreateSecret(spiffeCertFileName, createSpiffeCert(svid.Certificates), spiffeCertsFileMode)
cnf.nginxManager.CreateSecret(spiffeBundleFileName, createSpiffeCert(svid.TrustBundle), spiffeCertsFileMode)
err = cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate)
if err != nil {
return fmt.Errorf("error when reloading NGINX when updating the SPIFFE Certs: %w", err)
}
return nil
}
func createSpiffeKey(content []byte) []byte {
return pem.EncodeToMemory(&pem.Block{
Type: "EC PRIVATE KEY",
Bytes: content,
})
}
func createSpiffeCert(certs []*x509.Certificate) []byte {
pemData := make([]byte, 0, len(certs))
for _, c := range certs {
b := &pem.Block{
Type: "CERTIFICATE",
Bytes: c.Raw,
}
pemData = append(pemData, pem.EncodeToMemory(b)...)
}
return pemData
}
func (cnf *Configurator) updateApResources(ingEx *IngressEx) (apRes AppProtectResources) {
if ingEx.AppProtectPolicy != nil {
policyFileName := appProtectPolicyFileNameFromUnstruct(ingEx.AppProtectPolicy)
policyContent := generateApResourceFileContent(ingEx.AppProtectPolicy)
cnf.nginxManager.CreateAppProtectResourceFile(policyFileName, policyContent)
apRes.AppProtectPolicy = policyFileName
}
for _, logConf := range ingEx.AppProtectLogs {
logConfFileName := appProtectLogConfFileNameFromUnstruct(logConf.LogConf)
logConfContent := generateApResourceFileContent(logConf.LogConf)
cnf.nginxManager.CreateAppProtectResourceFile(logConfFileName, logConfContent)
apRes.AppProtectLogconfs = append(apRes.AppProtectLogconfs, logConfFileName+" "+logConf.Dest)
}
return apRes
}
func (cnf *Configurator) updateApResourcesForVs(vsEx *VirtualServerEx) map[string]string {
apRes := make(map[string]string)
if vsEx.ApPolRefs != nil {
for apPolKey, apPol := range vsEx.ApPolRefs {
policyFileName := appProtectPolicyFileNameFromUnstruct(apPol)
policyContent := generateApResourceFileContent(apPol)
cnf.nginxManager.CreateAppProtectResourceFile(policyFileName, policyContent)
apRes[apPolKey] = policyFileName
}
}
if vsEx.LogConfRefs != nil {
for logConfKey, logConf := range vsEx.LogConfRefs {
logConfFileName := appProtectLogConfFileNameFromUnstruct(logConf)
logConfContent := generateApResourceFileContent(logConf)
cnf.nginxManager.CreateAppProtectResourceFile(logConfFileName, logConfContent)
apRes[logConfKey] = logConfFileName
}
}
return apRes
}
func appProtectPolicyFileNameFromUnstruct(unst *unstructured.Unstructured) string {
return fmt.Sprintf("%s%s_%s", appProtectPolicyFolder, unst.GetNamespace(), unst.GetName())
}
func appProtectLogConfFileNameFromUnstruct(unst *unstructured.Unstructured) string {
return fmt.Sprintf("%s%s_%s", appProtectLogConfFolder, unst.GetNamespace(), unst.GetName())
}
func appProtectUserSigFileNameFromUnstruct(unst *unstructured.Unstructured) string {
return fmt.Sprintf("%s%s_%s", appProtectUserSigFolder, unst.GetNamespace(), unst.GetName())
}
func generateApResourceFileContent(apResource *unstructured.Unstructured) []byte {
// Safe to ignore errors since validation already checked those
spec, _, _ := unstructured.NestedMap(apResource.Object, "spec")
data, _ := json.Marshal(spec)
return data
}
// AddOrUpdateAppProtectResource updates Ingresses and VirtualServers that use App Protect Resources
func (cnf *Configurator) AddOrUpdateAppProtectResource(resource *unstructured.Unstructured, ingExes []*IngressEx, mergeableIngresses []*MergeableIngresses, vsExes []*VirtualServerEx) (Warnings, error) {
allWarnings := newWarnings()
for _, ingEx := range ingExes {
warnings, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating ingress %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, m := range mergeableIngresses {
warnings, err := cnf.addOrUpdateMergeableIngress(m)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating mergeableIngress %v/%v: %w", m.Master.Ingress.Namespace, m.Master.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, vs := range vsExes {
warnings, err := cnf.addOrUpdateVirtualServer(vs)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating VirtualServer %v/%v: %w", vs.VirtualServer.Namespace, vs.VirtualServer.Name, err)
}
allWarnings.Add(warnings)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return allWarnings, fmt.Errorf("Error when reloading NGINX when updating %v: %w", resource.GetKind(), err)
}
return allWarnings, nil
}
// DeleteAppProtectPolicy updates Ingresses and VirtualServers that use AP Policy after that policy is deleted
func (cnf *Configurator) DeleteAppProtectPolicy(polNamespaceName string, ingExes []*IngressEx, mergeableIngresses []*MergeableIngresses, vsExes []*VirtualServerEx) (Warnings, error) {
if len(ingExes)+len(mergeableIngresses)+len(vsExes) > 0 {
fName := strings.Replace(polNamespaceName, "/", "_", 1)
polFileName := appProtectPolicyFolder + fName
cnf.nginxManager.DeleteAppProtectResourceFile(polFileName)
}
allWarnings := newWarnings()
for _, ingEx := range ingExes {
warnings, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating ingress %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, m := range mergeableIngresses {
warnings, err := cnf.addOrUpdateMergeableIngress(m)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating mergeableIngress %v/%v: %w", m.Master.Ingress.Namespace, m.Master.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, v := range vsExes {
warnings, err := cnf.addOrUpdateVirtualServer(v)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating VirtualServer %v/%v: %w", v.VirtualServer.Namespace, v.VirtualServer.Name, err)
}
allWarnings.Add(warnings)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return allWarnings, fmt.Errorf("Error when reloading NGINX when removing App Protect Policy: %w", err)
}
return allWarnings, nil
}
// DeleteAppProtectLogConf updates Ingresses and VirtualServers that use AP Log Configuration after that policy is deleted
func (cnf *Configurator) DeleteAppProtectLogConf(logConfNamespaceName string, ingExes []*IngressEx, mergeableIngresses []*MergeableIngresses, vsExes []*VirtualServerEx) (Warnings, error) {
if len(ingExes)+len(mergeableIngresses)+len(vsExes) > 0 {
fName := strings.Replace(logConfNamespaceName, "/", "_", 1)
logConfFileName := appProtectLogConfFolder + fName
cnf.nginxManager.DeleteAppProtectResourceFile(logConfFileName)
}
allWarnings := newWarnings()
for _, ingEx := range ingExes {
warnings, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating ingress %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, m := range mergeableIngresses {
warnings, err := cnf.addOrUpdateMergeableIngress(m)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating mergeableIngress %v/%v: %w", m.Master.Ingress.Namespace, m.Master.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, v := range vsExes {
warnings, err := cnf.addOrUpdateVirtualServer(v)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating VirtualServer %v/%v: %w", v.VirtualServer.Namespace, v.VirtualServer.Name, err)
}
allWarnings.Add(warnings)
}
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return allWarnings, fmt.Errorf("Error when reloading NGINX when removing App Protect Log Configuration: %w", err)
}
return allWarnings, nil
}
// RefreshAppProtectUserSigs writes all valid UDS files to fs and reloads NGINX
func (cnf *Configurator) RefreshAppProtectUserSigs(
userSigs []*unstructured.Unstructured, delPols []string, ingExes []*IngressEx, mergeableIngresses []*MergeableIngresses, vsExes []*VirtualServerEx,
) (Warnings, error) {
allWarnings := newWarnings()
for _, ingEx := range ingExes {
warnings, err := cnf.addOrUpdateIngress(ingEx)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating ingress %v/%v: %w", ingEx.Ingress.Namespace, ingEx.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, m := range mergeableIngresses {
warnings, err := cnf.addOrUpdateMergeableIngress(m)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating mergeableIngress %v/%v: %w", m.Master.Ingress.Namespace, m.Master.Ingress.Name, err)
}
allWarnings.Add(warnings)
}
for _, v := range vsExes {
warnings, err := cnf.addOrUpdateVirtualServer(v)
if err != nil {
return allWarnings, fmt.Errorf("Error adding or updating VirtualServer %v/%v: %w", v.VirtualServer.Namespace, v.VirtualServer.Name, err)
}
allWarnings.Add(warnings)
}
for _, file := range delPols {
cnf.nginxManager.DeleteAppProtectResourceFile(file)
}
var builder strings.Builder
cnf.nginxManager.ClearAppProtectFolder(appProtectUserSigFolder)
for _, sig := range userSigs {
fName := appProtectUserSigFileNameFromUnstruct(sig)
data := generateApResourceFileContent(sig)
cnf.nginxManager.CreateAppProtectResourceFile(fName, data)
fmt.Fprintf(&builder, "app_protect_user_defined_signatures %s;\n", fName)
}
cnf.nginxManager.CreateAppProtectResourceFile(appProtectUserSigIndex, []byte(builder.String()))
return allWarnings, cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate)
}
// AddInternalRouteConfig adds internal route server to NGINX Configuration and reloads NGINX
func (cnf *Configurator) AddInternalRouteConfig() error {
cnf.staticCfgParams.EnableInternalRoutes = true
cnf.staticCfgParams.PodName = os.Getenv("POD_NAME")
mainCfg := GenerateNginxMainConfig(cnf.staticCfgParams, cnf.cfgParams)
mainCfgContent, err := cnf.templateExecutor.ExecuteMainConfigTemplate(mainCfg)
if err != nil {
return fmt.Errorf("Error when writing main Config: %w", err)
}
cnf.nginxManager.CreateMainConfig(mainCfgContent)
if err := cnf.nginxManager.Reload(nginx.ReloadForOtherUpdate); err != nil {
return fmt.Errorf("Error when reloading nginx: %w", err)
}
return nil
}
// AddOrUpdateSecret adds or updates a secret.
func (cnf *Configurator) AddOrUpdateSecret(secret *api_v1.Secret) string {
switch secret.Type {
case secrets.SecretTypeCA:
return cnf.addOrUpdateCASecret(secret)
case secrets.SecretTypeJWK:
return cnf.addOrUpdateJWKSecret(secret)
case secrets.SecretTypeOIDC:
// OIDC ClientSecret is not required on the filesystem, it is written directly to the config file.
return ""
default:
return cnf.addOrUpdateTLSSecret(secret)
}
}
// DeleteSecret deletes a secret.
func (cnf *Configurator) DeleteSecret(key string) {
cnf.nginxManager.DeleteSecret(keyToFileName(key))
}
|
[
"\"POD_NAME\""
] |
[] |
[
"POD_NAME"
] |
[]
|
["POD_NAME"]
|
go
| 1 | 0 | |
lily/wsgi.py
|
"""
WSGI config for lily project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lily.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java
|
/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.operator.cluster.model;
import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.ContainerBuilder;
import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.SecurityContext;
import io.fabric8.kubernetes.api.model.Volume;
import io.fabric8.kubernetes.api.model.VolumeMount;
import io.fabric8.kubernetes.api.model.rbac.RoleBinding;
import io.fabric8.kubernetes.api.model.rbac.RoleBindingBuilder;
import io.fabric8.kubernetes.api.model.rbac.RoleRef;
import io.fabric8.kubernetes.api.model.rbac.RoleRefBuilder;
import io.fabric8.kubernetes.api.model.rbac.Subject;
import io.fabric8.kubernetes.api.model.rbac.SubjectBuilder;
import io.strimzi.api.kafka.model.CertificateAuthority;
import io.strimzi.api.kafka.model.ContainerEnvVar;
import io.strimzi.api.kafka.model.EntityOperatorSpec;
import io.strimzi.api.kafka.model.EntityUserOperatorSpec;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.Probe;
import io.strimzi.api.kafka.model.ProbeBuilder;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
import java.util.ArrayList;
import java.util.List;
import static io.strimzi.operator.cluster.model.ModelUtils.createHttpProbe;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
/**
* Represents the User Operator deployment
*/
public class EntityUserOperator extends AbstractModel {
protected static final String APPLICATION_NAME = "entity-user-operator";
protected static final String USER_OPERATOR_CONTAINER_NAME = "user-operator";
private static final String NAME_SUFFIX = "-entity-user-operator";
protected static final String METRICS_AND_LOG_CONFIG_SUFFIX = NAME_SUFFIX + "-config";
// Port configuration
protected static final int HEALTHCHECK_PORT = 8081;
protected static final String HEALTHCHECK_PORT_NAME = "healthcheck";
// User Operator configuration keys
public static final String ENV_VAR_RESOURCE_LABELS = "STRIMZI_LABELS";
public static final String ENV_VAR_KAFKA_BOOTSTRAP_SERVERS = "STRIMZI_KAFKA_BOOTSTRAP_SERVERS";
public static final String ENV_VAR_ZOOKEEPER_CONNECT = "STRIMZI_ZOOKEEPER_CONNECT";
public static final String ENV_VAR_WATCHED_NAMESPACE = "STRIMZI_NAMESPACE";
public static final String ENV_VAR_FULL_RECONCILIATION_INTERVAL_MS = "STRIMZI_FULL_RECONCILIATION_INTERVAL_MS";
public static final String ENV_VAR_ZOOKEEPER_SESSION_TIMEOUT_MS = "STRIMZI_ZOOKEEPER_SESSION_TIMEOUT_MS";
public static final String ENV_VAR_CLIENTS_CA_CERT_SECRET_NAME = "STRIMZI_CA_CERT_NAME";
public static final String ENV_VAR_CLIENTS_CA_KEY_SECRET_NAME = "STRIMZI_CA_KEY_NAME";
public static final String ENV_VAR_CLIENTS_CA_NAMESPACE = "STRIMZI_CA_NAMESPACE";
public static final String ENV_VAR_CLIENTS_CA_VALIDITY = "STRIMZI_CA_VALIDITY";
public static final String ENV_VAR_CLIENTS_CA_RENEWAL = "STRIMZI_CA_RENEWAL";
public static final String ENV_VAR_CLUSTER_CA_CERT_SECRET_NAME = "STRIMZI_CLUSTER_CA_CERT_SECRET_NAME";
public static final String ENV_VAR_EO_KEY_SECRET_NAME = "STRIMZI_EO_KEY_SECRET_NAME";
public static final Probe DEFAULT_HEALTHCHECK_OPTIONS = new ProbeBuilder().withTimeoutSeconds(EntityUserOperatorSpec.DEFAULT_HEALTHCHECK_TIMEOUT)
.withInitialDelaySeconds(EntityUserOperatorSpec.DEFAULT_HEALTHCHECK_DELAY).build();
private String kafkaBootstrapServers;
private String zookeeperConnect;
private String watchedNamespace;
private String resourceLabels;
private long reconciliationIntervalMs;
private long zookeeperSessionTimeoutMs;
private int clientsCaValidityDays;
private int clientsCaRenewalDays;
protected List<ContainerEnvVar> templateContainerEnvVars;
protected SecurityContext templateContainerSecurityContext;
/**
* @param resource Kubernetes/OpenShift resource with metadata containing the namespace and cluster name
*/
protected EntityUserOperator(HasMetadata resource) {
super(resource, APPLICATION_NAME);
this.name = userOperatorName(cluster);
this.readinessPath = "/";
this.livenessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS;
this.livenessPath = "/";
this.readinessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS;
// create a default configuration
this.kafkaBootstrapServers = defaultBootstrapServers(cluster);
this.zookeeperConnect = defaultZookeeperConnect(cluster);
this.watchedNamespace = namespace;
this.reconciliationIntervalMs = EntityUserOperatorSpec.DEFAULT_FULL_RECONCILIATION_INTERVAL_SECONDS * 1_000;
this.zookeeperSessionTimeoutMs = EntityUserOperatorSpec.DEFAULT_ZOOKEEPER_SESSION_TIMEOUT_SECONDS * 1_000;
this.resourceLabels = ModelUtils.defaultResourceLabels(cluster);
this.ancillaryConfigName = metricAndLogConfigsName(cluster);
this.logAndMetricsConfigVolumeName = "entity-user-operator-metrics-and-logging";
this.logAndMetricsConfigMountPath = "/opt/user-operator/custom-config/";
this.clientsCaValidityDays = CertificateAuthority.DEFAULT_CERTS_VALIDITY_DAYS;
this.clientsCaRenewalDays = CertificateAuthority.DEFAULT_CERTS_RENEWAL_DAYS;
}
public void setWatchedNamespace(String watchedNamespace) {
this.watchedNamespace = watchedNamespace;
}
public String getWatchedNamespace() {
return watchedNamespace;
}
public void setReconciliationIntervalMs(long reconciliationIntervalMs) {
this.reconciliationIntervalMs = reconciliationIntervalMs;
}
public long getReconciliationIntervalMs() {
return reconciliationIntervalMs;
}
public void setClientsCaValidityDays(int clientsCaValidityDays) {
this.clientsCaValidityDays = clientsCaValidityDays;
}
public long getClientsCaValidityDays() {
return this.clientsCaValidityDays;
}
public void setClientsCaRenewalDays(int clientsCaRenewalDays) {
this.clientsCaRenewalDays = clientsCaRenewalDays;
}
public long getClientsCaRenewalDays() {
return this.clientsCaRenewalDays;
}
public void setZookeeperSessionTimeoutMs(long zookeeperSessionTimeoutMs) {
this.zookeeperSessionTimeoutMs = zookeeperSessionTimeoutMs;
}
public long getZookeeperSessionTimeoutMs() {
return zookeeperSessionTimeoutMs;
}
protected static String defaultZookeeperConnect(String cluster) {
return String.format("%s:%d", "localhost", EntityUserOperatorSpec.DEFAULT_ZOOKEEPER_PORT);
}
public void setZookeeperConnect(String zookeeperConnect) {
this.zookeeperConnect = zookeeperConnect;
}
public String getZookeeperConnect() {
return zookeeperConnect;
}
public void setKafkaBootstrapServers(String kafkaBootstrapServers) {
this.kafkaBootstrapServers = kafkaBootstrapServers;
}
public String getKafkaBootstrapServers() {
return kafkaBootstrapServers;
}
protected static String defaultBootstrapServers(String cluster) {
return KafkaCluster.serviceName(cluster) + ":" + EntityUserOperatorSpec.DEFAULT_BOOTSTRAP_SERVERS_PORT;
}
public static String userOperatorName(String cluster) {
return cluster + NAME_SUFFIX;
}
public static String metricAndLogConfigsName(String cluster) {
return cluster + METRICS_AND_LOG_CONFIG_SUFFIX;
}
/**
* Get the name of the UO role binding given the name of the {@code cluster}.
* @param cluster The cluster name.
* @return The name of the role binding.
*/
public static String roleBindingName(String cluster) {
return "strimzi-" + cluster + "-entity-user-operator";
}
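// Illustrative example for roleBindingName above (added comment, not original source):
// roleBindingName("my-cluster") returns "strimzi-my-cluster-entity-user-operator".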
@Override
protected String getDefaultLogConfigFileName() {
return "entityUserOperatorDefaultLoggingProperties";
}
@Override
String getAncillaryConfigMapKeyLogConfig() {
return "log4j2.properties";
}
/**
* Create an Entity User Operator from given desired resource
*
* @param kafkaAssembly desired resource with cluster configuration containing the Entity User Operator one
* @return Entity User Operator instance, null if not configured in the ConfigMap
*/
public static EntityUserOperator fromCrd(Kafka kafkaAssembly) {
EntityUserOperator result = null;
EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator();
if (entityOperatorSpec != null) {
EntityUserOperatorSpec userOperatorSpec = entityOperatorSpec.getUserOperator();
if (userOperatorSpec != null) {
String namespace = kafkaAssembly.getMetadata().getNamespace();
result = new EntityUserOperator(kafkaAssembly);
result.setOwnerReference(kafkaAssembly);
String image = userOperatorSpec.getImage();
if (image == null) {
image = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_USER_OPERATOR_IMAGE, "strimzi/operator:latest");
}
result.setImage(image);
result.setWatchedNamespace(userOperatorSpec.getWatchedNamespace() != null ? userOperatorSpec.getWatchedNamespace() : namespace);
result.setReconciliationIntervalMs(userOperatorSpec.getReconciliationIntervalSeconds() * 1_000);
result.setZookeeperSessionTimeoutMs(userOperatorSpec.getZookeeperSessionTimeoutSeconds() * 1_000);
result.setLogging(userOperatorSpec.getLogging());
result.setGcLoggingEnabled(userOperatorSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : userOperatorSpec.getJvmOptions().isGcLoggingEnabled());
if (userOperatorSpec.getJvmOptions() != null) {
result.setJavaSystemProperties(userOperatorSpec.getJvmOptions().getJavaSystemProperties());
}
result.setJvmOptions(userOperatorSpec.getJvmOptions());
result.setResources(userOperatorSpec.getResources());
if (userOperatorSpec.getReadinessProbe() != null) {
result.setReadinessProbe(userOperatorSpec.getReadinessProbe());
}
if (userOperatorSpec.getLivenessProbe() != null) {
result.setLivenessProbe(userOperatorSpec.getLivenessProbe());
}
if (kafkaAssembly.getSpec().getClientsCa() != null) {
if (kafkaAssembly.getSpec().getClientsCa().getValidityDays() > 0) {
result.setClientsCaValidityDays(kafkaAssembly.getSpec().getClientsCa().getValidityDays());
}
if (kafkaAssembly.getSpec().getClientsCa().getRenewalDays() > 0) {
result.setClientsCaRenewalDays(kafkaAssembly.getSpec().getClientsCa().getRenewalDays());
}
}
}
}
return result;
}
@Override
protected List<Container> getContainers(ImagePullPolicy imagePullPolicy) {
return singletonList(new ContainerBuilder()
.withName(USER_OPERATOR_CONTAINER_NAME)
.withImage(getImage())
.withArgs("/opt/strimzi/bin/user_operator_run.sh")
.withEnv(getEnvVars())
.withPorts(singletonList(createContainerPort(HEALTHCHECK_PORT_NAME, HEALTHCHECK_PORT, "TCP")))
.withLivenessProbe(createHttpProbe(livenessPath + "healthy", HEALTHCHECK_PORT_NAME, livenessProbeOptions))
.withReadinessProbe(createHttpProbe(readinessPath + "ready", HEALTHCHECK_PORT_NAME, readinessProbeOptions))
.withResources(getResources())
.withVolumeMounts(getVolumeMounts())
.withImagePullPolicy(determineImagePullPolicy(imagePullPolicy, getImage()))
.withSecurityContext(templateContainerSecurityContext)
.build());
}
@Override
protected List<EnvVar> getEnvVars() {
List<EnvVar> varList = new ArrayList<>();
varList.add(buildEnvVar(ENV_VAR_ZOOKEEPER_CONNECT, zookeeperConnect));
varList.add(buildEnvVar(ENV_VAR_KAFKA_BOOTSTRAP_SERVERS, kafkaBootstrapServers));
varList.add(buildEnvVar(ENV_VAR_WATCHED_NAMESPACE, watchedNamespace));
varList.add(buildEnvVar(ENV_VAR_RESOURCE_LABELS, resourceLabels));
varList.add(buildEnvVar(ENV_VAR_FULL_RECONCILIATION_INTERVAL_MS, Long.toString(reconciliationIntervalMs)));
varList.add(buildEnvVar(ENV_VAR_ZOOKEEPER_SESSION_TIMEOUT_MS, Long.toString(zookeeperSessionTimeoutMs)));
varList.add(buildEnvVar(ENV_VAR_CLIENTS_CA_KEY_SECRET_NAME, KafkaCluster.clientsCaKeySecretName(cluster)));
varList.add(buildEnvVar(ENV_VAR_CLIENTS_CA_CERT_SECRET_NAME, KafkaCluster.clientsCaCertSecretName(cluster)));
varList.add(buildEnvVar(ENV_VAR_CLIENTS_CA_NAMESPACE, namespace));
varList.add(buildEnvVar(ENV_VAR_CLIENTS_CA_VALIDITY, Integer.toString(clientsCaValidityDays)));
varList.add(buildEnvVar(ENV_VAR_CLIENTS_CA_RENEWAL, Integer.toString(clientsCaRenewalDays)));
varList.add(buildEnvVar(ENV_VAR_CLUSTER_CA_CERT_SECRET_NAME, KafkaCluster.clusterCaCertSecretName(cluster)));
varList.add(buildEnvVar(ENV_VAR_EO_KEY_SECRET_NAME, EntityOperator.secretName(cluster)));
varList.add(buildEnvVar(ENV_VAR_STRIMZI_GC_LOG_ENABLED, String.valueOf(gcLoggingEnabled)));
EntityOperator.javaOptions(varList, getJvmOptions(), javaSystemProperties);
// Add shared environment variables used for all containers
varList.addAll(getSharedEnvVars());
addContainerEnvsToExistingEnvs(varList, templateContainerEnvVars);
return varList;
}
public List<Volume> getVolumes() {
return singletonList(VolumeUtils.createConfigMapVolume(logAndMetricsConfigVolumeName, ancillaryConfigName));
}
private List<VolumeMount> getVolumeMounts() {
return asList(VolumeUtils.createVolumeMount(logAndMetricsConfigVolumeName, logAndMetricsConfigMountPath),
VolumeUtils.createVolumeMount(EntityOperator.TLS_SIDECAR_EO_CERTS_VOLUME_NAME, EntityOperator.TLS_SIDECAR_EO_CERTS_VOLUME_MOUNT),
VolumeUtils.createVolumeMount(EntityOperator.TLS_SIDECAR_CA_CERTS_VOLUME_NAME, EntityOperator.TLS_SIDECAR_CA_CERTS_VOLUME_MOUNT));
}
public RoleBinding generateRoleBinding(String namespace, String watchedNamespace) {
Subject ks = new SubjectBuilder()
.withKind("ServiceAccount")
.withName(EntityOperator.entityOperatorServiceAccountName(cluster))
.withNamespace(namespace)
.build();
RoleRef roleRef = new RoleRefBuilder()
.withName(EntityOperator.EO_CLUSTER_ROLE_NAME)
.withApiGroup("rbac.authorization.k8s.io")
.withKind("ClusterRole")
.build();
RoleBinding rb = new RoleBindingBuilder()
.withNewMetadata()
.withName(roleBindingName(cluster))
.withNamespace(watchedNamespace)
.withOwnerReferences(createOwnerReference())
.withLabels(labels.toMap())
.endMetadata()
.withRoleRef(roleRef)
.withSubjects(singletonList(ks))
.build();
return rb;
}
public void setContainerEnvVars(List<ContainerEnvVar> envVars) {
templateContainerEnvVars = envVars;
}
public void setContainerSecurityContext(SecurityContext securityContext) {
templateContainerSecurityContext = securityContext;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
log/log.go
|
package log
import "go.uber.org/zap"
var (
Logger *zap.Logger
err error
)
func InitLogger() error {
Logger, err = zap.NewProduction()
if err != nil {
return err
}
// _ = zap.NewProductionConfig()
return nil
}
// ref: https://tomokazu-kozuma.com/minimum-setting-method-of-golangs-logger-zap/
func NewProductionConfig() zap.Config {
return zap.Config{
Level: zap.NewAtomicLevelAt(zap.InfoLevel),
Development: false,
Sampling: &zap.SamplingConfig{
Initial: 100,
Thereafter: 100,
},
Encoding: "json",
EncoderConfig: zap.NewProductionEncoderConfig(),
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
}
}
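// Illustrative usage sketch (added comment, not original source; assumes this package is
// imported under the name "log" and InitLogger is called once at startup):
//
//	if err := log.InitLogger(); err != nil {
//		panic(err)
//	}
//	log.Logger.Info("service started")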
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
metatrader/mt4.py
|
# -*- coding: utf-8 -*-
"""
@author: samuraitaiga
"""
import os
import sys
import logging
import subprocess
try:
import winreg
except:
import _winreg as winreg
import codecs
_mt4s = {}
_portable_mode = False
DEFAULT_MT4_NAME = 'default'
# mt4 program file path is written in origin.txt
ORIGIN_TXT = 'origin.txt'
MT4_EXE = 'terminal.exe'
class MT4(object):
"""
Notes:
meta trader4 class which can launch metatrader4.
this class will only launch metatrader4,
because metatrader4 can launch in either normal mode or backtest mode.
"""
prog_path = None
appdata_path = None
def __init__(self, prog_path):
self.prog_path_raw = prog_path
self.get_appdata_path
@property
def get_appdata_path(self):
if os.path.exists(self.prog_path_raw):
self.prog_path = self.prog_path_raw
if ((is_uac_enabled() == True) and (_portable_mode == False)):
self.appdata_path = get_appdata_path(self.prog_path_raw)
if self.appdata_path == None:
self.appdata_path = self.prog_path_raw
else:
self.appdata_path = self.prog_path_raw
else:
err_msg = 'prog_path_raw %s does not exist' % self.prog_path_raw
logging.error(err_msg)
raise IOError(err_msg)
if not has_mt4_subdirs(self.appdata_path):
err_msg = 'appdata path %s does not have sufficient dirs' % self.appdata_path
logging.error(err_msg)
raise IOError(err_msg)
def run(self, ea_name, conf=None, portable_mode=False):
"""
Notes:
run terminal.exe.
Args:
conf(string): abs path of conf file.
for details, see the mt4 help doc: Client Terminal/Tools/Configuration at Startup
"""
if conf:
if portable_mode == False:
prog = '"%s"' % os.path.join(self.prog_path, MT4_EXE)
else:
prog = '"%s" /portable' % os.path.join(self.prog_path, MT4_EXE)
conf = '"%s"' % conf
cmd = '%s %s' % (prog, conf)
p = subprocess.Popen(cmd)
p.wait()
if ((p.returncode == 0) or (p.returncode == 3)):
# Logging info will cause command prompt to wait for enter key which is not required in this case
#logging.info('cmd[%s] succeeded', cmd)
pass
else:
err_msg = 'run mt4 with cmd[%s] failed with %d error code!!' % (cmd, p.returncode)
logging.error(err_msg)
raise RuntimeError(err_msg)
def has_mt4_subdirs(appdata_path):
"""
Note:
check whether this appdata path has the required mt4 sub dirs.
currently checks backtest related dirs.
- history
- profiles
- tester
- MQL4\\Experts
- MQL4\\Libraries
Returns:
True if has required mt4 sub dirs,
False if doesn't have
"""
sub_dirs = [os.path.join(appdata_path, 'history'),
os.path.join(appdata_path, 'profiles'),
os.path.join(appdata_path, 'tester'),
os.path.join(appdata_path, 'MQL4', 'Experts'),
os.path.join(appdata_path, 'MQL4', 'Libraries')]
ret = True
for sub_dir in sub_dirs:
if not os.path.exists(sub_dir) and not os.path.isdir(sub_dir):
ret = False
return ret
def is_uac_enabled():
"""
Note:
check uac is enabled or not from reg value.
Returns:
True if uac is enabled, False if uac is disabled.
"""
reg_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System', 0, winreg.KEY_READ)
value, regtype = winreg.QueryValueEx(reg_key, 'EnableLUA')
if value == 1:
# reg value 1 means UAC is enabled
return True
else:
return False
def get_appdata_path(program_file_dir):
"""
Returns:
AppData path corresponding to provided program file path
e.g.: C:\\Users\\UserName\\AppData\\Roaming\\MetaQuotes\\Terminal\\7269C010EA668AEAE793BEE37C26ED57
"""
app_data = os.environ.get('APPDATA')
mt4_appdata_path = os.path.join(app_data, 'MetaQuotes', 'Terminal')
app_dir = None
walk_depth = 1
for root, dirs, files in os.walk(mt4_appdata_path):
# search ORIGIN_TXT until walk_depth
depth = root[len(mt4_appdata_path):].count(os.path.sep)
if ORIGIN_TXT in files:
origin_file = os.path.join(root, ORIGIN_TXT)
with codecs.open(origin_file, 'r', 'utf-16') as fp:
line = fp.read()
if line == program_file_dir:
app_dir = root
break
if depth >= walk_depth:
dirs[:] = []
if app_dir == None:
err_msg = '%s does not have an appdata dir.' % program_file_dir
logging.error(err_msg)
#raise IOError(err_msg)
return app_dir
def initizalize(ntpath, alias = DEFAULT_MT4_NAME):
"""
Notes:
initialize mt4
Args:
ntpath(string): mt4 install folder path.
e.g.: C:\\Program Files (x86)\\MetaTrader 4 - Alpari Japan
alias(string): mt4 object alias name. default value is DEFAULT_MT4_NAME
"""
global _mt4s
if alias not in _mt4s:
# store mt4 object with alias name
_mt4s[alias] = MT4(ntpath, )
else:
logging.info('%s is already initialized' % alias)
def get_mt4(alias = DEFAULT_MT4_NAME, portable_mode = False):
"""
Notes:
return mt4 object which is initialized.
Args:
alias(string): mt4 object alias name. default value is DEFAULT_MT4_NAME
Returns:
mt4 object(metatrader.backtest.MT4): instantiated mt4 object
"""
global _mt4s
global _portable_mode
_portable_mode = portable_mode
if alias in _mt4s:
_mt4s[alias].get_appdata_path
return _mt4s[alias]
else:
raise RuntimeError('mt4[%s] is not initialized.' % alias)
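# Illustrative usage sketch (added comment, not part of the original module; the install
# path, EA name and conf path below are hypothetical):
#   initizalize('C:\\Program Files (x86)\\MetaTrader 4 - Alpari Japan')
#   mt4 = get_mt4()
#   mt4.run('MyEA', conf='C:\\path\\to\\backtest.conf')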
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
daemon/networkdriver/utils.go
|
package networkdriver
import (
"encoding/binary"
"errors"
"fmt"
"net"
"github.com/docker/libcontainer/netlink"
)
var (
networkGetRoutesFct = netlink.NetworkGetRoutes
ErrNoDefaultRoute = errors.New("no default route")
)
func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error {
if len(nameservers) > 0 {
for _, ns := range nameservers {
_, nsNetwork, err := net.ParseCIDR(ns)
if err != nil {
return err
}
if NetworkOverlaps(toCheck, nsNetwork) {
return ErrNetworkOverlapsWithNameservers
}
}
}
return nil
}
func CheckRouteOverlaps(toCheck *net.IPNet) error {
networks, err := networkGetRoutesFct()
if err != nil {
return err
}
for _, network := range networks {
if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) {
return ErrNetworkOverlaps
}
}
return nil
}
// Detects overlap between one IPNet and another
func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {
if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) {
return true
}
if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) {
return true
}
return false
}
// Calculates the first and last IP addresses in an IPNet
func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
var (
netIP = network.IP.To4()
firstIP = netIP.Mask(network.Mask)
lastIP = net.IPv4(0, 0, 0, 0).To4()
)
for i := 0; i < len(lastIP); i++ {
lastIP[i] = netIP[i] | ^network.Mask[i]
}
return firstIP, lastIP
}
// Given a netmask, calculates the number of available hosts
func NetworkSize(mask net.IPMask) int32 {
m := net.IPv4Mask(0, 0, 0, 0)
for i := 0; i < net.IPv4len; i++ {
m[i] = ^mask[i]
}
return int32(binary.BigEndian.Uint32(m)) + 1
}
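// Illustrative values for NetworkRange and NetworkSize above (added comment, not original
// source): for 192.168.1.0/24, NetworkRange returns 192.168.1.0 and 192.168.1.255, and
// NetworkSize(255.255.255.0) returns 256.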
// Return the IPv4 address of a network interface
func GetIfaceAddr(name string) (net.Addr, error) { //name = docker0
iface, err := net.InterfaceByName(name)
if err != nil {
return nil, err
}
addrs, err := iface.Addrs()
if err != nil {
return nil, err
}
var addrs4 []net.Addr
for _, addr := range addrs {
ip := (addr.(*net.IPNet)).IP
if ip4 := ip.To4(); len(ip4) == net.IPv4len {
addrs4 = append(addrs4, addr)
}
}
switch {
case len(addrs4) == 0:
return nil, fmt.Errorf("Interface %v has no IP addresses", name)
case len(addrs4) > 1:
fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n",
name, (addrs4[0].(*net.IPNet)).IP)
}
return addrs4[0], nil
}
func GetDefaultRouteIface() (*net.Interface, error) {
rs, err := networkGetRoutesFct()
if err != nil {
return nil, fmt.Errorf("unable to get routes: %v", err)
}
for _, r := range rs {
if r.Default {
return r.Iface, nil
}
}
return nil, ErrNoDefaultRoute
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
cmd/gtrace/main.go
|
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"go/ast"
"go/build"
"go/importer"
"go/parser"
"go/token"
"go/types"
"io"
"log"
"os"
"path/filepath"
"reflect"
"strings"
"text/tabwriter"
_ "unsafe" // For go:linkname.
)
//go:linkname build_goodOSArchFile go/build.(*Context).goodOSArchFile
func build_goodOSArchFile(*build.Context, string, map[string]bool) bool
func main() {
var (
verbose bool
suffix string
stubSuffix string
write bool
buildTag string
)
flag.BoolVar(&verbose,
"v", false,
"output debug info",
)
flag.BoolVar(&write,
"w", false,
"write trace to file",
)
flag.StringVar(&suffix,
"file-suffix", "_gtrace",
"suffix for generated go files",
)
flag.StringVar(&stubSuffix,
"stub-file-suffix", "_stub",
"suffix for generated stub go files",
)
flag.StringVar(&buildTag,
"tag", "",
"build tag which needs to be passed to enable tracing",
)
flag.Parse()
if verbose {
log.SetFlags(log.Lshortfile)
} else {
log.SetFlags(0)
}
var (
// Reports whether we were called from go:generate.
isGoGenerate bool
gofile string
workDir string
err error
)
if gofile = os.Getenv("GOFILE"); gofile != "" {
// NOTE: GOFILE is always a filename without path.
isGoGenerate = true
workDir, err = os.Getwd()
if err != nil {
log.Fatal(err)
}
} else {
args := flag.Args()
if len(args) == 0 {
log.Fatal("no $GOFILE env nor file parameter were given")
}
gofile = filepath.Base(args[0])
workDir = filepath.Dir(args[0])
}
{
prefix := filepath.Join(filepath.Base(workDir), gofile)
log.SetPrefix("[" + prefix + "] ")
}
buildCtx := build.Default
if verbose {
var sb strings.Builder
prettyPrint(&sb, buildCtx)
log.Printf("build context:\n%s", sb.String())
}
buildPkg, err := buildCtx.ImportDir(workDir, build.IgnoreVendor)
if err != nil {
log.Fatal(err)
}
srcFilePath := filepath.Join(workDir, gofile)
if verbose {
log.Printf("source file: %s", srcFilePath)
log.Printf("package files: %v", buildPkg.GoFiles)
}
var writers []*Writer
if isGoGenerate || write {
// We should respect Go suffixes like `_linux.go`.
name, tags, ext := splitOSArchTags(&buildCtx, gofile)
if verbose {
log.Printf(
"split os/args tags of %q: %q %q %q",
gofile, name, tags, ext,
)
}
openFile := func(name string) (*os.File, func()) {
p := filepath.Join(workDir, name)
if verbose {
log.Printf("destination file path: %+v", p)
}
f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
log.Fatal(err)
}
return f, func() { f.Close() }
}
f, clean := openFile(name + suffix + tags + ext)
defer clean()
writers = append(writers, &Writer{
Context: buildCtx,
Output: f,
BuildTag: buildTag,
})
if buildTag != "" {
f, clean := openFile(name + suffix + stubSuffix + tags + ext)
defer clean()
writers = append(writers, &Writer{
Context: buildCtx,
Output: f,
BuildTag: buildTag,
Stub: true,
})
}
} else {
writers = append(writers, &Writer{
Context: buildCtx,
Output: os.Stdout,
BuildTag: buildTag,
Stub: true,
})
}
var (
pkgFiles = make([]*os.File, 0, len(buildPkg.GoFiles))
astFiles = make([]*ast.File, 0, len(buildPkg.GoFiles))
buildConstraints []string
)
fset := token.NewFileSet()
for _, name := range buildPkg.GoFiles {
base, _, _ := splitOSArchTags(&buildCtx, name)
if isGenerated(base, suffix) {
// Skip gtrace generated files.
if verbose {
log.Printf("skipped package file: %q", name)
}
continue
}
if verbose {
log.Printf("parsing package file: %q", name)
}
file, err := os.Open(filepath.Join(workDir, name))
if err != nil {
log.Fatal(err)
}
defer file.Close()
ast, err := parser.ParseFile(fset, file.Name(), file, parser.ParseComments)
if err != nil {
log.Fatalf("parse %q error: %v", file.Name(), err)
}
pkgFiles = append(pkgFiles, file)
astFiles = append(astFiles, ast)
if name == gofile {
if _, err := file.Seek(0, io.SeekStart); err != nil {
log.Fatal(err)
}
buildConstraints, err = scanBuildConstraints(file)
if err != nil {
log.Fatal(err)
}
}
}
info := types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
}
conf := types.Config{
IgnoreFuncBodies: true,
DisableUnusedImportCheck: true,
Importer: importer.ForCompiler(fset, "source", nil),
}
pkg, err := conf.Check(".", fset, astFiles, &info)
if err != nil {
log.Fatalf("type error: %v", err)
}
var items []*GenItem
for i, astFile := range astFiles {
if pkgFiles[i].Name() != srcFilePath {
continue
}
var (
depth int
item *GenItem
)
logf := func(s string, args ...interface{}) {
if !verbose {
return
}
log.Print(
strings.Repeat(" ", depth*4),
fmt.Sprintf(s, args...),
)
}
ast.Inspect(astFile, func(n ast.Node) (next bool) {
logf("%T", n)
if n == nil {
item = nil
depth--
return true
}
defer func() {
if next {
depth++
}
}()
switch v := n.(type) {
case
*ast.FuncDecl,
*ast.ValueSpec:
return false
case *ast.Ident:
logf("ident %q", v.Name)
if item != nil {
item.Ident = v
}
return false
case *ast.CommentGroup:
for i, c := range v.List {
logf("#%d comment %q", i, c.Text)
text, ok := TrimConfigComment(c.Text)
if ok {
if item == nil {
item = &GenItem{}
}
if err := item.ParseComment(text); err != nil {
log.Fatalf(
"malformed comment string: %q: %v",
text, err,
)
}
}
}
return false
case *ast.StructType:
logf("struct %+v", v)
if item != nil {
item.StructType = v
items = append(items, item)
item = nil
}
return false
}
return true
})
}
p := Package{
Package: pkg,
BuildConstraints: buildConstraints,
}
traces := make(map[string]*Trace)
for _, item := range items {
t := &Trace{
Name: item.Ident.Name,
Flag: item.Flag,
}
p.Traces = append(p.Traces, t)
traces[item.Ident.Name] = t
}
for i, item := range items {
t := p.Traces[i]
for _, field := range item.StructType.Fields.List {
if _, ok := field.Type.(*ast.FuncType); !ok {
continue
}
name := field.Names[0].Name
fn, ok := field.Type.(*ast.FuncType)
if !ok {
continue
}
f, err := buildFunc(info, traces, fn)
if err != nil {
log.Printf(
"skipping hook %s due to error: %v",
name, err,
)
continue
}
var config GenConfig
if doc := field.Doc; doc != nil {
for _, line := range doc.List {
text, ok := TrimConfigComment(line.Text)
if !ok {
continue
}
err := config.ParseComment(text)
if err != nil {
log.Fatalf(
"malformed comment string: %q: %v",
text, err,
)
}
}
}
t.Hooks = append(t.Hooks, Hook{
Name: name,
Func: f,
Flag: item.GenConfig.Flag | config.Flag,
})
}
}
for _, w := range writers {
if err := w.Write(p); err != nil {
log.Fatal(err)
}
}
log.Println("OK")
}
func buildFunc(info types.Info, traces map[string]*Trace, fn *ast.FuncType) (ret *Func, err error) {
ret = new(Func)
for _, p := range fn.Params.List {
t := info.TypeOf(p.Type)
if t == nil {
log.Fatalf("unknown type: %s", p.Type)
}
var names []string
for _, n := range p.Names {
name := n.Name
if name == "_" {
name = ""
}
names = append(names, name)
}
if len(names) == 0 {
// Case where arg is not named.
names = []string{""}
}
for _, name := range names {
ret.Params = append(ret.Params, Param{
Name: name,
Type: t,
})
}
}
if fn.Results == nil {
return ret, nil
}
if len(fn.Results.List) > 1 {
return nil, fmt.Errorf(
"unsupported number of function results",
)
}
r := fn.Results.List[0]
switch x := r.Type.(type) {
case *ast.FuncType:
result, err := buildFunc(info, traces, x)
if err != nil {
return nil, err
}
ret.Result = append(ret.Result, result)
return ret, nil
case *ast.Ident:
if t, ok := traces[x.Name]; ok {
t.Nested = true
ret.Result = append(ret.Result, t)
return ret, nil
}
}
return nil, fmt.Errorf(
"unsupported function result type %s",
info.TypeOf(r.Type),
)
}
func splitOSArchTags(ctx *build.Context, name string) (base, tags, ext string) {
fileTags := make(map[string]bool)
build_goodOSArchFile(ctx, name, fileTags)
ext = filepath.Ext(name)
switch len(fileTags) {
case 0: // *
base = strings.TrimSuffix(name, ext)
case 1: // *_GOOS or *_GOARCH
i := strings.LastIndexByte(name, '_')
base = name[:i]
tags = strings.TrimSuffix(name[i:], ext)
case 2: // *_GOOS_GOARCH
var i int
i = strings.LastIndexByte(name, '_')
i = strings.LastIndexByte(name[:i], '_')
base = name[:i]
tags = strings.TrimSuffix(name[i:], ext)
default:
panic(fmt.Sprintf(
"gtrace: internal error: unexpected number of OS/arch tags: %d",
len(fileTags),
))
}
return
}
type Package struct {
*types.Package
BuildConstraints []string
Traces []*Trace
}
type Trace struct {
Name string
Hooks []Hook
Flag GenFlag
Nested bool
}
func (*Trace) isFuncResult() bool { return true }
type Hook struct {
Name string
Func *Func
Flag GenFlag
}
type Param struct {
Name string // Might be empty.
Type types.Type
}
type FuncResult interface {
isFuncResult() bool
}
type Func struct {
Params []Param
Result []FuncResult // 0 or 1.
}
func (*Func) isFuncResult() bool { return true }
func (f *Func) HasResult() bool {
return len(f.Result) > 0
}
type GenFlag uint8
func (f GenFlag) Has(x GenFlag) bool {
return f&x != 0
}
const (
GenZero GenFlag = 1 << iota >> 1
GenShortcut
GenShortcutPublic
GenContext
GenAll = ^GenFlag(0)
)
type GenConfig struct {
Flag GenFlag
}
func TrimConfigComment(text string) (string, bool) {
s := strings.TrimPrefix(text, "//gtrace:")
if text != s {
return s, true
}
return "", false
}
func (g *GenConfig) ParseComment(text string) (err error) {
prefix, text := split(text, ' ')
switch prefix {
case "gen":
case "set":
return g.ParseParameter(text)
default:
return fmt.Errorf("unknown prefix: %q", prefix)
}
return nil
}
func (g *GenConfig) ParseParameter(text string) (err error) {
text = strings.TrimSpace(text)
param, _ := split(text, '=')
if param == "" {
return nil
}
switch param {
case "shortcut":
g.Flag |= GenShortcut
case "Shortcut", "SHORTCUT":
g.Flag |= GenShortcutPublic
case "context":
g.Flag |= GenContext
default:
return fmt.Errorf("unexpected parameter: %q", param)
}
return nil
}
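// Illustrative example of the //gtrace: directives handled by ParseComment and
// ParseParameter above (added comment, not original source; the struct and hook names
// below are hypothetical):
//
//	//gtrace:gen
//	//gtrace:set shortcut
//	type ConnTrace struct {
//		OnDial func(addr string) func(err error)
//	}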
type GenItem struct {
GenConfig
Ident *ast.Ident
StructType *ast.StructType
}
func split(s string, c byte) (s1, s2 string) {
i := strings.IndexByte(s, c)
if i == -1 {
return s, ""
}
return s[:i], s[i+1:]
}
func rsplit(s string, c byte) (s1, s2 string) {
i := strings.LastIndexByte(s, c)
if i == -1 {
return s, ""
}
return s[:i], s[i+1:]
}
func scanBuildConstraints(r io.Reader) (cs []string, err error) {
br := bufio.NewReader(r)
for {
line, err := br.ReadBytes('\n')
if err != nil {
return nil, err
}
line = bytes.TrimSpace(line)
if comm := bytes.TrimPrefix(line, []byte("//")); !bytes.Equal(comm, line) {
comm = bytes.TrimSpace(comm)
if bytes.HasPrefix(comm, []byte("+build")) {
cs = append(cs, string(line))
continue
}
}
if bytes.HasPrefix(line, []byte("package ")) {
break
}
}
return cs, nil
}
func prettyPrint(w io.Writer, x interface{}) {
tw := tabwriter.NewWriter(w, 0, 2, 2, ' ', 0)
t := reflect.TypeOf(x)
v := reflect.ValueOf(x)
for i := 0; i < t.NumField(); i++ {
if v.Field(i).IsZero() {
continue
}
fmt.Fprintf(tw, "%s:\t%v\n",
t.Field(i).Name,
v.Field(i),
)
}
tw.Flush()
}
func isGenerated(base, suffix string) bool {
i := strings.Index(base, suffix)
if i == -1 {
return false
}
n := len(base)
m := i + len(suffix)
return m == n || base[m] == '_'
}
|
[
"\"GOFILE\""
] |
[] |
[
"GOFILE"
] |
[]
|
["GOFILE"]
|
go
| 1 | 0 | |
unit-tests/test-post-get-event.py
|
#! /usr/local/bin/python2.7
# -*- coding: utf-8 -*-
#
# This software was developed by employees of the National Institute of
# Standards and Technology (NIST), and others.
# This software has been contributed to the public domain.
# Pursuant to title 15 United States Code Section 105, works of NIST
# employees are not subject to copyright protection in the United States
# and are considered to be in the public domain.
# As a result, a formal license is not needed to use this software.
#
# This software is provided "AS IS."
# NIST MAKES NO WARRANTY OF ANY KIND, EXPRESS, IMPLIED
# OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTY OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT
# AND DATA ACCURACY. NIST does not warrant or make any representations
# regarding the use of the software or the results thereof, including but
# not limited to the correctness, accuracy, reliability or usefulness of
# this software.
import unittest
import json
import requests
import argparse
import os
import sys
import time
class TestPostEvent(unittest.TestCase):
def loginAsAdmin(self):
params = {}
params["emailAddress"] = "[email protected]"
params["password"] = "Administrator12!"
params["privilege"] = "admin"
r = requests.post(
"https://" + host + ":" + str(8443) + "/admin/authenticate",
data=json.dumps(params),
verify=False)
resp = r.json()
token = resp["sessionId"]
return token
def setUp(self):
self.adminToken = self.loginAsAdmin()
sensorConfig = json.load(open("TestSensor.config.json"))
url = "https://" + host + ":" + str(
8443) + "/admin/addSensor/" + self.adminToken
r = requests.post(url, data=json.dumps(sensorConfig), verify=False)
self.assertTrue(r.status_code == 200)
self.dataToPost = json.load(open("sensor.event"))
self.t = int(time.time())
self.t = self.dataToPost["t"]
self.sensorId = self.dataToPost["SensorID"]
self.url = "https://" + host + ":" + str(443)
r = requests.post(
self.url + "/spectrumbrowser/isAuthenticationRequired",
verify=False)
jsonVal = r.json()
print jsonVal
if jsonVal["AuthenticationRequired"]:
print("please disable authentication on the server")
sys.exit()
self.sessionToken = jsonVal["SessionToken"]
def testPostEvent(self):
url = self.url + "/eventstream/postCaptureEvent"
r = requests.post(
url, data=json.dumps(self.dataToPost, indent=4),
verify=False)
print "status code ", r.status_code
url = self.url + "/eventstream/getCaptureEvents/" + self.sensorId + "/" + str(
self.t) + "/" + str(1) + "/" + self.sessionToken
r = requests.post(url, verify=False)
print "status code ", r.status_code
self.assertTrue(r.status_code == 200)
print r.json()
def tearDown(self):
url = "https://" + host + ":" + str(
8443) + "/admin/purgeSensor/" + self.sensorId + "/" + self.adminToken
r = requests.post(url, verify=False)
r = requests.post("https://" + host + ":" + str(8443) +
"/admin/logOut/" + self.adminToken,
verify=False)
self.assertTrue(r.status_code == 200)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process command line args")
parser.add_argument("-host", help="Server host.")
parser.add_argument("-file", help="Data file.")
args = parser.parse_args()
global filename
global host
host = args.host
if host is None:
host = os.environ.get("MSOD_WEB_HOST")
if host is None:
print "Require host and web port"
os._exit(1)
filename = args.file
if filename is None:
filename = "NorfolkTestSample.txt"
if not os.path.exists(filename):
print "Require data file -file argument."
os._exit(1)
suite = unittest.TestLoader().loadTestsFromTestCase(TestPostEvent)
unittest.TextTestRunner(verbosity=2).run(suite)
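# Illustrative invocation (added comment, not part of the original test; the host value is
# hypothetical):
#   python2.7 test-post-get-event.py -host localhost -file NorfolkTestSample.txt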
|
[] |
[] |
[
"MSOD_WEB_HOST"
] |
[]
|
["MSOD_WEB_HOST"]
|
python
| 1 | 0 | |
huskyai/scraper/bing-image-search.py
|
import os
import requests
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO, StringIO
import uuid
#config
download_folder = "data/huskies"
search_term = "siberian husky"
bing_api_key = os.path.join(os.getenv('HOME'), ".bingimagessearchkey")
subscription_key = open(bing_api_key,"rt").readline().rstrip("\n")
count = 100
max_page = 10
#setup
os.makedirs(download_folder,exist_ok=True)
search_url = "https://huskyai-imagesearch.cognitiveservices.azure.com/bing/v7.0/images/search"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
#query and save images
offset = 0
for current_page in range(max_page):
print("Page:" + str(current_page+1))
params = {"q": search_term, "license": "public", "imageType": "photo", "count": count, "offset": offset}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
print("Offset:" + str(offset))
print("Next Offset" + str(search_results["nextOffset"]))
image_count = len(search_results["value"][:])
for i in range(image_count):
url = search_results["value"][:][i]["thumbnailUrl"] #contentUrl
id = search_results["value"][:][i]["imageId"]
print(f"Processing ({i}) - {id}")
image_data = requests.get(url)
image_data.raise_for_status()
filename = os.path.join(download_folder, id + ".jpg")
image = Image.open(BytesIO(image_data.content))
image.save(filename, "JPEG")
offset = search_results["nextOffset"]
print("Done")
#plot the images
# f, axes = plt.subplots(4, 4)
# for i in range(4):
# for j in range(4):
# image_data = requests.get(thumbnail_urls[i+4*j])
# image_data.raise_for_status()
# image = Image.open(BytesIO(image_data.content))
# axes[i][j].imshow(image)
# axes[i][j].axis("off")
# plt.show()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
examples/tci/v20190318/SubmitFullBodyClassTask.go
|
package main
import (
"fmt"
"github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/common"
"github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/common/errors"
"github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/common/profile"
tci "github.com/liucy1983/tencentcloud-sdk-go/tencentcloud/tci/v20190318"
)
func main() {
credential := common.NewCredential(
// os.Getenv("TENCENTCLOUD_SECRET_ID"),
// os.Getenv("TENCENTCLOUD_SECRET_KEY"),
"", "",
)
cpf := profile.NewClientProfile()
cpf.HttpProfile.ReqMethod = "POST"
cpf.HttpProfile.ReqTimeout = 30
cpf.HttpProfile.Endpoint = "tci.tencentcloudapi.com"
client, _ := tci.NewClient(credential, "ap-guangzhou", cpf)
req := tci.NewSubmitFullBodyClassTaskRequest()
req.FileContent = common.StringPtr("https://edu-test-1253131631.cos.ap-guangzhou.myqcloud.com/aieduautotest/autotest_vedio.mp4")
req.FileType = common.StringPtr("vod_url")
req.Lang = common.Int64Ptr(0)
req.LibrarySet = common.StringPtrs([]string{"library_15603955264181591716"})
req.VocabLibNameList = common.StringPtrs([]string{"testlib2"})
req.VoiceEncodeType = common.Int64Ptr(1)
req.VoiceFileType = common.Int64Ptr(10)
// Call the desired API through the client object; the request object must be passed in
response, err := client.SubmitFullBodyClassTask(req)
// Handle errors
fmt.Println(err)
if _, ok := err.(*errors.TencentCloudSDKError); ok {
fmt.Printf("An API error has returned: %s", err)
return
}
// For non-SDK errors, fail directly. Real code could add additional handling here.
if err != nil {
panic(err)
}
// Print the returned JSON string
fmt.Printf("%s", response.ToJsonString())
}
|
[
"\"TENCENTCLOUD_SECRET_ID\"",
"\"TENCENTCLOUD_SECRET_KEY\""
] |
[] |
[
"TENCENTCLOUD_SECRET_ID",
"TENCENTCLOUD_SECRET_KEY"
] |
[]
|
["TENCENTCLOUD_SECRET_ID", "TENCENTCLOUD_SECRET_KEY"]
|
go
| 2 | 0 | |
cmd/api-server/main.go
|
package main
import (
"flag"
"net/http"
"os"
"github.com/aerogear/mobile-developer-console/pkg/mobile"
"github.com/aerogear/mobile-developer-console/pkg/config"
"github.com/aerogear/mobile-developer-console/pkg/web"
log "github.com/sirupsen/logrus"
"context"
"github.com/aerogear/mobile-developer-console/pkg/stub"
sc "github.com/kubernetes-incubator/service-catalog/pkg/client/clientset_generated/clientset"
buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
"github.com/operator-framework/operator-sdk/pkg/sdk"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
var (
kubeconfig string
namespace string
)
func main() {
flag.Parse()
config := config.GetConfig()
staticFilesDir := config.StaticFilesDir
apiRoutePrefix := config.ApiRoutePrefix
initLogger(config.LogLevel, config.LogFormat)
if namespace == "" {
log.Fatalf("-namespace is a required flag or it can be set via NAMESPACE env var")
}
if os.Getenv("KUBERNETES_CONFIG") == "" {
log.Warnf("KUBERNETES_CONFIG is not set. It is required if you are running the application outside of a kubernetes cluster.")
}
web.SetupWS(config.WsWriteWait, config.WsPongWait)
router := web.NewRouter(staticFilesDir, apiRoutePrefix)
apiGroup := router.Group(apiRoutePrefix)
cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
log.Fatalf("Error loading config: %v", err)
}
k8sClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
log.Fatalf("Error init k8s client: %s", err.Error())
}
scClient, err := sc.NewForConfig(cfg)
if err != nil {
log.Fatalf("Error init service catalog client: %v", err)
}
buildClient, err := buildv1.NewForConfig(cfg)
if err != nil {
log.Fatalf("Error init build client: %v", err)
}
{
siLister := mobile.NewServiceInstanceLister(scClient.ServicecatalogV1beta1(), namespace)
mobileServiceInstancesHandler := web.NewMobileServiceInstancesHandler(siLister, namespace)
web.SetupMobileServicesRoute(apiGroup, mobileServiceInstancesHandler)
}
{
buildCRUDL := mobile.NewBuildCRUDL(buildClient, namespace, cfg.Host)
mobileBuildsHandler := web.NewMobileBuildsHandler(buildCRUDL, namespace)
web.SetupMobileBuildsRoute(apiGroup, mobileBuildsHandler)
}
secretsCRUDL := mobile.NewSecretsCRUDL(k8sClient.CoreV1())
{
buildConfigCRUDL := mobile.NewBuildConfigCRUDL(buildClient, namespace)
mobileBuildConfigsHandler := web.NewMobileBuildConfigsHandler(buildConfigCRUDL, secretsCRUDL, namespace)
web.SetupMobileBuildConfigsRoute(apiGroup, mobileBuildConfigsHandler)
}
mobileClientsRepo := mobile.NewMobileClientRepo(namespace)
{
mobileClientsHandler := web.NewMobileClientsHandler(mobileClientsRepo, namespace)
web.SetupMoileClientsRoute(apiGroup, mobileClientsHandler)
}
resource := "v1"
kind := "Secret"
resyncPeriod := config.OperatorResyncPeriod
go func() {
sdk.Watch(resource, kind, namespace, resyncPeriod)
sdk.Handle(stub.NewHandler(mobileClientsRepo))
sdk.Run(context.Background())
}()
log.WithFields(log.Fields{"listenAddress": config.ListenAddress}).Info("Starting application")
log.Fatal(http.ListenAndServe(config.ListenAddress, router))
}
func initLogger(level, format string) {
logLevel, err := log.ParseLevel(level)
if err != nil {
log.Fatalf("log level %v is not allowed. Must be one of [debug, info, warning, error, fatal, panic]", level)
logLevel = log.InfoLevel
}
log.SetLevel(logLevel)
switch format {
case "json":
log.SetFormatter(&log.JSONFormatter{})
case "text":
log.SetFormatter(&log.TextFormatter{DisableColors: true})
default:
log.Fatalf("log format %v is not allowed. Must be one of [text, json]", format)
}
}
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", os.Getenv("KUBERNETES_CONFIG"), "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&namespace, "namespace", os.Getenv("NAMESPACE"), "Name space. Only required if out-of-cluster.")
}
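// Illustrative invocation (added comment, not original source; the binary name and values
// below are hypothetical):
//
//	NAMESPACE=myproject KUBERNETES_CONFIG=$HOME/.kube/config ./mobile-developer-console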
|
[
"\"KUBERNETES_CONFIG\"",
"\"KUBERNETES_CONFIG\"",
"\"NAMESPACE\""
] |
[] |
[
"KUBERNETES_CONFIG",
"NAMESPACE"
] |
[]
|
["KUBERNETES_CONFIG", "NAMESPACE"]
|
go
| 2 | 0 | |
cmd/init-virtual-kubelet/main.go
|
// Copyright 2019-2022 The Liqo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"os"
"path/filepath"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
liqonetutils "github.com/liqotech/liqo/pkg/liqonet/utils"
"github.com/liqotech/liqo/pkg/utils"
vk "github.com/liqotech/liqo/pkg/vkMachinery"
"github.com/liqotech/liqo/pkg/vkMachinery/csr"
)
const timeout = 30 * time.Second
func main() {
var config *rest.Config
var distribution string
klog.Info("Loading client config")
flag.StringVar(&distribution, "k8s-distribution", "kubernetes", "determine the provider to adapt csr generation")
ctx := context.Background()
kubeconfigPath, ok := os.LookupEnv("KUBECONFIG")
if !ok {
kubeconfigPath = filepath.Join(os.Getenv("HOME"), ".kube", "config")
}
klog.Infof("Loading client: %s", kubeconfigPath)
config, err := utils.UserConfig(kubeconfigPath)
if err != nil {
klog.Fatalf("Unable to create client config: %s", err)
}
client, err := kubernetes.NewForConfig(config)
if err != nil {
klog.Fatalf("Unable to create client: %s", err)
}
name, ok := os.LookupEnv("POD_NAME")
if !ok {
klog.Fatal("Unable to create CSR: POD_NAME undefined")
}
podIP, err := liqonetutils.GetPodIP()
if err != nil {
klog.Fatal(err)
}
namespace, ok := os.LookupEnv("POD_NAMESPACE")
if !ok {
klog.Fatal("Unable to create CSR: POD_NAMESPACE undefined")
}
nodeName, ok := os.LookupEnv("NODE_NAME")
if !ok {
klog.Fatal("Unable to create CSR: NODE_NAME undefined")
}
defer func() {
if err = csr.PersistCertificates(ctx, client, nodeName, namespace,
vk.CsrLocation, vk.KeyLocation, vk.CertLocation); err != nil {
klog.Error(err)
os.Exit(1)
}
}()
_, hasCertificate, err := csr.GetCSRSecret(ctx, client, nodeName, namespace)
if !apierrors.IsNotFound(err) && !hasCertificate {
if err != nil {
klog.Fatal(err)
} else {
klog.Info("Certificate already present for this nodeName. Skipping")
}
return
}
// Generate Key and CSR files in PEM format
if err := csr.CreateCSRResource(ctx, name, client, nodeName, namespace, distribution, podIP); err != nil {
klog.Fatalf("Unable to create CSR: %s", err)
}
cancelCtx, cancel := context.WithTimeout(ctx, timeout)
csrWatcher := csr.NewWatcher(client, 0, labels.SelectorFromSet(vk.CsrLabels))
csrWatcher.Start(ctx)
cert, err := csrWatcher.RetrieveCertificate(cancelCtx, name)
cancel()
if err != nil {
klog.Fatalf("Unable to get certificate: %w", err)
}
if err := csr.StoreCertificate(ctx, client, cert, namespace, nodeName); err != nil {
klog.Fatal("Unable to store the CRT file in secret")
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.core.client;
import static com.google.common.base.Preconditions.checkArgument;
import java.io.File;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.conf.PropertyType;
import org.apache.commons.configuration2.CompositeConfiguration;
import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.MapConfiguration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.builder.FileBasedConfigurationBuilder;
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* Contains a list of property keys recognized by the Accumulo client and convenience methods for
* setting them.
*
* @since 1.6.0
* @deprecated since 2.0.0, replaced by {@link Accumulo#newClient()}
*/
@Deprecated
public class ClientConfiguration {
private static final Logger log = LoggerFactory.getLogger(ClientConfiguration.class);
public static final String USER_ACCUMULO_DIR_NAME = ".accumulo";
public static final String USER_CONF_FILENAME = "config";
public static final String GLOBAL_CONF_FILENAME = "client.conf";
private final CompositeConfiguration compositeConfig;
public enum ClientProperty {
// SSL
RPC_SSL_TRUSTSTORE_PATH(Property.RPC_SSL_TRUSTSTORE_PATH),
RPC_SSL_TRUSTSTORE_PASSWORD(Property.RPC_SSL_TRUSTSTORE_PASSWORD),
RPC_SSL_TRUSTSTORE_TYPE(Property.RPC_SSL_TRUSTSTORE_TYPE),
RPC_SSL_KEYSTORE_PATH(Property.RPC_SSL_KEYSTORE_PATH),
RPC_SSL_KEYSTORE_PASSWORD(Property.RPC_SSL_KEYSTORE_PASSWORD),
RPC_SSL_KEYSTORE_TYPE(Property.RPC_SSL_KEYSTORE_TYPE),
RPC_USE_JSSE(Property.RPC_USE_JSSE),
GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS),
INSTANCE_RPC_SSL_CLIENT_AUTH(Property.INSTANCE_RPC_SSL_CLIENT_AUTH),
INSTANCE_RPC_SSL_ENABLED(Property.INSTANCE_RPC_SSL_ENABLED),
// ZooKeeper
INSTANCE_ZK_HOST(Property.INSTANCE_ZK_HOST),
INSTANCE_ZK_TIMEOUT(Property.INSTANCE_ZK_TIMEOUT),
// Instance information
INSTANCE_NAME("instance.name", null, PropertyType.STRING,
"Name of Accumulo instance to connect to"),
INSTANCE_ID("instance.id", null, PropertyType.STRING,
"UUID of Accumulo instance to connect to"),
// Tracing
TRACE_SPAN_RECEIVERS(Property.TRACE_SPAN_RECEIVERS),
TRACE_SPAN_RECEIVER_PREFIX(Property.TRACE_SPAN_RECEIVER_PREFIX),
TRACE_ZK_PATH(Property.TRACE_ZK_PATH),
// SASL / GSSAPI(Kerberos)
/**
* @since 1.7.0
*/
INSTANCE_RPC_SASL_ENABLED(Property.INSTANCE_RPC_SASL_ENABLED),
/**
* @since 1.7.0
*/
RPC_SASL_QOP(Property.RPC_SASL_QOP),
/**
* @since 1.7.0
*/
KERBEROS_SERVER_PRIMARY("kerberos.server.primary", "accumulo", PropertyType.STRING,
"The first component of the Kerberos principal, the 'primary', "
+ "that Accumulo servers use to login");
private String key;
private String defaultValue;
private PropertyType type;
private String description;
private ClientProperty(Property prop) {
this(prop.getKey(), prop.getDefaultValue(), prop.getType(), prop.getDescription());
}
private ClientProperty(String key, String defaultValue, PropertyType type, String description) {
this.key = key;
this.defaultValue = defaultValue;
this.type = type;
this.description = description;
}
public String getKey() {
return key;
}
public String getDefaultValue() {
return defaultValue;
}
private PropertyType getType() {
return type;
}
public String getDescription() {
return description;
}
public static ClientProperty getPropertyByKey(String key) {
for (ClientProperty prop : ClientProperty.values())
if (prop.getKey().equals(key))
return prop;
return null;
}
}
private ClientConfiguration(List<? extends Configuration> configs) {
compositeConfig = new CompositeConfiguration(configs);
}
/**
* Attempts to load a configuration file from the system using the default search paths. Uses the
* <em>ACCUMULO_CLIENT_CONF_PATH</em> environment variable, split on <em>File.pathSeparator</em>,
* for a list of target files.
* <p>
* If <em>ACCUMULO_CLIENT_CONF_PATH</em> is not set, uses the following in this order:
* <ul>
* <li>~/.accumulo/config
* <li><em>$ACCUMULO_CONF_DIR</em>/client.conf, if <em>$ACCUMULO_CONF_DIR</em> is defined.
* <li>/etc/accumulo/client.conf
* <li>/etc/accumulo/conf/client.conf
* </ul>
* <p>
* A client configuration will then be read from each location using
* <em>PropertiesConfiguration</em> to construct a configuration. That means the latest item will
* be the one in the configuration.
*
* @see PropertiesConfiguration
* @see File#pathSeparator
*/
public static ClientConfiguration loadDefault() {
return loadFromSearchPath(getDefaultSearchPath());
}
/**
* Initializes an empty configuration object to be further configured with other methods on the
* class.
*
* @since 1.9.0
*/
public static ClientConfiguration create() {
return new ClientConfiguration(Collections.emptyList());
}
/**
* Initializes a configuration object from the contents of a configuration file. Currently
* supports Java "properties" files. The returned object can be further configured with subsequent
* calls to other methods on this class.
*
* @param file
* the path to the configuration file
* @since 1.9.0
*/
public static ClientConfiguration fromFile(File file) {
FileBasedConfigurationBuilder<PropertiesConfiguration> propsBuilder =
new FileBasedConfigurationBuilder<>(PropertiesConfiguration.class)
.configure(new Parameters().properties().setFile(file));
try {
return new ClientConfiguration(Collections.singletonList(propsBuilder.getConfiguration()));
} catch (ConfigurationException e) {
throw new IllegalArgumentException("Bad configuration file: " + file, e);
}
}
/**
* Initializes a configuration object from the contents of a map. The returned object can be
* further configured with subsequent calls to other methods on this class.
*
* @param properties
* a map containing the configuration properties to use
* @since 1.9.0
*/
public static ClientConfiguration fromMap(Map<String,String> properties) {
MapConfiguration mapConf = new MapConfiguration(properties);
return new ClientConfiguration(Collections.singletonList(mapConf));
}
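// Illustrative usage of fromMap above (added comment, not part of the original class;
// the property values are hypothetical):
//   Map<String,String> props = new HashMap<>();
//   props.put(ClientProperty.INSTANCE_NAME.getKey(), "myInstance");
//   props.put(ClientProperty.INSTANCE_ZK_HOST.getKey(), "zk1.example.com:2181");
//   ClientConfiguration conf = ClientConfiguration.fromMap(props);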
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "process runs in same security context as user who provided path")
private static ClientConfiguration loadFromSearchPath(List<String> paths) {
List<Configuration> configs = new LinkedList<>();
for (String path : paths) {
File conf = new File(path);
if (conf.isFile() && conf.canRead()) {
FileBasedConfigurationBuilder<PropertiesConfiguration> propsBuilder =
new FileBasedConfigurationBuilder<>(PropertiesConfiguration.class)
.configure(new Parameters().properties().setFile(conf));
try {
configs.add(propsBuilder.getConfiguration());
log.info("Loaded client configuration file {}", conf);
} catch (ConfigurationException e) {
throw new IllegalStateException("Error loading client configuration file " + conf, e);
}
}
}
// We couldn't find the client configuration anywhere
if (configs.isEmpty()) {
log.debug(
"Found no client.conf in default paths. Using default client configuration values.");
}
return new ClientConfiguration(configs);
}
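  /**
   * Reconstructs a ClientConfiguration from a string previously produced by
   * {@link #serialize()}, using Java properties syntax.
   */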
public static ClientConfiguration deserialize(String serializedConfig) {
PropertiesConfiguration propConfig = new PropertiesConfiguration();
try {
propConfig.getLayout().load(propConfig, new StringReader(serializedConfig));
} catch (ConfigurationException e) {
throw new IllegalArgumentException(
"Error deserializing client configuration: " + serializedConfig, e);
}
return new ClientConfiguration(Collections.singletonList(propConfig));
}
/**
* Muck the value of {@code clientConfPath} if it points to a directory by appending
* {@code client.conf} to the end of the file path. This is a no-op if the value is not a
* directory on the filesystem.
*
* @param clientConfPath
* The value of ACCUMULO_CLIENT_CONF_PATH.
*/
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "process runs in same security context as user who provided path")
static String getClientConfPath(String clientConfPath) {
if (clientConfPath == null) {
return null;
}
File filePath = new File(clientConfPath);
// If clientConfPath is a directory, tack on the default client.conf file name.
if (filePath.exists() && filePath.isDirectory()) {
return new File(filePath, "client.conf").toString();
}
return clientConfPath;
}
private static List<String> getDefaultSearchPath() {
String clientConfSearchPath = getClientConfPath(System.getenv("ACCUMULO_CLIENT_CONF_PATH"));
List<String> clientConfPaths;
if (clientConfSearchPath != null) {
clientConfPaths = Arrays.asList(clientConfSearchPath.split(File.pathSeparator));
} else {
// if $ACCUMULO_CLIENT_CONF_PATH env isn't set, priority from top to bottom is:
// ~/.accumulo/config
// $ACCUMULO_CONF_DIR/client.conf
// /etc/accumulo/client.conf
// /etc/accumulo/conf/client.conf
clientConfPaths = new LinkedList<>();
clientConfPaths.add(System.getProperty("user.home") + File.separator + USER_ACCUMULO_DIR_NAME
+ File.separator + USER_CONF_FILENAME);
if (System.getenv("ACCUMULO_CONF_DIR") != null) {
clientConfPaths
.add(System.getenv("ACCUMULO_CONF_DIR") + File.separator + GLOBAL_CONF_FILENAME);
}
clientConfPaths.add("/etc/accumulo/" + GLOBAL_CONF_FILENAME);
clientConfPaths.add("/etc/accumulo/conf/" + GLOBAL_CONF_FILENAME);
}
return clientConfPaths;
}
public String serialize() {
PropertiesConfiguration propConfig = new PropertiesConfiguration();
propConfig.copy(compositeConfig);
StringWriter writer = new StringWriter();
try {
propConfig.getLayout().save(propConfig, writer);
} catch (ConfigurationException e) {
// this should never happen
throw new IllegalStateException(e);
}
return writer.toString();
}
/**
   * Returns the value for prop, or the default value if not present.
*
*/
public String get(ClientProperty prop) {
if (compositeConfig.containsKey(prop.getKey()))
return compositeConfig.getString(prop.getKey());
else
return prop.getDefaultValue();
}
private void checkType(ClientProperty property, PropertyType type) {
if (!property.getType().equals(type)) {
String msg = "Configuration method intended for type " + type + " called with a "
+ property.getType() + " argument (" + property.getKey() + ")";
throw new IllegalArgumentException(msg);
}
}
/**
* Gets all properties under the given prefix in this configuration.
*
* @param property
* prefix property, must be of type PropertyType.PREFIX
* @return a map of property keys to values
* @throws IllegalArgumentException
* if property is not a prefix
*/
public Map<String,String> getAllPropertiesWithPrefix(ClientProperty property) {
checkType(property, PropertyType.PREFIX);
Map<String,String> propMap = new HashMap<>();
String prefix = property.getKey();
if (prefix.endsWith(".")) {
prefix = prefix.substring(0, prefix.length() - 1);
}
Iterator<?> iter = compositeConfig.getKeys(prefix);
while (iter.hasNext()) {
String p = (String) iter.next();
propMap.put(p, compositeConfig.getString(p));
}
return propMap;
}
/**
* Sets the value of property to value
*
*/
public void setProperty(ClientProperty prop, String value) {
with(prop, value);
}
/**
* Same as {@link #setProperty(ClientProperty, String)} but returns the ClientConfiguration for
* chaining purposes
*/
public ClientConfiguration with(ClientProperty prop, String value) {
return with(prop.getKey(), value);
}
/**
* Sets the value of property to value
*
* @since 1.9.0
*/
public void setProperty(String prop, String value) {
with(prop, value);
}
/**
* Same as {@link #setProperty(String, String)} but returns the ClientConfiguration for chaining
* purposes
*
* @since 1.9.0
*/
public ClientConfiguration with(String prop, String value) {
compositeConfig.setProperty(prop, value);
return this;
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_NAME
*
*/
public ClientConfiguration withInstance(String instanceName) {
checkArgument(instanceName != null, "instanceName is null");
return with(ClientProperty.INSTANCE_NAME, instanceName);
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_ID
*
*/
public ClientConfiguration withInstance(UUID instanceId) {
checkArgument(instanceId != null, "instanceId is null");
return with(ClientProperty.INSTANCE_ID, instanceId.toString());
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_ZK_HOST
*
*/
public ClientConfiguration withZkHosts(String zooKeepers) {
checkArgument(zooKeepers != null, "zooKeepers is null");
return with(ClientProperty.INSTANCE_ZK_HOST, zooKeepers);
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_ZK_TIMEOUT
*
*/
public ClientConfiguration withZkTimeout(int timeout) {
return with(ClientProperty.INSTANCE_ZK_TIMEOUT, String.valueOf(timeout));
}
/**
* Same as {@link #withSsl(boolean, boolean)} with useJsseConfig set to false
*
*/
public ClientConfiguration withSsl(boolean sslEnabled) {
return withSsl(sslEnabled, false);
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SSL_ENABLED and
* ClientProperty.RPC_USE_JSSE
*
*/
public ClientConfiguration withSsl(boolean sslEnabled, boolean useJsseConfig) {
return with(ClientProperty.INSTANCE_RPC_SSL_ENABLED, String.valueOf(sslEnabled))
.with(ClientProperty.RPC_USE_JSSE, String.valueOf(useJsseConfig));
}
/**
* Same as {@link #withTruststore(String, String, String)} with password null and type null
*
*/
public ClientConfiguration withTruststore(String path) {
return withTruststore(path, null, null);
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.RPC_SSL_TRUSTORE_PATH,
* ClientProperty.RPC_SSL_TRUSTORE_PASSWORD, and ClientProperty.RPC_SSL_TRUSTORE_TYPE
*
*/
public ClientConfiguration withTruststore(String path, String password, String type) {
checkArgument(path != null, "path is null");
setProperty(ClientProperty.RPC_SSL_TRUSTSTORE_PATH, path);
if (password != null)
setProperty(ClientProperty.RPC_SSL_TRUSTSTORE_PASSWORD, password);
if (type != null)
setProperty(ClientProperty.RPC_SSL_TRUSTSTORE_TYPE, type);
return this;
}
/**
* Same as {@link #withKeystore(String, String, String)} with password null and type null
*
*/
public ClientConfiguration withKeystore(String path) {
return withKeystore(path, null, null);
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH,
* ClientProperty.RPC_SSL_KEYSTORE_PATH, ClientProperty.RPC_SSL_KEYSTORE_PASSWORD, and
* ClientProperty.RPC_SSL_KEYSTORE_TYPE
*
*/
public ClientConfiguration withKeystore(String path, String password, String type) {
checkArgument(path != null, "path is null");
setProperty(ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH, "true");
setProperty(ClientProperty.RPC_SSL_KEYSTORE_PATH, path);
if (password != null)
setProperty(ClientProperty.RPC_SSL_KEYSTORE_PASSWORD, password);
if (type != null)
setProperty(ClientProperty.RPC_SSL_KEYSTORE_TYPE, type);
return this;
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SASL_ENABLED.
*
* @since 1.7.0
*/
public ClientConfiguration withSasl(boolean saslEnabled) {
return with(ClientProperty.INSTANCE_RPC_SASL_ENABLED, String.valueOf(saslEnabled));
}
/**
* Show whether SASL has been set on this configuration.
*
* @since 1.9.0
*/
public boolean hasSasl() {
return compositeConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(),
Boolean.parseBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getDefaultValue()));
}
/**
* Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SASL_ENABLED and
* ClientProperty.GENERAL_KERBEROS_PRINCIPAL.
*
* @param saslEnabled
* Should SASL(kerberos) be enabled
* @param kerberosServerPrimary
* The 'primary' component of the Kerberos principal Accumulo servers use to login (e.g.
* 'accumulo' in 'accumulo/_HOST@REALM')
* @since 1.7.0
*/
public ClientConfiguration withSasl(boolean saslEnabled, String kerberosServerPrimary) {
return withSasl(saslEnabled).with(ClientProperty.KERBEROS_SERVER_PRIMARY,
kerberosServerPrimary);
}
public boolean containsKey(String key) {
return compositeConfig.containsKey(key);
}
public Iterator<String> getKeys() {
return compositeConfig.getKeys();
}
public String getString(String key) {
return compositeConfig.getString(key);
}
}
|
[
"\"ACCUMULO_CLIENT_CONF_PATH\"",
"\"ACCUMULO_CONF_DIR\"",
"\"ACCUMULO_CONF_DIR\""
] |
[] |
[
"ACCUMULO_CONF_DIR",
"ACCUMULO_CLIENT_CONF_PATH"
] |
[]
|
["ACCUMULO_CONF_DIR", "ACCUMULO_CLIENT_CONF_PATH"]
|
java
| 2 | 0 | |
parser.py
|
#!/usr/bin/python3
from collections import OrderedDict
import subprocess
import os
#
ROOT_DIR = 'models/syntaxnet'
PARSER_EVAL = 'bazel-bin/syntaxnet/parser_eval'
#MODEL_DIR = 'syntaxnet/models/parsey_mcparseface'
MODELS_DIR = 'syntaxnet/models/parsey_universal/'
CONTEXT = 'syntaxnet/models/parsey_universal/context.pbtxt'
#ENV VARS:
MODELS = [l.strip() for l in os.getenv('PARSEY_MODELS', 'English').split(',')]
BATCH_SIZE = os.getenv('PARSEY_BATCH_SIZE', '1')
def split_tokens(parse):
# Format the result.
def format_token(line):
x = OrderedDict(zip(
["id", "form", "lemma", "upostag", "xpostag", "feats", "head", "deprel", "deps", "misc"],
line.split("\t")
))
        for key, val in list(x.items()):  # copy the items so keys can be deleted while iterating
            if val == "_": del x[key] # = None
x['id'] = int(x['id'])
x['head'] = int(x['head'])
if x['feats']:
feat_dict = {}
for feat in x['feats'].split('|'):
split_feat = feat.split('=')
feat_dict[split_feat[0]] = split_feat[1]
x['feats'] = feat_dict
return x
return [format_token(line) for line in parse.strip().split("\n")]
def make_tree(split_tokens, sentence):
tokens = { tok["id"]: tok for tok in split_tokens }
tokens[0] = OrderedDict([ ("sentence", sentence) ])
for tok in split_tokens:
tokens[tok['head']]\
.setdefault('tree', OrderedDict()) \
.setdefault(tok['deprel'], []) \
.append(tok)
del tok['head']
del tok['deprel']
return tokens[0]
def conll_to_dict(conll):
conll_list = conll.strip().split("\n\n")
    return list(map(split_tokens, conll_list))
def open_parser_eval(args):
return subprocess.Popen(
[PARSER_EVAL] + args,
cwd=ROOT_DIR,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
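# Feed one batch of sentences to a parser_eval subprocess and collect its CoNLL
# output; each parsed sentence ends with a blank line, so reading stops once
# num_lines blank-line terminators have been seen.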
def send_input(process, input_str, num_lines):
#print("sending input: %s, %s" % (input_str, num_lines))
input_str = input_str.encode('utf8')
process.stdin.write(input_str)
process.stdin.write(b"\n\n") # signal end of documents
process.stdin.flush()
response = b""
while num_lines > 0:
line = process.stdout.readline()
print("line: %s" % line)
if line.strip() == b"":
# empty line signals end of output for one sentence
num_lines -= 1
response += line
return response.decode('utf8')
def create_pipeline(model):
model_dir = MODELS_DIR + model
# tokenizer = open_parser_eval([
# "--input=stdin-untoken",
# "--output=stdout-conll",
# "--hidden_layer_sizes=128,128",
# "--arg_prefix=brain_tokenizer",
# "--graph_builder=greedy",
# "--task_context=%s" % CONTEXT,
# "--resource_dir=%s" % model_dir,
# "--model_path=%s/tokenizer-params" % model_dir,
# "--slim_model",
# "--batch_size=32",
# #"--batch_size=1",
# "--alsologtostderr"
# ])
# Open the morpher
morpher = open_parser_eval([
"--input=stdin",
"--output=stdout-conll",
"--hidden_layer_sizes=64",
"--arg_prefix=brain_morpher",
"--graph_builder=structured",
"--task_context=%s" % CONTEXT,
"--resource_dir=%s" % model_dir,
"--model_path=%s/morpher-params" % model_dir,
"--slim_model",
"--batch_size=%s" % BATCH_SIZE,
"--alsologtostderr"])
# Open the part-of-speech tagger.
pos_tagger = open_parser_eval([
"--input=stdin-conll",
"--output=stdout-conll",
"--hidden_layer_sizes=64",
"--arg_prefix=brain_tagger",
"--graph_builder=structured",
"--task_context=%s" % CONTEXT,
"--resource_dir=%s" % model_dir,
"--model_path=%s/tagger-params" % model_dir,
"--slim_model",
"--batch_size=%s" % BATCH_SIZE,
"--alsologtostderr"])
# Open the syntactic dependency parser.
dependency_parser = open_parser_eval([
"--input=stdin-conll",
"--output=stdout-conll",
"--hidden_layer_sizes=512,512",
"--arg_prefix=brain_parser",
"--graph_builder=structured",
"--task_context=%s" % CONTEXT,
"--resource_dir=%s" % model_dir,
"--model_path=%s/parser-params" % model_dir,
"--slim_model",
"--batch_size=%s" % BATCH_SIZE,
"--alsologtostderr"])
return [morpher, pos_tagger, dependency_parser]
#brain process pipelines:
pipelines = {}
for model in MODELS:
pipelines[model] = create_pipeline(model)
def parse_sentences(sentences, request_args):
sentences = sentences.strip() + '\n'
num_lines = sentences.count('\n')
lang = request_args.get('language', default=MODELS[0])
pipeline = pipelines[lang]
# print("TOKENIZER! %s, %s" % ( sentences, num_lines))
# print(send_input(pipeline[3], sentences, num_lines))
# Do the morphing
morphed = send_input(pipeline[0], sentences, num_lines)
# Do POS tagging.
pos_tags = send_input(pipeline[1], morphed, num_lines)
# Do syntax parsing.
dependency_parse = send_input(pipeline[2], pos_tags, num_lines)
#print(dependency_parse)
#return [make_tree(st, sen) for sen, st in zip(sentences.split("\n"), split_tokens_list)]
return conll_to_dict(dependency_parse)
if __name__ == "__main__":
    import sys, pprint
    # Stand-in for Flask's request.args, whose .get() accepts a default= keyword.
    class _Args(dict):
        def get(self, key, default=None): return dict.get(self, key, default)
    pprint.pprint(list(parse_sentences(sys.stdin.read().strip(), _Args())))
|
[] |
[] |
[
"PARSEY_MODELS",
"PARSEY_BATCH_SIZE"
] |
[]
|
["PARSEY_MODELS", "PARSEY_BATCH_SIZE"]
|
python
| 2 | 0 | |
lc112/solution_test.go
|
package solution
import (
"testing"
)
func TestHasPathSum(t *testing.T) {
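	// The test builds this tree (root-to-leaf sums are 8, 23 and 22):
	//        5
	//       / \
	//      2   8
	//     /   / \
	//    1   3   9
	//         \
	//          7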
root := &TreeNode{
Val: 5,
}
root.Left = &TreeNode{
Val: 2,
}
root.Left.Left = &TreeNode{
Val: 1,
}
root.Right = &TreeNode{
Val: 8,
}
root.Right.Left = &TreeNode{
Val: 3,
}
root.Right.Left.Right = &TreeNode{
Val: 7,
}
root.Right.Right = &TreeNode{
Val: 9,
}
resu1 := hasPathSum(root, 23)
resu2 := hasPathSum(root, 51)
if !resu1 || resu2 {
t.Fail()
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
server/server.go
|
package main
import (
"archive/zip"
"context"
"encoding/json"
"flag"
"io"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path"
"strconv"
"strings"
"syscall"
"time"
"github.com/gin-gonic/gin"
"github.com/go-redis/redis"
uuid "github.com/satori/go.uuid"
"database/sql"
"fmt"
"path/filepath"
"deepstack.io/server/middlewares"
"deepstack.io/server/requests"
"deepstack.io/server/response"
"deepstack.io/server/structures"
"deepstack.io/server/utils"
_ "github.com/mattn/go-sqlite3"
)
var temp_path = "/deeptemp/"
var DATA_DIR = "/datastore"
var db *sql.DB
var API_KEY = ""
var sub_key = ""
var state = true
var gpu = true
var request_timeout = 60.0
var expiring_date = time.Now()
var settings structures.Settings
var sub_data = structures.ActivationData{}
var redis_client *redis.Client
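// scene handles the scene-recognition endpoint: the uploaded image is written to
// temp_path, a job is pushed onto the "scene_queue" Redis list, and the handler
// polls Redis for a response keyed by the request id until it arrives or
// request_timeout elapses.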
func scene(c *gin.Context) {
img_id := uuid.NewV4().String()
req_id := uuid.NewV4().String()
file, _ := c.FormFile("image")
c.SaveUploadedFile(file, filepath.Join(temp_path, img_id))
req_data := requests.RecognitionRequest{Imgid: img_id, Reqid: req_id, Reqtype: "scene"}
req_string, _ := json.Marshal(req_data)
redis_client.RPush("scene_queue", req_string)
t1 := time.Now()
for true {
output, _ := redis_client.Get(req_id).Result()
duration := time.Since(t1).Seconds()
if output != "" {
var res response.RecognitionResponse
json.Unmarshal([]byte(output), &res)
if res.Success == false {
var error_response response.ErrorResponseInternal
json.Unmarshal([]byte(output), &error_response)
final_res := response.ErrorResponse{Success: false, Error: error_response.Error}
c.JSON(error_response.Code, final_res)
return
} else {
c.JSON(200, res)
return
}
break
} else if duration > request_timeout {
final_res := response.ErrorResponse{Success: false, Error: "failed to process request before timeout"}
c.JSON(500, final_res)
return
}
time.Sleep(5 * time.Millisecond)
}
}
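// detection serves both the built-in object-detection endpoint and the custom
// model endpoints; queue_name selects the Redis list that the corresponding
// worker process consumes.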
func detection(c *gin.Context, queue_name string) {
nms := c.PostForm("min_confidence")
if nms == "" {
nms = "0.40"
}
img_id := uuid.NewV4().String()
req_id := uuid.NewV4().String()
detec_req := requests.DetectionRequest{Imgid: img_id, Minconfidence: nms, Reqtype: "detection", Reqid: req_id}
face_req_string, _ := json.Marshal(detec_req)
file, _ := c.FormFile("image")
c.SaveUploadedFile(file, filepath.Join(temp_path, img_id))
redis_client.RPush(queue_name, face_req_string)
t1 := time.Now()
for true {
output, _ := redis_client.Get(req_id).Result()
duration := time.Since(t1).Seconds()
if output != "" {
var res response.DetectionResponse
json.Unmarshal([]byte(output), &res)
if res.Success == false {
var error_response response.ErrorResponseInternal
json.Unmarshal([]byte(output), &error_response)
final_res := response.ErrorResponse{Success: false, Error: error_response.Error}
c.JSON(error_response.Code, final_res)
return
} else {
c.JSON(200, res)
return
}
break
} else if duration > request_timeout {
final_res := response.ErrorResponse{Success: false, Error: "failed to process request before timeout"}
c.JSON(500, final_res)
return
}
time.Sleep(1 * time.Millisecond)
}
}
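// facedetection and the face handlers below follow the same Redis round-trip
// pattern, each pushing onto "face_queue" with a different Reqtype.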
func facedetection(c *gin.Context) {
file, _ := c.FormFile("image")
nms := c.PostForm("min_confidence")
if nms == "" {
nms = "0.55"
}
img_id := uuid.NewV4().String()
req_id := uuid.NewV4().String()
face_req := requests.FaceDetectionRequest{Imgid: img_id, Reqtype: "detect", Reqid: req_id, Minconfidence: nms}
face_req_string, _ := json.Marshal(face_req)
c.SaveUploadedFile(file, filepath.Join(temp_path, img_id))
redis_client.RPush("face_queue", face_req_string)
t1 := time.Now()
for true {
output, _ := redis_client.Get(req_id).Result()
duration := time.Since(t1).Seconds()
if output != "" {
var res response.FaceDetectionResponse
json.Unmarshal([]byte(output), &res)
if res.Success == false {
var error_response response.ErrorResponseInternal
json.Unmarshal([]byte(output), &error_response)
final_res := response.ErrorResponse{Success: false, Error: error_response.Error}
c.JSON(error_response.Code, final_res)
} else {
c.JSON(200, res)
return
}
break
} else if duration > request_timeout {
final_res := response.ErrorResponse{Success: false, Error: "failed to process request before timeout"}
c.JSON(500, final_res)
return
}
time.Sleep(1 * time.Millisecond)
}
}
func facerecognition(c *gin.Context) {
file, _ := c.FormFile("image")
threshold := c.PostForm("min_confidence")
if threshold == "" {
threshold = "0.67"
}
img_id := uuid.NewV4().String()
req_id := uuid.NewV4().String()
c.SaveUploadedFile(file, filepath.Join(temp_path, img_id))
face_req := requests.FaceRecognitionRequest{Imgid: img_id, Reqtype: "recognize", Reqid: req_id, Minconfidence: threshold}
face_req_string, _ := json.Marshal(face_req)
redis_client.RPush("face_queue", face_req_string)
t1 := time.Now()
for true {
output, _ := redis_client.Get(req_id).Result()
duration := time.Since(t1).Seconds()
if output != "" {
var res response.FaceRecognitionResponse
json.Unmarshal([]byte(output), &res)
if res.Success == false {
var error_response response.ErrorResponseInternal
json.Unmarshal([]byte(output), &error_response)
final_res := response.ErrorResponse{Success: false, Error: error_response.Error}
c.JSON(error_response.Code, final_res)
return
} else {
c.JSON(200, res)
return
}
break
} else if duration > request_timeout {
final_res := response.ErrorResponse{Success: false, Error: "failed to process request before timeout"}
c.JSON(500, final_res)
return
}
time.Sleep(1 * time.Millisecond)
}
}
func faceregister(c *gin.Context) {
userid := c.PostForm("userid")
form, _ := c.MultipartForm()
user_images := []string{}
if form != nil {
for filename, _ := range form.File {
file, _ := c.FormFile(filename)
img_id := uuid.NewV4().String()
c.SaveUploadedFile(file, filepath.Join(temp_path, img_id))
user_images = append(user_images, img_id)
}
}
req_id := uuid.NewV4().String()
request_body := requests.FaceRegisterRequest{Userid: userid, Images: user_images, Reqid: req_id, Reqtype: "register"}
request_string, _ := json.Marshal(request_body)
redis_client.RPush("face_queue", request_string)
t1 := time.Now()
for true {
output, _ := redis_client.Get(req_id).Result()
duration := time.Since(t1).Seconds()
if output != "" {
var res response.FaceRegisterResponse
json.Unmarshal([]byte(output), &res)
if res.Success == false {
var error_response response.ErrorResponseInternal
json.Unmarshal([]byte(output), &error_response)
final_res := response.ErrorResponse{Success: false, Error: error_response.Error}
c.JSON(error_response.Code, final_res)
return
} else {
c.JSON(200, res)
return
}
break
} else if duration > request_timeout {
final_res := response.ErrorResponse{Success: false, Error: "failed to process request before timeout"}
c.JSON(500, final_res)
return
}
time.Sleep(1 * time.Millisecond)
}
}
func facematch(c *gin.Context) {
form, _ := c.MultipartForm()
user_images := []string{}
if form != nil {
for filename, _ := range form.File {
file, _ := c.FormFile(filename)
img_id := uuid.NewV4().String()
c.SaveUploadedFile(file, filepath.Join(temp_path, img_id))
user_images = append(user_images, img_id)
}
}
req_id := uuid.NewV4().String()
request_body := requests.FaceMatchRequest{Images: user_images, Reqid: req_id, Reqtype: "match"}
request_string, _ := json.Marshal(request_body)
redis_client.RPush("face_queue", request_string)
t1 := time.Now()
for true {
output, _ := redis_client.Get(req_id).Result()
duration := time.Since(t1).Seconds()
if output != "" {
var res response.FaceMatchResponse
json.Unmarshal([]byte(output), &res)
if res.Success == false {
var error_response response.ErrorResponseInternal
json.Unmarshal([]byte(output), &error_response)
final_res := response.ErrorResponse{Success: false, Error: error_response.Error}
c.JSON(error_response.Code, final_res)
return
} else {
c.JSON(200, res)
return
}
break
} else if duration > request_timeout {
final_res := response.ErrorResponse{Success: false, Error: "failed to process request before timeout"}
c.JSON(500, final_res)
return
}
time.Sleep(1 * time.Millisecond)
}
}
func listface(c *gin.Context) {
TB_EMBEDDINGS := "TB_EMBEDDINGS"
face2 := os.Getenv("VISION-FACE2")
if face2 == "True" {
TB_EMBEDDINGS = "TB_EMBEDDINGS2"
}
rows, _ := db.Query(fmt.Sprintf("select userid from %s", TB_EMBEDDINGS))
var userids = []string{}
for rows.Next() {
var userid string
rows.Scan(&userid)
userids = append(userids, userid)
}
res := response.FacelistResponse{Success: true, Faces: userids}
c.JSON(200, res)
return
}
func deleteface(c *gin.Context) {
userid := c.PostForm("userid")
TB_EMBEDDINGS := "TB_EMBEDDINGS"
face2 := os.Getenv("VISION-FACE2")
if face2 == "True" {
TB_EMBEDDINGS = "TB_EMBEDDINGS2"
}
trans, _ := db.Begin()
stmt, _ := trans.Prepare(fmt.Sprintf("DELETE FROM %s WHERE userid=?", TB_EMBEDDINGS))
defer stmt.Close()
stmt.Exec(userid)
trans.Commit()
res := response.FaceDeleteResponse{Success: true}
c.JSON(200, res)
return
}
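// register_model stores an uploaded model file (model.pb) and its config.json
// under DATA_DIR/models/vision/<name>/, creating the directory on first upload.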
func register_model(c *gin.Context) {
model_file, _ := c.FormFile("model")
config_file, _ := c.FormFile("config")
model_name := c.PostForm("name")
MODEL_DIR := DATA_DIR + "/models/vision/" + model_name + "/"
model_exists, _ := utils.PathExists(MODEL_DIR)
message := "model updated"
if model_exists == false {
os.MkdirAll(MODEL_DIR, os.ModePerm)
message = "model registered"
}
c.SaveUploadedFile(model_file, MODEL_DIR+"model.pb")
c.SaveUploadedFile(config_file, MODEL_DIR+"config.json")
res := response.ModelRegisterResponse{Success: true, Message: message}
c.JSON(200, res)
}
func delete_model(c *gin.Context) {
model_name := c.PostForm("name")
MODEL_DIR := DATA_DIR + "/models/vision/" + model_name + "/"
os.RemoveAll(MODEL_DIR)
res := response.ModelDeleteResponse{Success: true, Message: "Model removed"}
c.JSON(200, res)
return
}
func list_models(c *gin.Context) {
model_list, err := filepath.Glob(DATA_DIR + "/models/vision/*")
models := []structures.ModelInfo{}
if err == nil {
for _, file := range model_list {
model_name := filepath.Base(file)
fileStat, _ := os.Stat(file + "/model.pb")
size := float32(fileStat.Size()) / (1000 * 1000)
model_info := structures.ModelInfo{Name: model_name, Dateupdated: fileStat.ModTime(), Modelsize: size}
models = append(models, model_info)
}
}
res := structures.AllModels{Models: models, Success: true}
c.JSON(200, res)
return
}
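// single_request_loop pushes a request onto the given queue and polls Redis for
// the result; unlike the handlers above it does not enforce request_timeout.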
func single_request_loop(c *gin.Context, queue_name string) {
img_id := uuid.NewV4().String()
req_id := uuid.NewV4().String()
file, _ := c.FormFile("image")
c.SaveUploadedFile(file, filepath.Join(temp_path, img_id))
req_data := requests.RecognitionRequest{Imgid: img_id, Reqid: req_id, Reqtype: "custom"}
req_string, _ := json.Marshal(req_data)
redis_client.RPush(queue_name, req_string)
for true {
output, _ := redis_client.Get(req_id).Result()
if output != "" {
var res response.RecognitionResponse
json.Unmarshal([]byte(output), &res)
if res.Success == false {
var error_response response.ErrorResponseInternal
json.Unmarshal([]byte(output), &error_response)
final_res := response.ErrorResponse{Success: false, Error: error_response.Error}
c.JSON(error_response.Code, final_res)
return
} else {
c.JSON(200, res)
return
}
break
}
time.Sleep(1 * time.Millisecond)
}
}
func backup(c *gin.Context) {
file_id := uuid.NewV4().String() + ".zip"
backup_name := "Backup_" + time.Now().Format("2006-01-02T15:04:05") + ".backup"
output_file, _ := os.Create(temp_path + "/" + file_id)
zip_archive := zip.NewWriter(output_file)
models, err := filepath.Glob(DATA_DIR + "/models/vision/*")
if err == nil {
for _, file := range models {
model_name := filepath.Base(file)
utils.AddFileToZip(zip_archive, path.Join(file, "model.pb"), "models/vision/"+model_name+"/model.pb")
utils.AddFileToZip(zip_archive, path.Join(file, "config.json"), "models/vision/"+model_name+"/config.json")
}
}
utils.AddFileToZip(zip_archive, DATA_DIR+"/faceembedding.db", "faceembedding.db")
zip_archive.Close()
output_file.Close()
data_file, _ := os.Open(temp_path + "/" + file_id)
info, err := os.Stat(temp_path + "/" + file_id)
if err != nil {
fmt.Println(err)
}
contentLength := info.Size()
contentType := "application/octet-stream"
extraHeaders := map[string]string{
"Content-Disposition": "attachment; filename=" + backup_name,
}
c.DataFromReader(200, contentLength, contentType, data_file, extraHeaders)
}
func restore(c *gin.Context) {
backup_file, _ := c.FormFile("file")
backup_path := temp_path + "/deepstack.backup"
c.SaveUploadedFile(backup_file, backup_path)
defer os.Remove(backup_path)
zip_reader, err := zip.OpenReader(backup_path)
if err != nil {
response := response.ErrorResponse{Success: false, Error: "Invalid backup file"}
c.JSON(200, response)
return
}
defer zip_reader.Close()
for _, f := range zip_reader.File {
f_path := f.Name
		data, err := f.Open()
		if err != nil {
			fmt.Println(err)
			continue
		}
		fpath := path.Join(DATA_DIR, f_path)
		os.MkdirAll(filepath.Dir(fpath), os.ModePerm)
		outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
		if err != nil {
			fmt.Println(err)
			data.Close()
			continue
		}
		if _, err = io.Copy(outFile, data); err != nil {
			fmt.Println(err)
		}
		outFile.Close()
		data.Close()
}
res := response.RestoreResponse{Success: true}
c.JSON(200, res)
return
}
func printfromprocess(cmd *exec.Cmd) {
for true {
out, err := cmd.StdoutPipe()
if err == nil {
outData, _ := ioutil.ReadAll(out)
fmt.Println(string(outData))
time.Sleep(1 * time.Second)
}
}
}
func printlogs() {
face1 := os.Getenv("VISION-FACE")
face2 := os.Getenv("VISION-FACE2")
detection := os.Getenv("VISION-DETECTION")
scene := os.Getenv("VISION-SCENE")
if face1 == "True" || face2 == "True" {
fmt.Println("/v1/vision/face")
fmt.Println("---------------------------------------")
fmt.Println("/v1/vision/face/recognize")
fmt.Println("---------------------------------------")
fmt.Println("/v1/vision/face/register")
fmt.Println("---------------------------------------")
fmt.Println("/v1/vision/face/match")
fmt.Println("---------------------------------------")
fmt.Println("/v1/vision/face/list")
fmt.Println("---------------------------------------")
fmt.Println("/v1/vision/face/delete")
fmt.Println("---------------------------------------")
}
if detection == "True" {
fmt.Println("/v1/vision/detection")
fmt.Println("---------------------------------------")
}
if scene == "True" {
fmt.Println("/v1/vision/scene")
fmt.Println("---------------------------------------")
}
models, err := filepath.Glob(DATA_DIR + "/models/vision/*")
custom := os.Getenv("VISION-CUSTOM")
if err == nil && custom == "True" {
for _, file := range models {
model_name := filepath.Base(file)
fmt.Println("v1/vision/custom/" + model_name)
fmt.Println("---------------------------------------")
}
}
fmt.Println("---------------------------------------")
fmt.Println("v1/backup")
fmt.Println("---------------------------------------")
fmt.Println("v1/restore")
}
func home(c *gin.Context) {
c.HTML(200, "index.html", gin.H{})
}
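// initActivation copies the underscore-named activation variables (VISION_FACE,
// VISION_DETECTION, VISION_SCENE) onto the dash-named variants used elsewhere,
// without overwriting values that were set explicitly.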
func initActivation() {
face := os.Getenv("VISION_FACE")
detection := os.Getenv("VISION_DETECTION")
scene := os.Getenv("VISION_SCENE")
if os.Getenv("VISION-FACE") == "" {
os.Setenv("VISION-FACE", face)
}
if os.Getenv("VISION-DETECTION") == "" {
os.Setenv("VISION-DETECTION", detection)
}
if os.Getenv("VISION-SCENE") == "" {
os.Setenv("VISION-SCENE", scene)
}
}
func launchservices() {
}
func main() {
initActivation()
var visionFace string
var visionDetection string
var visionScene string
var apiKey string
var adminKey string
var port int
var modelStoreDetection string
var mode string
if os.Getenv("PROFILE") == "" {
os.Chdir("C://DeepStack//server")
platformdata, err := ioutil.ReadFile("platform.json")
if err == nil {
var platform structures.PLATFORM
json.Unmarshal(platformdata, &platform)
os.Setenv("PROFILE", platform.PROFILE)
os.Setenv("CUDA_MODE", platform.CUDA_MODE)
}
}
versionfile, err := os.Open("version.txt")
if err == nil {
versiondata, _ := ioutil.ReadAll(versionfile)
version := string(versiondata)
fmt.Println("DeepStack: Version " + version)
}
flag.StringVar(&visionFace, "VISION-FACE", os.Getenv("VISION-FACE"), "enable face detection")
flag.StringVar(&visionDetection, "VISION-DETECTION", os.Getenv("VISION-DETECTION"), "enable object detection")
flag.StringVar(&visionScene, "VISION-SCENE", os.Getenv("VISION-SCENE"), "enable scene recognition")
flag.StringVar(&apiKey, "API-KEY", os.Getenv("API-KEY"), "api key to secure endpoints")
flag.StringVar(&adminKey, "ADMIN-KEY", os.Getenv("ADMIN-KEY"), "admin key to secure admin endpoints")
flag.StringVar(&modelStoreDetection, "MODELSTORE-DETECTION", "/modelstore/detection/", "path to custom detection models")
flag.Float64Var(&request_timeout, "TIMEOUT", 60, "request timeout in seconds")
flag.StringVar(&mode, "MODE", "Medium", "performance mode")
getPort := os.Getenv("PORT")
intPortVal, err := strconv.Atoi(getPort)
if err != nil {
flag.IntVar(&port, "PORT", 5000, "port")
} else {
flag.IntVar(&port, "PORT", intPortVal, "port")
}
flag.Parse()
PROFILE := os.Getenv("PROFILE")
if !strings.HasSuffix(modelStoreDetection, "/") {
modelStoreDetection = modelStoreDetection + "/"
}
APPDIR := os.Getenv("APPDIR")
DATA_DIR = os.Getenv("DATA_DIR")
startedProcesses := make([]*exec.Cmd, 0)
redis_server := "redis-server"
interpreter := "python3"
if PROFILE == "windows_native" {
APPDIR = "C://DeepStack"
interpreter = filepath.Join(APPDIR, "interpreter", "python.exe")
redis_server = filepath.Join(APPDIR, "redis", "redis-server.exe")
os.Setenv("VISION-FACE", visionFace)
os.Setenv("VISION-DETECTION", visionDetection)
os.Setenv("VISION-SCENE", visionScene)
os.Setenv("APPDIR", APPDIR)
os.Setenv("MODE", mode)
}
if DATA_DIR == "" {
DATA_DIR = "/datastore"
if PROFILE == "windows_native" {
DATA_DIR = filepath.Join(os.Getenv("LocalAppData"), "DeepStack")
}
}
temp_path = os.Getenv("TEMP_PATH")
if temp_path == "" {
temp_path = "/deeptemp/"
if PROFILE == "windows_native" {
temp_path = filepath.Join(os.TempDir(), "DeepStack")
}
}
logdir := filepath.Join(APPDIR, "logs")
if PROFILE == "windows_native" {
os.Setenv("DATA_DIR", DATA_DIR)
os.Setenv("TEMP_PATH", temp_path)
logdir = filepath.Join(DATA_DIR, "logs")
}
request_timeout_str := os.Getenv("TIMEOUT")
request_timeout_val, err := strconv.ParseFloat(request_timeout_str, 64)
if request_timeout_str != "" && err == nil {
request_timeout = request_timeout_val
}
os.Mkdir(logdir, 0755)
os.Mkdir(DATA_DIR, 0755)
os.Mkdir(temp_path, 0755)
if PROFILE == "windows_native" {
go utils.CreateDirs(logdir, DATA_DIR, temp_path)
}
stdout, _ := os.Create(filepath.Join(logdir, "stdout.txt"))
defer stdout.Close()
stderr, _ := os.Create(filepath.Join(logdir, "stderr.txt"))
defer stderr.Close()
ctx := context.TODO()
initScript := filepath.Join(APPDIR, "init.py")
detectionScript := filepath.Join(APPDIR, "intelligencelayer/shared/detection.py")
faceScript := filepath.Join(APPDIR, "intelligencelayer/shared/face.py")
sceneScript := filepath.Join(APPDIR, "intelligencelayer/shared/scene.py")
initcmd := exec.CommandContext(ctx, "bash", "-c", interpreter+" "+initScript)
if PROFILE == "windows_native" {
initcmd = exec.CommandContext(ctx, interpreter, initScript)
}
initcmd.Dir = APPDIR
initcmd.Stdout = stdout
initcmd.Stderr = stderr
rediscmd := exec.CommandContext(ctx, "bash", "-c", redis_server+" --daemonize yes")
if PROFILE == "windows_native" {
rediscmd = exec.CommandContext(ctx, redis_server)
rediscmd.Dir = filepath.Join(APPDIR, "redis")
}
rediscmd.Stdout = stdout
rediscmd.Stderr = stderr
err = rediscmd.Start()
if err != nil {
stderr.WriteString("Redis server failed to start: " + err.Error())
}
err = initcmd.Run()
startedProcesses = append(startedProcesses, initcmd)
if err != nil {
stderr.WriteString("Init process failed to start " + err.Error())
}
redis_client = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
if visionDetection == "True" {
detectioncmd := exec.CommandContext(ctx, "bash", "-c", interpreter+" "+detectionScript)
if PROFILE == "windows_native" {
detectioncmd = exec.CommandContext(ctx, interpreter, detectionScript)
}
startedProcesses = append(startedProcesses, detectioncmd)
detectioncmd.Dir = filepath.Join(APPDIR, "intelligencelayer/shared")
detectioncmd.Stdout = stdout
detectioncmd.Stderr = stderr
detectioncmd.Env = os.Environ()
err = detectioncmd.Start()
if err != nil {
stderr.WriteString("Detection process failed to start" + err.Error())
}
// go utils.KeepProcess(detectioncmd, redis_client, "detection", PROFILE, interpreter, detectionScript, APPDIR, stdout, stderr, &ctx, startedProcesses)
}
if visionFace == "True" {
facecmd := exec.CommandContext(ctx, "bash", "-c", interpreter+" "+faceScript)
if PROFILE == "windows_native" {
facecmd = exec.CommandContext(ctx, interpreter, faceScript)
}
startedProcesses = append(startedProcesses, facecmd)
facecmd.Dir = filepath.Join(APPDIR, "intelligencelayer/shared")
facecmd.Stdout = stdout
facecmd.Stderr = stderr
facecmd.Env = os.Environ()
err = facecmd.Start()
if err != nil {
stderr.WriteString("face process failed to start " + err.Error())
}
// go utils.KeepProcess(facecmd, redis_client, "face", PROFILE, interpreter, faceScript, APPDIR, stdout, stderr, &ctx, startedProcesses)
}
if visionScene == "True" {
scenecmd := exec.CommandContext(ctx, "bash", "-c", interpreter+" "+sceneScript)
if PROFILE == "windows_native" {
scenecmd = exec.CommandContext(ctx, interpreter, sceneScript)
}
startedProcesses = append(startedProcesses, scenecmd)
scenecmd.Dir = filepath.Join(APPDIR, "intelligencelayer/shared")
scenecmd.Stdout = stdout
scenecmd.Stderr = stderr
scenecmd.Env = os.Environ()
err = scenecmd.Start()
if err != nil {
stderr.WriteString("scene process failed to start: " + err.Error())
}
// go utils.KeepProcess(scenecmd, redis_client, "scene", PROFILE, interpreter, sceneScript, APPDIR, stdout, stderr, &ctx, startedProcesses)
}
db, _ = sql.Open("sqlite3", filepath.Join(DATA_DIR, "faceembedding.db"))
gin.SetMode(gin.ReleaseMode)
server := gin.New()
go utils.LogToServer(&sub_data)
admin_key := os.Getenv("ADMIN-KEY")
api_key := os.Getenv("API-KEY")
if admin_key != "" || api_key != "" {
if admin_key != "" {
settings.ADMIN_KEY = admin_key
} else {
settings.ADMIN_KEY = ""
}
if api_key != "" {
settings.API_KEY = api_key
} else {
settings.API_KEY = ""
}
}
server.Use(gin.Recovery())
v1 := server.Group("/v1")
v1.Use(gin.Logger())
vision := v1.Group("/vision")
vision.Use(middlewares.CheckApiKey(&sub_data, &settings))
{
vision.POST("/scene", middlewares.CheckScene(), middlewares.CheckImage(), scene)
vision.POST("/detection", middlewares.CheckDetection(), middlewares.CheckImage(), middlewares.CheckConfidence(), func(c *gin.Context) {
detection(c, "detection_queue")
})
facegroup := vision.Group("/face")
facegroup.Use(middlewares.CheckFace())
{
facegroup.POST("/", middlewares.CheckImage(), middlewares.CheckConfidence(), facedetection)
facegroup.POST("/recognize", middlewares.CheckImage(), middlewares.CheckConfidence(), facerecognition)
facegroup.POST("/register", middlewares.CheckMultiImage(), middlewares.CheckUserID(), faceregister)
facegroup.POST("/match", middlewares.CheckFaceMatch(), facematch)
facegroup.POST("/delete", middlewares.CheckUserID(), deleteface)
facegroup.POST("/list", listface)
}
vision.POST("/addmodel", middlewares.CheckAdminKey(&sub_data, &settings), middlewares.CheckRegisterModel(&sub_data, DATA_DIR), register_model)
vision.POST("/deletemodel", middlewares.CheckAdminKey(&sub_data, &settings), middlewares.CheckDeleteModel(DATA_DIR), delete_model)
vision.POST("/listmodels", middlewares.CheckAdminKey(&sub_data, &settings), list_models)
custom := vision.Group("/custom")
custom.Use(middlewares.CheckImage())
{
models, err := filepath.Glob(modelStoreDetection + "*.pt")
if err == nil {
for _, file := range models {
model_name := filepath.Base(file)
model_name = model_name[:strings.LastIndex(model_name, ".")]
modelcmd := exec.CommandContext(ctx, "bash", "-c", interpreter+" "+detectionScript+" --model "+file+" --name "+model_name)
if PROFILE == "windows_native" {
modelcmd = exec.CommandContext(ctx, interpreter, detectionScript, "--model", file, "--name", model_name)
}
startedProcesses = append(startedProcesses, modelcmd)
modelcmd.Dir = filepath.Join(APPDIR, "intelligencelayer/shared")
modelcmd.Stdout = stdout
modelcmd.Stderr = stderr
err = modelcmd.Start()
if err != nil {
stderr.WriteString(err.Error())
}
custom.POST(model_name, func(c *gin.Context) {
detection(c, model_name+"_queue")
})
fmt.Println("---------------------------------------")
fmt.Println("v1/vision/custom/" + model_name)
}
}
}
}
v1.POST("/backup", middlewares.CheckAdminKey(&sub_data, &settings), backup)
v1.POST("/restore", middlewares.CheckAdminKey(&sub_data, &settings), middlewares.CheckRestore(), restore)
server.Static("/assets", "./assets")
server.LoadHTMLGlob("templates/*")
server.GET("/", home)
server.GET("/admin", home)
port2 := strconv.Itoa(port)
printlogs()
signalChannel := make(chan os.Signal, 2)
signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM)
go func() {
sig := <-signalChannel
		// SIGKILL cannot be caught; handle the signals actually registered above.
		if sig == syscall.SIGTERM || sig == os.Interrupt {
for _, process := range startedProcesses {
err = process.Process.Kill()
if err != nil {
stderr.WriteString(err.Error())
}
}
}
}()
server.Run(":" + port2)
}
|
[
"\"VISION-FACE2\"",
"\"VISION-FACE2\"",
"\"VISION-FACE\"",
"\"VISION-FACE2\"",
"\"VISION-DETECTION\"",
"\"VISION-SCENE\"",
"\"VISION-CUSTOM\"",
"\"VISION_FACE\"",
"\"VISION_DETECTION\"",
"\"VISION_SCENE\"",
"\"VISION-FACE\"",
"\"VISION-DETECTION\"",
"\"VISION-SCENE\"",
"\"PROFILE\"",
"\"VISION-FACE\"",
"\"VISION-DETECTION\"",
"\"VISION-SCENE\"",
"\"API-KEY\"",
"\"ADMIN-KEY\"",
"\"PORT\"",
"\"PROFILE\"",
"\"APPDIR\"",
"\"DATA_DIR\"",
"\"LocalAppData\"",
"\"TEMP_PATH\"",
"\"TIMEOUT\"",
"\"ADMIN-KEY\"",
"\"API-KEY\""
] |
[] |
[
"API-KEY",
"VISION-CUSTOM",
"PORT",
"LocalAppData",
"TEMP_PATH",
"VISION-FACE",
"DATA_DIR",
"VISION-DETECTION",
"VISION-FACE2",
"PROFILE",
"VISION-SCENE",
"ADMIN-KEY",
"APPDIR",
"VISION_SCENE",
"VISION_FACE",
"TIMEOUT",
"VISION_DETECTION"
] |
[]
|
["API-KEY", "VISION-CUSTOM", "PORT", "LocalAppData", "TEMP_PATH", "VISION-FACE", "DATA_DIR", "VISION-DETECTION", "VISION-FACE2", "PROFILE", "VISION-SCENE", "ADMIN-KEY", "APPDIR", "VISION_SCENE", "VISION_FACE", "TIMEOUT", "VISION_DETECTION"]
|
go
| 17 | 0 | |
com.ibm.streamsx.messaging/impl/java/src/com/ibm/streamsx/messaging/kafka/KafkaBaseOper.java
|
/*******************************************************************************
* Copyright (C) 2014, International Business Machines Corporation
* All Rights Reserved
*******************************************************************************/
package com.ibm.streamsx.messaging.kafka;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.logging.Logger;
import javax.security.auth.login.Configuration;
import com.ibm.streams.operator.AbstractOperator;
import com.ibm.streams.operator.OperatorContext;
import com.ibm.streams.operator.StreamSchema;
import com.ibm.streams.operator.OperatorContext.ContextCheck;
import com.ibm.streams.operator.Type.MetaType;
import com.ibm.streams.operator.compile.OperatorContextChecker;
import com.ibm.streams.operator.logging.TraceLevel;
import com.ibm.streams.operator.model.Libraries;
import com.ibm.streams.operator.model.Parameter;
import com.ibm.streamsx.messaging.common.PropertyProvider;
/**
* Base operator for all the common functions
*
*/
@Libraries({ "opt/downloaded/*","opt/*" })
public abstract class KafkaBaseOper extends AbstractOperator {
protected Properties properties = new Properties(),
finalProperties = new Properties(),
appConfigProperties = new Properties();
// application configuration name
protected String appConfigName;
protected String propertiesFile = null;
protected AttributeHelper topicAH = new AttributeHelper("topic"), //$NON-NLS-1$
keyAH = new AttributeHelper("key"), //$NON-NLS-1$
messageAH = new AttributeHelper("message"); //$NON-NLS-1$
protected List<String> topics = new ArrayList<String>();
private List<String> appConfigPropName = new ArrayList<String>();
private String jaasFile = null;
private String jaasFilePropName = "jaasFile"; //$NON-NLS-1$
private static final Logger trace = Logger.getLogger(KafkaBaseOper.class
.getCanonicalName());
/*
* Check that either appConfig, propertiesFile, or kafkaProperty parameter specified.
*/
@ContextCheck(runtime = false, compile = true)
public static void checkCompileCompatability(OperatorContextChecker checker) {
OperatorContext operContext = checker.getOperatorContext();
if (!operContext.getParameterNames().contains("propertiesFile") //$NON-NLS-1$
&& !operContext.getParameterNames().contains("kafkaProperty") //$NON-NLS-1$
&& !operContext.getParameterNames().contains("appConfigName")) { //$NON-NLS-1$
checker.setInvalidContext(
Messages.getString("MISSING_PRPERTIES"), //$NON-NLS-1$
new String[] {});
}
checker.checkDependentParameters("jaasFilePropName", "appConfigName"); //$NON-NLS-1$ //$NON-NLS-2$
}
/*
* The method checkParametersRuntime validates that the reconnection policy
* parameters are appropriate
*/
@ContextCheck(compile = false)
public static void checkParametersRuntime(OperatorContextChecker checker) {
if((checker.getOperatorContext().getParameterNames().contains("appConfigName"))) { //$NON-NLS-1$
String appConfigName = checker.getOperatorContext().getParameterValues("appConfigName").get(0); //$NON-NLS-1$
List<String> appConfigPropName = checker.getOperatorContext().getParameterValues("appConfigPropertyName"); //$NON-NLS-1$
PropertyProvider provider = new PropertyProvider(checker.getOperatorContext().getPE(), appConfigName);
for (String propName : appConfigPropName){
String prop = provider.getProperty(propName);
if(prop == null || prop.trim().length() == 0) {
trace.log(TraceLevel.ERROR, Messages.getString("PROPERTY_NOT_FOUND_IN_APP_CONFIG", propName, appConfigName )); //$NON-NLS-1$
checker.setInvalidContext(
Messages.getString("PROPERTY_NOT_FOUND_IN_APP_CONFIG"), //$NON-NLS-1$
new Object[] {propName, appConfigName});
} else {
trace.log(TraceLevel.TRACE, "Property " + propName + " was found in the appConfig: " + appConfigName); //$NON-NLS-1$ //$NON-NLS-2$
}
}
}
}
protected static void checkForMessageAttribute(OperatorContext operContext, StreamSchema operSchema) throws Exception {
List<String> messageAttrParameter = operContext.getParameterValues("messageAttribute"); //$NON-NLS-1$
String messageAttrString = "message"; //$NON-NLS-1$
if (!messageAttrParameter.isEmpty()){
messageAttrString = messageAttrParameter.get(0);
}
		if (!operSchema.getAttributeNames().contains(messageAttrString)){
throw new UnsupportedStreamsKafkaAttributeException(Messages.getString("ATTRIBUTE_MESSAGE_OR_MESSAGEATTRIBUTE_REQUIRED")); //$NON-NLS-1$
}
}
public void initialize(OperatorContext context) throws Exception {
super.initialize(context);
getKafkaProperties(context);
}
/*
	 * Order of precedence when duplicate property values are provided:
	 * 1. PropertyProvider (appConfig) properties override
	 * 2. kafkaProperty parameter properties, which override
	 * 3. properties-file properties.
*/
protected void getKafkaProperties(OperatorContext context)
throws IOException, FileNotFoundException, UnsupportedStreamsKafkaConfigurationException {
String propFile = getPropertiesFile();
// Lowest priority properties file first
if (propFile != null) {
finalProperties.load(new FileReader(propFile));
}
// Next add properties that were input as parameters
finalProperties.putAll(properties);
// Highest priority PropertyProvider last
PropertyProvider propertyProvider = null;
if(getAppConfigName() != null) {
propertyProvider = new PropertyProvider(context.getPE(), getAppConfigName());
appConfigProperties = getCurrentAppConfigProperties(propertyProvider);
}
finalProperties.putAll(appConfigProperties);
finalProperties = transformTrustStoreProperty(finalProperties);
String jaasFile = getJaasFile();
if(jaasFile != null) {
System.setProperty("java.security.auth.login.config", jaasFile); //$NON-NLS-1$
		// After (re)setting java.security.auth.login.config we must refresh the
		// JAAS Configuration so the change is picked up.
Configuration config = Configuration.getConfiguration();
config.refresh();
}
if (finalProperties == null || finalProperties.isEmpty())
throw new UnsupportedStreamsKafkaConfigurationException(
Messages.getString("KAFKA_CONNECTION_PROPERTIES_MUST_BE_SPECIFIED")); //$NON-NLS-1$
}
/*
* If appConfigPropName not specified, put all properties
* Else, just put the specified properties.
*/
private Properties getCurrentAppConfigProperties(PropertyProvider propertyProvider) {
Properties localAppConfigProps = new Properties();
if (appConfigPropName.isEmpty()){
// Then we want to add all properties from appConfig
trace.log(TraceLevel.INFO, "Adding all properties from appConfig (appConfigPropertyName not specified): " + getAppConfigName()); //$NON-NLS-1$
localAppConfigProps.putAll(propertyProvider.getAllProperties());
} else {
for (String propName : appConfigPropName){
localAppConfigProps.put(propName, propertyProvider.getProperty(propName));
}
}
return localAppConfigProps;
}
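	/*
	 * Returns true when the appConfig-backed properties have changed since they
	 * were last loaded into appConfigProperties.
	 */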
protected boolean newPropertiesExist(OperatorContext context) {
PropertyProvider propertyProvider = null;
boolean newProperties = false;
if(getAppConfigName() != null) {
propertyProvider = new PropertyProvider(context.getPE(), getAppConfigName());
Properties currentAppConfigProperties = getCurrentAppConfigProperties(propertyProvider);
if(!currentAppConfigProperties.equals(appConfigProperties)){
newProperties = true;
}
}
return newProperties;
}
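	/*
	 * Resolves ssl.truststore.location to an absolute path; when SSL/SASL_SSL is
	 * enabled without an explicit truststore, falls back to the default Java
	 * cacerts truststore under STREAMS_INSTALL.
	 */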
private Properties transformTrustStoreProperty(Properties props) {
final String trustorePropertyName = "ssl.truststore.location"; //$NON-NLS-1$
final String trustorePasswordPropertyName = "ssl.truststore.password"; //$NON-NLS-1$
final String securityProtocolPropertyName = "security.protocol"; //$NON-NLS-1$
String trustStoreFile = props.getProperty(trustorePropertyName);
String securityProtocol = props.getProperty(securityProtocolPropertyName);
String trustStorePassword = props.getProperty(trustorePasswordPropertyName);
if (trustStoreFile != null){
trustStoreFile = getAbsoluteFilePath(trustStoreFile);
props.setProperty(trustorePropertyName, trustStoreFile);
trace.log(TraceLevel.INFO, "TrustStore location set to " + trustStoreFile); //$NON-NLS-1$
} else if (securityProtocol != null && (securityProtocol.equalsIgnoreCase("SSL") //$NON-NLS-1$
|| securityProtocol.equalsIgnoreCase("SASL_SSL"))){ //$NON-NLS-1$
Map<String, String> env = System.getenv();
//get java default truststore
trustStoreFile = env.get("STREAMS_INSTALL") + "/java/jre/lib/security/cacerts"; //$NON-NLS-1$ //$NON-NLS-2$
props.setProperty(trustorePropertyName, trustStoreFile);
if (trustStorePassword == null)
props.setProperty(trustorePasswordPropertyName, "changeit"); //$NON-NLS-1$
trace.log(TraceLevel.WARN, "Automatically setting to default Java trust store."); //$NON-NLS-1$
}
return props;
}
public void initSchema(StreamSchema ss) throws Exception {
trace.log(TraceLevel.INFO, "Connection properties: " + finalProperties); //$NON-NLS-1$
Set<MetaType> supportedTypes = new HashSet<MetaType>();
supportedTypes.add(MetaType.RSTRING);
supportedTypes.add(MetaType.USTRING);
supportedTypes.add(MetaType.BLOB);
keyAH.initialize(ss, false, supportedTypes);
messageAH.initialize(ss, true, supportedTypes);
// blobs are not supported for topics
supportedTypes.remove(MetaType.BLOB);
topicAH.initialize(ss, false, supportedTypes);
}
@Parameter(cardinality = -1, optional = true, description = "Specify a Kafka property \\\"key=value\\\" form. "
+ "This will override any property specified in the properties file. "
+ "The hierarchy of properties goes: properties from appConfig beat out kafkaProperty parameter properties, which beat out properties from the propertiesFile. ")
public void setKafkaProperty(List<String> values) throws UnsupportedStreamsKafkaConfigurationException {
for (String value : values) {
int idx = value.indexOf("="); //$NON-NLS-1$
if (idx == -1)
throw new UnsupportedStreamsKafkaConfigurationException(Messages.getString("PROPERTY_NOT_IN_KEY_VALUE_FORMAT", value )); //$NON-NLS-1$
String name = value.substring(0, idx);
String v = value.substring(idx + 1, value.length());
properties.setProperty(name, v);
}
}
@Parameter(optional = true, description = "Properties file containing kafka properties. "
+ "Properties file is recommended to be stored in the etc directory. "
+ "If a relative path is specified, the path is relative to the application directory. "
+ "The hierarchy of properties goes: properties from appConfig beat out kafkaProperty parameter properties, which beat out properties from the propertiesFile. ")
public void setPropertiesFile(String value) {
this.propertiesFile = value;
}
public String getPropertiesFile() {
trace.log(TraceLevel.TRACE, "Properties file: " + propertiesFile); //$NON-NLS-1$
if (propertiesFile == null) return null;
propertiesFile = getAbsoluteFilePath(propertiesFile);
return propertiesFile;
}
@Parameter(optional = true, description = "This parameter specifies the name of application configuration that stores client properties, "
+ "the property specified via application configuration is overridden by the properties file and kafkaProperty parameter. "
+ "The hierarchy of properties goes: properties from appConfig beat out kafkaProperty parameter properties, which beat out properties from the propertiesFile. ")
public void setAppConfigName(String appConfigName) {
this.appConfigName = appConfigName;
}
public String getAppConfigName() {
return appConfigName;
}
@Parameter(optional = true, description = "List of Kafka properties to retrieve from application configuration. "
+ "The property name in the application configuration must the same as the Kafka property name. "
+ "You may also supply jaasFile as a property name to act as the jaasFile parameter value.")
public void setAppConfigPropertyName(List<String> propNames) {
if (propNames != null)
this.appConfigPropName.addAll(propNames);
}
@Parameter(optional = true, description = "This parameter specifies the property name of the jaasFile location in the application configuration. "
+ "The default name is jaasFile. ")
public void setJaasFilePropName(String jaasFilePropName) {
this.jaasFilePropName = jaasFilePropName;
}
@Parameter(optional = true, description = "Location of the jaas file to be used for SASL connections. "
+ "Jaas file is recommended to be stored in the etc directory. "
+ "If a relative path is specified, the path is relative to the application directory."
+ "This sets the system property java.security.auth.login.config. This can also be set using the appConfig by "
+ "specifying jaasFile=<jaas.conf location>.")
public void setJaasFile(String value) {
jaasFile = value;
}
/*
* Check if jaasFile was specified by the jaasFile parameter.
* If not, check if it's in our properties list.
*/
public String getJaasFile() {
if (finalProperties.containsKey(jaasFilePropName)){
jaasFile = finalProperties.getProperty(jaasFilePropName);
trace.log(TraceLevel.INFO, "Found jaasFile in properties!"); //$NON-NLS-1$
}
trace.log(TraceLevel.INFO, "Jaas file: " + jaasFile); //$NON-NLS-1$
if (jaasFile == null) return null;
jaasFile = getAbsoluteFilePath(jaasFile);
return jaasFile;
}
public String getAbsoluteFilePath(String filePath){
File file = new File(filePath);
// if the properties file is relative, the path is relative to the application directory
if (!file.isAbsolute())
{
filePath = getOperatorContext().getPE().getApplicationDirectory().getAbsolutePath() + "/" + filePath; //$NON-NLS-1$
}
return filePath;
}
@Parameter(optional = true, description = "Name of the attribute for the message. If this parameter is not specified, then by default the operator will look for an attribute named \\\"message\\\". If the \\\"message\\\" attribute is not found, a runtime error will be returned.")
public void setMessageAttribute(String value) {
messageAH.setName(value);
}
@Parameter(optional = true, description = "Name of the attribute for the key. Default is \\\"key\\\".")
public void setKeyAttribute(String value) {
keyAH.setName(value);
}
@Override
public void shutdown() throws Exception {
OperatorContext context = getOperatorContext();
trace.log(TraceLevel.ALL, "Operator " + context.getName() + " shutting down in PE: " + context.getPE().getPEId() + " in Job: " + context.getPE().getJobId() ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
// Must call super.shutdown()
super.shutdown();
}
public static final String BASE_DESC = "Specify properties as described here: http://kafka.apache.org/documentation.html#configuration. " //$NON-NLS-1$
+ "If you are using Java security modules for login/authentication, ensure that they are compatible with IBM Java, as IBM Streams only runs " //$NON-NLS-1$
+ "with IBM Java. The SPL Attribute Types supported are rstring, ustring, and blob. The topic must be of type rstring/ustring, while the key and message must " //$NON-NLS-1$
+ "be of the same type (rstring, ustring, or blob). " //$NON-NLS-1$
+ "\\n\\n**Kafka 0.9 Server Support**: " //$NON-NLS-1$
+ "By default this toolkit builds with Kafka 0.10 client JARs. The Kafka 0.10 client is not compatible with Kafka 0.9 brokers. " //$NON-NLS-1$
+ "To use this operator with Kafka 0.9 brokers, you must rebuild with the kafka-0.9 target after cleaning. From the toolkit root directory: " //$NON-NLS-1$
+ "\\n\\n ant clean" //$NON-NLS-1$
+ "\\n\\n ant kafka-0.9" + //$NON-NLS-1$
"\\n\\n**AppConfig**: " //$NON-NLS-1$
+ "You must provide properties for the operator using at least one of the following parameters: kafkaProperty, propertiesFile, or appConfigName. " //$NON-NLS-1$
+ "The hierarchy of properties goes: properties from appConfig beat out kafkaProperty parameter properties, which beat out properties from the propertiesFile. "; //$NON-NLS-1$
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
testdata/env.go
|
package main
import (
"os"
)
func main() {
// Check for environment variables (set by the test runner).
println("ENV1:", os.Getenv("ENV1"))
v, ok := os.LookupEnv("ENV2")
if !ok {
println("ENV2 not found")
}
println("ENV2:", v)
found := false
expected := "ENV1=" + os.Getenv("ENV1")
for _, envVar := range os.Environ() {
if envVar == expected {
found = true
}
}
if !found {
println("could not find " + expected + " in os.Environ()")
}
// Check for command line arguments.
// Argument 0 is skipped because it is the program name, which varies by
// test run.
println()
for _, arg := range os.Args[1:] {
println("arg:", arg)
}
}
|
[
"\"ENV1\"",
"\"ENV1\""
] |
[] |
[
"ENV1"
] |
[]
|
["ENV1"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"os/exec"
"strings"
"gopl.io/ch4/github"
)
const (
gitHubAPIURL = "https://api.github.com"
createIssueHelp = "https://docs.github.com/en/rest/reference/issues#create-an-issue"
)
func getRepositoryIssues(ownerName string, repoName string) (
[]github.Issue, error) {
getRepoIssuesURL := fmt.Sprintf(
"%s/repos/%s/%s/issues",
gitHubAPIURL,
ownerName,
repoName)
var issues []github.Issue
request, err := http.NewRequest("GET", getRepoIssuesURL, nil)
if err != nil {
return issues, err
}
request.Header.Set("Accept", "application/vnd.github.v3+json")
response, err := http.DefaultClient.Do(request)
if err != nil {
return issues, err
}
if response.StatusCode != http.StatusOK {
response.Body.Close()
err := fmt.Errorf("HTTP error: %s", response.Status)
return issues, err
}
if err := json.NewDecoder(response.Body).Decode(&issues); err != nil {
response.Body.Close()
return issues, err
}
response.Body.Close()
return issues, nil
}
func printRepositoryIssues(issues []github.Issue) {
fmt.Printf("Total issue number: %d", len(issues))
for i, issue := range issues {
prettyIssue, err := json.MarshalIndent(issue, "", "\t")
if err != nil {
fmt.Printf("Can not prettify the issue number %d", issue.Number)
}
fmt.Printf("\nIssue no. %d\n", i+1)
fmt.Println(string(prettyIssue))
}
}
func createIssue(
username string,
password string,
ownerName string,
repoName string,
jsonIssueProperties string) (github.Issue, error) {
createIssueURL := fmt.Sprintf(
"%s/repos/%s/%s/issues",
gitHubAPIURL,
ownerName,
repoName,
)
var issue github.Issue
bodyReader := strings.NewReader(jsonIssueProperties)
request, err := http.NewRequest("POST", createIssueURL, bodyReader)
if err != nil {
return issue, err
}
request.Header.Set("Accept", "application/vnd.github.v3+json")
request.SetBasicAuth(username, password)
response, err := http.DefaultClient.Do(request)
if err != nil {
return issue, err
}
if response.StatusCode != http.StatusCreated {
response.Body.Close()
err := fmt.Errorf("HTTP error: %s", response.Status)
return issue, err
}
if err := json.NewDecoder(response.Body).Decode(&issue); err != nil {
response.Body.Close()
return issue, err
}
response.Body.Close()
return issue, nil
}
func updateIssue(
username string,
password string,
ownerName string,
repoName string,
issueNumber uint64,
jsonIssueProperties string) (github.Issue, error) {
createIssueURL := fmt.Sprintf(
"%s/repos/%s/%s/issues/%d",
gitHubAPIURL,
ownerName,
repoName,
issueNumber,
)
var issue github.Issue
bodyReader := strings.NewReader(jsonIssueProperties)
request, err := http.NewRequest("PATCH", createIssueURL, bodyReader)
if err != nil {
return issue, err
}
request.Header.Set("Accept", "application/vnd.github.v3+json")
request.SetBasicAuth(username, password)
response, err := http.DefaultClient.Do(request)
if err != nil {
return issue, err
}
if response.StatusCode != http.StatusOK {
response.Body.Close()
err := fmt.Errorf("HTTP error: %s", response.Status)
return issue, err
}
if err := json.NewDecoder(response.Body).Decode(&issue); err != nil {
response.Body.Close()
return issue, err
}
response.Body.Close()
return issue, nil
}
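// openFileInEditor opens the given file in the editor named by the EDITOR
// environment variable (falling back to vim) and blocks until the editor exits.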
func openFileInEditor(filename string) error {
editorName := os.Getenv("EDITOR")
if editorName == "" {
editorName = "vim"
}
executable, err := exec.LookPath(editorName)
if err != nil {
return err
}
command := exec.Command(executable, filename)
command.Stdin = os.Stdin
command.Stdout = os.Stdout
command.Stderr = os.Stderr
return command.Run()
}
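// getEditorOutput collects user input by creating a temporary file, opening it
// in the editor, and returning the file's contents once the editor is closed.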
func getEditorOutput() (string, error) {
file, err := os.CreateTemp(os.TempDir(), "*")
if err != nil {
return "Can not create a temporary file", err
}
filename := file.Name()
defer os.Remove(filename)
if err = file.Close(); err != nil {
errMsg := fmt.Sprintf(
"A temporary file %s is already open",
filename,
)
return errMsg, err
}
if err = openFileInEditor(filename); err != nil {
return "Text editor error", err
}
bytes, err := os.ReadFile(filename)
if err != nil {
return fmt.Sprintf("Can not read the file %s", filename), err
}
return string(bytes), nil
}
func main() {
userName := flag.String("user", "", "a GitHub user name")
password := flag.String("password", "", "a Github user password")
ownerName := flag.String("owner", "", "owner of Github repository")
repoName := flag.String("repo", "", "repository name")
_ = flag.String("repo-issues", "", "command to list repository issues")
_ = flag.String("create-issue", "", "command to create an issue")
updateIssueCmd := flag.NewFlagSet("update-issue", flag.ExitOnError)
issueNumber := updateIssueCmd.Uint64("issue-number", 0, "An issue number")
flag.Parse()
argumentPerName := map[string]string{
"user": *userName,
"password": *password,
"owner": *ownerName,
"repo": *repoName,
}
var inputError string
for name, value := range argumentPerName {
if value == "" {
inputError += "The script requires an argument " + name + "\n"
}
}
if inputError != "" {
fmt.Println(inputError)
return
}
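// The subcommand is expected at os.Args[9]: the program name plus the four
// required flags, each passed as a separate "-flag value" pair, occupy
// os.Args[0] through os.Args[8].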
if len(os.Args) < 10 {
fmt.Println("expected 'repo-issues' or 'create-issue' subcommands")
os.Exit(1)
}
switch os.Args[9] {
case "repo-issues":
issues, err := getRepositoryIssues(*ownerName, *repoName)
if err != nil {
fmt.Printf("ERROR: %s", err)
return
}
printRepositoryIssues(issues)
case "create-issue":
issueProperties, err := getEditorOutput()
if err != nil {
fmt.Println(issueProperties, err)
return
}
issue, err := createIssue(
*userName,
*password,
*ownerName,
*repoName,
issueProperties,
)
if err != nil {
fmt.Printf("ERROR: %s", err)
return
}
prettyIssue, err := json.MarshalIndent(issue, "", "\t")
if err != nil {
fmt.Printf("Can not prettify the created issue %v\n", issue)
}
fmt.Printf("Created issue:\n%s", string(prettyIssue))
case "update-issue":
updateIssueCmd.Parse(os.Args[10:])
if *issueNumber == 0 {
fmt.Println("Add 'issue-number' argument")
return
}
issueProperties, err := getEditorOutput()
if err != nil {
fmt.Println(issueProperties, err)
return
}
issue, err := updateIssue(
*userName,
*password,
*ownerName,
*repoName,
*issueNumber,
issueProperties,
)
if err != nil {
fmt.Printf("ERROR: %s", err)
return
}
prettyIssue, err := json.MarshalIndent(issue, "", "\t")
if err != nil {
fmt.Printf("Can not prettify the updated issue %v\n", issue)
}
fmt.Printf("Updated issue:\n%s", string(prettyIssue))
default:
fmt.Printf("Command %s is not implemented", os.Args[9])
}
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
cmd/manager/main.go
|
package main
import (
"context"
"flag"
"fmt"
"os"
"runtime"
routev1 "github.com/openshift/api/route/v1"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/metrics"
sdkVersion "github.com/operator-framework/operator-sdk/version"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
"github.com/ucloud/grafana-operator/pkg/apis"
grafanav1alpha1 "github.com/ucloud/grafana-operator/pkg/apis/monitor/v1alpha1"
"github.com/ucloud/grafana-operator/pkg/controller"
"github.com/ucloud/grafana-operator/pkg/controller/common"
config2 "github.com/ucloud/grafana-operator/pkg/controller/config"
"github.com/ucloud/grafana-operator/version"
)
var log = logf.Log.WithName("cmd")
var flagImage string
var flagImageTag string
var flagPluginsInitContainerImage string
var flagPluginsInitContainerTag string
var flagJsonnetLocation string
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8080
)
func printVersion() {
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
log.Info(fmt.Sprintf("operator-sdk Version: %v", sdkVersion.Version))
log.Info(fmt.Sprintf("operator Version: %v", version.Version))
}
func init() {
flagset := flag.CommandLine
flagset.StringVar(&flagImage, "grafana-image", "", "Overrides the default Grafana image")
flagset.StringVar(&flagImageTag, "grafana-image-tag", "", "Overrides the default Grafana image tag")
flagset.StringVar(&flagPluginsInitContainerImage, "grafana-plugins-init-container-image", "", "Overrides the default Grafana Plugins Init Container image")
flagset.StringVar(&flagPluginsInitContainerTag, "grafana-plugins-init-container-tag", "", "Overrides the default Grafana Plugins Init Container tag")
flagset.StringVar(&flagJsonnetLocation, "jsonnet-location", "", "Overrides the base path of the jsonnet libraries")
flagset.Parse(os.Args[1:])
}
func main() {
// The logger instantiated here can be changed to any logger
// implementing the logr.Logger interface. This logger will
// be propagated through the whole operator, generating
// uniform and structured logs.
logf.SetLogger(logf.ZapLogger(false))
printVersion()
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
log.Error(err, "failed to get watch namespace")
os.Exit(1)
}
// Controller configuration
controllerConfig := config2.GetControllerConfig()
controllerConfig.AddConfigItem(config2.ConfigGrafanaImage, flagImage)
controllerConfig.AddConfigItem(config2.ConfigGrafanaImageTag, flagImageTag)
controllerConfig.AddConfigItem(config2.ConfigPluginsInitContainerImage, flagPluginsInitContainerImage)
controllerConfig.AddConfigItem(config2.ConfigPluginsInitContainerTag, flagPluginsInitContainerTag)
controllerConfig.AddConfigItem(config2.ConfigOperatorNamespace, namespace)
controllerConfig.AddConfigItem(config2.ConfigDashboardLabelSelector, "")
controllerConfig.AddConfigItem(config2.ConfigJsonnetBasePath, flagJsonnetLocation)
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "")
os.Exit(1)
}
// Become the leader before proceeding
leader.Become(context.TODO(), "grafana-operator-lock")
// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, manager.Options{
Namespace: namespace,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
})
if err != nil {
log.Error(err, "")
os.Exit(1)
}
log.Info("Registering Components.")
// Starting the resource auto-detection for the grafana controller
autodetect, err := common.NewAutoDetect(mgr)
if err != nil {
log.Error(err, "failed to start the background process to auto-detect the operator capabilities")
} else {
autodetect.Start()
defer autodetect.Stop()
}
// Setup Scheme for all resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Setup Scheme for OpenShift routes
if err := routev1.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Setup all Controllers
if err := controller.AddToManager(mgr, autodetect.SubscriptionChannel); err != nil {
log.Error(err, "")
os.Exit(1)
}
servicePorts := []v1.ServicePort{
{
Name: metrics.OperatorPortName,
Protocol: v1.ProtocolTCP,
Port: metricsPort,
TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort},
},
}
_, err = metrics.CreateMetricsService(context.TODO(), cfg, servicePorts)
if err != nil {
log.Error(err, "error starting metrics service")
}
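// Admission webhooks are opt-in: the webhook server starts only when the
// ENABLE_WEBHOOKS environment variable is set to "true".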
if os.Getenv("ENABLE_WEBHOOKS") == "true" {
startWebHook(mgr)
}
log.Info("Starting the Cmd.")
signalHandler := signals.SetupSignalHandler()
if err := mgr.Start(signalHandler); err != nil {
log.Error(err, "manager exited non-zero")
os.Exit(1)
}
}
func startWebHook(mgr manager.Manager) {
log.Info("Starting the WebHook.")
ws := mgr.GetWebhookServer()
ws.CertDir = "/etc/webhook/certs"
ws.Port = 7443
if err := (&grafanav1alpha1.GrafanaDashboard{}).SetupWebhookWithManager(mgr); err != nil {
log.Error(err, "unable to create webHook", "webHook", "GrafanaDashboard")
os.Exit(1)
}
if err := (&grafanav1alpha1.GrafanaDataSource{}).SetupWebhookWithManager(mgr); err != nil {
log.Error(err, "unable to create webHook", "webHook", "GrafanaDataSource")
os.Exit(1)
}
}
//func serveProfiling() {
// log.Info("Starting the Profiling.")
// mux := mux.NewPathRecorderMux("grafana-operator")
// routes.Profiling{}.Install(mux)
// go wait.Until(func() {
// err := http.ListenAndServe("[::]:10269", mux)
// if err != nil {
// log.Error(err, "starting metrics server failed")
// os.Exit(1)
// }
// }, 5*time.Second, wait.NeverStop)
//}
|
[
"\"ENABLE_WEBHOOKS\""
] |
[] |
[
"ENABLE_WEBHOOKS"
] |
[]
|
["ENABLE_WEBHOOKS"]
|
go
| 1 | 0 | |
templates/settings.py
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
import os
import sys
from os.path import abspath, dirname, join
from warnings import warn
from django.core.urlresolvers import reverse_lazy
GRAPHITE_WEB_APP_SETTINGS_LOADED = False
WEBAPP_VERSION = '0.10.0-alpha'
DEBUG = False
JAVASCRIPT_DEBUG = False
# Filesystem layout
WEB_DIR = dirname( abspath(__file__) )
WEBAPP_DIR = dirname(WEB_DIR)
GRAPHITE_ROOT = dirname(WEBAPP_DIR)
# Initialize additional path variables
# Defaults for these are set after local_settings is imported
STATIC_ROOT = ''
STATIC_URL = '/static/'
URL_PREFIX = ''
CONF_DIR = ''
DASHBOARD_CONF = ''
GRAPHTEMPLATES_CONF = ''
STORAGE_DIR = ''
WHITELIST_FILE = ''
INDEX_FILE = ''
LOG_DIR = ''
CERES_DIR = ''
WHISPER_DIR = ''
RRD_DIR = ''
STANDARD_DIRS = []
CLUSTER_SERVERS = []
# Cluster settings
CLUSTER_SERVERS = []
REMOTE_FIND_TIMEOUT = 3.0
REMOTE_FETCH_TIMEOUT = 6.0
REMOTE_RETRY_DELAY = 60.0
REMOTE_READER_CACHE_SIZE_LIMIT = 1000
CARBON_METRIC_PREFIX='carbon'
CARBONLINK_HOSTS = ["127.0.0.1:7002"]
CARBONLINK_TIMEOUT = 1.0
CARBONLINK_HASHING_KEYFUNC = None
CARBONLINK_RETRY_DELAY = 15
REPLICATION_FACTOR = 1
MEMCACHE_HOSTS = []
MEMCACHE_KEY_PREFIX = ''
FIND_CACHE_DURATION = 300
FIND_TOLERANCE = 2 * FIND_CACHE_DURATION
DEFAULT_CACHE_DURATION = 60 #metric data and graphs are cached for one minute by default
LOG_CACHE_PERFORMANCE = False
LOG_ROTATE = True
MAX_FETCH_RETRIES = 2
#Remote rendering settings
REMOTE_RENDERING = False #if True, rendering is delegated to RENDERING_HOSTS
RENDERING_HOSTS = []
REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
LOG_RENDERING_PERFORMANCE = False
#Miscellaneous settings
SMTP_SERVER = "localhost"
DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
ALLOW_ANONYMOUS_CLI = True
LOG_METRIC_ACCESS = False
LEGEND_MAX_ITEMS = 10
RRD_CF = 'AVERAGE'
STORAGE_FINDERS = ('{{storage_finders|join("', '")}}',)
#Authentication settings
USE_LDAP_AUTH = False
LDAP_SERVER = "" # "ldapserver.mydomain.com"
LDAP_PORT = 389
LDAP_USE_TLS = False
LDAP_SEARCH_BASE = "" # "OU=users,DC=mydomain,DC=com"
LDAP_BASE_USER = "" # "CN=some_readonly_account,DC=mydomain,DC=com"
LDAP_BASE_PASS = "" # "my_password"
LDAP_USER_QUERY = "" # "(username=%s)" For Active Directory use "(sAMAccountName=%s)"
LDAP_URI = None
#Set this to True to delegate authentication to the web server
USE_REMOTE_USER_AUTHENTICATION = False
REMOTE_USER_BACKEND = "" # Provide an alternate or subclassed backend
# Django 1.5 requires this so we set a default but warn the user
SECRET_KEY = 'UNSAFE_DEFAULT'
# Django 1.5 requires this to be set. Here we default to prior behavior and allow all
ALLOWED_HOSTS = [ '*' ]
# Override to link a different URL for login (e.g. for django_openid_auth)
LOGIN_URL = reverse_lazy('account_login')
# Set the default timezone to UTC
TIME_ZONE = 'UTC'
# Set to True to require authentication to save or delete dashboards
DASHBOARD_REQUIRE_AUTHENTICATION = False
# Require Django change/delete permissions to save or delete dashboards.
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_PERMISSIONS = False
# Name of a group to which the user must belong to save or delete dashboards. Alternative to
# DASHBOARD_REQUIRE_PERMISSIONS, particularly useful when using only LDAP (without Admin app)
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_EDIT_GROUP = None
DATABASES = None
# If using rrdcached, set to the address or socket of the daemon
FLUSHRRDCACHED = ''
## Load our local_settings
try:
from graphite.local_settings import * # noqa
except ImportError:
print >> sys.stderr, "Could not import graphite.local_settings, using defaults!"
## Load Django settings if they werent picked up in local_settings
if not GRAPHITE_WEB_APP_SETTINGS_LOADED:
from graphite.app_settings import * # noqa
STATICFILES_DIRS = (
join(WEBAPP_DIR, 'content'),
)
## Set config dependent on flags set in local_settings
# Path configuration
if not STATIC_ROOT:
STATIC_ROOT = join(GRAPHITE_ROOT, 'static')
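# CONF_DIR and STORAGE_DIR fall back to the GRAPHITE_CONF_DIR and GRAPHITE_STORAGE_DIR
# environment variables before defaulting to paths under GRAPHITE_ROOT.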
if not CONF_DIR:
CONF_DIR = os.environ.get('GRAPHITE_CONF_DIR', join(GRAPHITE_ROOT, 'conf'))
if not DASHBOARD_CONF:
DASHBOARD_CONF = join(CONF_DIR, 'dashboard.conf')
if not GRAPHTEMPLATES_CONF:
GRAPHTEMPLATES_CONF = join(CONF_DIR, 'graphTemplates.conf')
if not STORAGE_DIR:
STORAGE_DIR = os.environ.get('GRAPHITE_STORAGE_DIR', join(GRAPHITE_ROOT, 'storage'))
if not WHITELIST_FILE:
WHITELIST_FILE = join(STORAGE_DIR, 'lists', 'whitelist')
if not INDEX_FILE:
INDEX_FILE = join(STORAGE_DIR, 'index')
if not LOG_DIR:
LOG_DIR = join(STORAGE_DIR, 'log', 'webapp')
if not WHISPER_DIR:
WHISPER_DIR = join(STORAGE_DIR, 'whisper/')
if not CERES_DIR:
CERES_DIR = join(STORAGE_DIR, 'ceres/')
if not RRD_DIR:
RRD_DIR = join(STORAGE_DIR, 'rrd/')
if not STANDARD_DIRS:
try:
import whisper # noqa
if os.path.exists(WHISPER_DIR):
STANDARD_DIRS.append(WHISPER_DIR)
except ImportError:
print >> sys.stderr, "WARNING: whisper module could not be loaded, whisper support disabled"
try:
import rrdtool # noqa
if os.path.exists(RRD_DIR):
STANDARD_DIRS.append(RRD_DIR)
except ImportError:
pass
if DATABASES is None:
DATABASES = {
'default': {
'NAME': join(STORAGE_DIR, 'graphite.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
}
# Handle URL prefix in static files handling
if URL_PREFIX and not STATIC_URL.startswith(URL_PREFIX):
STATIC_URL = '/{0}{1}'.format(URL_PREFIX.strip('/'), STATIC_URL)
# Default sqlite db file
# This is set here so that a user-set STORAGE_DIR is available
if 'sqlite3' in DATABASES.get('default',{}).get('ENGINE','') \
and not DATABASES.get('default',{}).get('NAME'):
DATABASES['default']['NAME'] = join(STORAGE_DIR, 'graphite.db')
# Caching shortcuts
if MEMCACHE_HOSTS:
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': MEMCACHE_HOSTS,
'TIMEOUT': DEFAULT_CACHE_DURATION,
'KEY_PREFIX': MEMCACHE_KEY_PREFIX,
}
# Authentication shortcuts
if USE_LDAP_AUTH and LDAP_URI is None:
LDAP_URI = "ldap://%s:%d/" % (LDAP_SERVER, LDAP_PORT)
if USE_REMOTE_USER_AUTHENTICATION or REMOTE_USER_BACKEND:
MIDDLEWARE_CLASSES += ('django.contrib.auth.middleware.RemoteUserMiddleware',)
if REMOTE_USER_BACKEND:
AUTHENTICATION_BACKENDS.insert(0,REMOTE_USER_BACKEND)
else:
AUTHENTICATION_BACKENDS.insert(0,'django.contrib.auth.backends.RemoteUserBackend')
if USE_LDAP_AUTH:
AUTHENTICATION_BACKENDS.insert(0,'graphite.account.ldapBackend.LDAPBackend')
if SECRET_KEY == 'UNSAFE_DEFAULT':
warn('SECRET_KEY is set to an unsafe default. This should be set in local_settings.py for better security')
USE_TZ = True
|
[] |
[] |
[
"GRAPHITE_STORAGE_DIR",
"GRAPHITE_CONF_DIR"
] |
[]
|
["GRAPHITE_STORAGE_DIR", "GRAPHITE_CONF_DIR"]
|
python
| 2 | 0 | |
doc/conf.py
|
# -*- coding: utf-8 -*-
#
# files management documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
from os.path import join, dirname, abspath
from topology_lib_files_management import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinxcontrib.plantuml',
'sphinx.ext.graphviz',
'autoapi.sphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'files management'
copyright = '2016, maria alas'
author = 'maria alas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'topology_lib_files_managementdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
'index', 'topology_lib_files_management.tex',
'files management Documentation',
'maria alas', 'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
'index',
'topology_lib_files_management',
'files management Documentation',
[author], 1
),
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'topology_lib_files_management',
'files management Documentation',
author,
'topology_lib_files_management',
(
'This library will have every command related with files '
'management'
),
'Miscellaneous'
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Add style overrides
def setup(app):
app.add_stylesheet('styles/custom.css')
# autoapi configuration
autoapi_modules = {
'topology_lib_files_management': None
}
# Configure PlantUML
plantuml = 'java -jar ' + join(dirname(abspath(__name__)), 'plantuml.8030.jar')
plantuml_output_format = 'svg'
# Configure Graphviz
graphviz_output_format = 'svg'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4', None)
}
# Setup theme if not building in readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) is not None
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
sdk/keyvault/azure-security-keyvault-secrets/src/test/java/com/azure/security/keyvault/secrets/SecretClientTestBase.java
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.security.keyvault.secrets;
import com.azure.core.credential.TokenCredential;
import com.azure.core.exception.HttpResponseException;
import com.azure.core.http.HttpClient;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpPipelineBuilder;
import com.azure.core.http.policy.BearerTokenAuthenticationPolicy;
import com.azure.core.http.policy.ExponentialBackoff;
import com.azure.core.http.policy.HttpLogDetailLevel;
import com.azure.core.http.policy.HttpLogOptions;
import com.azure.core.http.policy.HttpLoggingPolicy;
import com.azure.core.http.policy.HttpPipelinePolicy;
import com.azure.core.http.policy.HttpPolicyProviders;
import com.azure.core.http.policy.RetryPolicy;
import com.azure.core.http.policy.RetryStrategy;
import com.azure.core.http.policy.UserAgentPolicy;
import com.azure.core.http.rest.Response;
import com.azure.core.test.TestBase;
import com.azure.core.test.TestMode;
import com.azure.core.util.Configuration;
import com.azure.core.util.CoreUtils;
import com.azure.identity.ClientSecretCredentialBuilder;
import com.azure.security.keyvault.secrets.models.KeyVaultSecret;
import com.azure.security.keyvault.secrets.models.SecretProperties;
import java.time.Duration;
import java.util.stream.Stream;
import org.junit.jupiter.api.Test;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.*;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import org.junit.jupiter.params.provider.Arguments;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.fail;
public abstract class SecretClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String AZURE_TEST_KEYVAULT_SECRET_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_SECRETS_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV = Configuration.getGlobalConfiguration().get(AZURE_TEST_KEYVAULT_SECRET_SERVICE_VERSIONS);
private static final String SECRET_NAME = "javaSecretTemp";
private static final String SECRET_VALUE = "Chocolate is hidden in the toothpaste cabinet";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
HttpPipeline getHttpPipeline(HttpClient httpClient, SecretServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
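// In live/record mode the credential is built from the ARM_CLIENTID, ARM_CLIENTKEY
// and AZURE_TENANT_ID environment variables; playback mode needs no credential.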
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
// Closest to API goes first, closest to wire goes last.
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION, Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
RetryStrategy strategy = new ExponentialBackoff(5, Duration.ofSeconds(2), Duration.ofSeconds(16));
policies.add(new RetryPolicy(strategy));
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, SecretAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (getTestMode() == TestMode.RECORD) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
@Test
public abstract void setSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void setSecretRunner(Consumer<KeyVaultSecret> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("foo", "baz");
String resourceId = generateResourceId(SECRET_NAME);
final KeyVaultSecret secret = new KeyVaultSecret(resourceId, SECRET_VALUE)
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 1, 30, 0, 0, 0, 0, ZoneOffset.UTC))
.setNotBefore(OffsetDateTime.of(2000, 1, 30, 12, 59, 59, 0, ZoneOffset.UTC))
.setTags(tags)
.setContentType("text"));
testRunner.accept(secret);
}
@Test
public abstract void setSecretEmptyName(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void setSecretEmptyValue(HttpClient httpClient, SecretServiceVersion serviceVersion);
void setSecretEmptyValueRunner(Consumer<KeyVaultSecret> testRunner) {
String resourceId = generateResourceId(SECRET_NAME);
KeyVaultSecret secret = new KeyVaultSecret(resourceId, "");
testRunner.accept(secret);
}
@Test public abstract void setSecretNull(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void updateSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void updateSecretRunner(BiConsumer<KeyVaultSecret, KeyVaultSecret> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
String resourceId = generateResourceId("testSecretUpdate");
final KeyVaultSecret originalSecret = new KeyVaultSecret(resourceId, "testSecretVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC))
.setTags(tags));
final KeyVaultSecret updatedSecret = new KeyVaultSecret(resourceId, "testSecretVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2060, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC))
.setTags(tags));
testRunner.accept(originalSecret, updatedSecret);
}
@Test
public abstract void updateDisabledSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void updateDisabledSecretRunner(BiConsumer<KeyVaultSecret, KeyVaultSecret> testRunner) {
final Map<String, String> tags = new HashMap<>();
String resourceId = generateResourceId("testUpdateOfDisabledSecret");
final KeyVaultSecret originalSecret = new KeyVaultSecret(resourceId, "testSecretUpdateDisabledVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC))
.setEnabled(false));
final KeyVaultSecret updatedSecret = new KeyVaultSecret(resourceId, "testSecretUpdateDisabledVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC))
.setEnabled(false));
testRunner.accept(originalSecret, updatedSecret);
}
@Test
public abstract void getSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void getSecretRunner(Consumer<KeyVaultSecret> testRunner) {
String resourceId = generateResourceId("testSecretGet");
final KeyVaultSecret originalSecret = new KeyVaultSecret(resourceId, "testSecretGetVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
testRunner.accept(originalSecret);
}
@Test
public abstract void getSecretSpecificVersion(HttpClient httpClient, SecretServiceVersion serviceVersion);
void getSecretSpecificVersionRunner(BiConsumer<KeyVaultSecret, KeyVaultSecret> testRunner) {
String resourceId = generateResourceId("testSecretGetVersion");
final KeyVaultSecret secret = new KeyVaultSecret(resourceId, "testSecretGetVersionVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
final KeyVaultSecret secretWithNewVal = new KeyVaultSecret(resourceId, "newVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
testRunner.accept(secret, secretWithNewVal);
}
@Test
public abstract void getSecretNotFound(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void deleteSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void deleteSecretRunner(Consumer<KeyVaultSecret> testRunner) {
String resourceId = generateResourceId("testSecretDelete");
final KeyVaultSecret secretToDelete = new KeyVaultSecret(resourceId, "testSecretDeleteVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
testRunner.accept(secretToDelete);
}
@Test
public abstract void deleteSecretNotFound(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void getDeletedSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void getDeletedSecretRunner(Consumer<KeyVaultSecret> testRunner) {
String resourceId = generateResourceId("testSecretGetDeleted");
final KeyVaultSecret secretToDeleteAndGet = new KeyVaultSecret(resourceId, "testSecretGetDeleteVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
testRunner.accept(secretToDeleteAndGet);
}
@Test
public abstract void getDeletedSecretNotFound(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void recoverDeletedSecretRunner(Consumer<KeyVaultSecret> testRunner) {
String resourceId = generateResourceId("testSecretRecover");
final KeyVaultSecret secretToDeleteAndRecover = new KeyVaultSecret(resourceId, "testSecretRecoverVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
testRunner.accept(secretToDeleteAndRecover);
}
@Test
public abstract void recoverDeletedSecretNotFound(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void backupSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void backupSecretRunner(Consumer<KeyVaultSecret> testRunner) {
final KeyVaultSecret secretToBackup = new KeyVaultSecret(generateResourceId("testSecretBackup"), "testSecretBackupVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2060, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
testRunner.accept(secretToBackup);
}
@Test
public abstract void backupSecretNotFound(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void restoreSecret(HttpClient httpClient, SecretServiceVersion serviceVersion);
void restoreSecretRunner(Consumer<KeyVaultSecret> testRunner) {
final KeyVaultSecret secretToBackupAndRestore = new KeyVaultSecret(generateResourceId("testSecretRestore"), "testSecretRestoreVal")
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2080, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
testRunner.accept(secretToBackupAndRestore);
}
@Test
public abstract void restoreSecretFromMalformedBackup(HttpClient httpClient, SecretServiceVersion serviceVersion);
@Test
public abstract void listSecrets(HttpClient httpClient, SecretServiceVersion serviceVersion);
void listSecretsRunner(Consumer<HashMap<String, KeyVaultSecret>> testRunner) {
HashMap<String, KeyVaultSecret> secrets = new HashMap<>();
String secretName;
String secretVal;
for (int i = 0; i < 2; i++) {
secretName = generateResourceId("listSecret" + i);
secretVal = "listSecretVal" + i;
KeyVaultSecret secret = new KeyVaultSecret(secretName, secretVal)
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2050, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC)));
secrets.put(secretName, secret);
}
testRunner.accept(secrets);
}
@Test
public abstract void listDeletedSecrets(HttpClient httpClient, SecretServiceVersion serviceVersion);
void listDeletedSecretsRunner(Consumer<HashMap<String, KeyVaultSecret>> testRunner) {
HashMap<String, KeyVaultSecret> secrets = new HashMap<>();
String secretName;
String secretVal;
for (int i = 0; i < 3; i++) {
secretName = generateResourceId("listDeletedSecretsTest" + i);
secretVal = "listDeletedSecretVal" + i;
secrets.put(secretName, new KeyVaultSecret(secretName, secretVal)
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2090, 5, 25, 0, 0, 0, 0, ZoneOffset.UTC))));
}
testRunner.accept(secrets);
}
@Test
public abstract void listSecretVersions(HttpClient httpClient, SecretServiceVersion serviceVersion);
void listSecretVersionsRunner(Consumer<List<KeyVaultSecret>> testRunner) {
List<KeyVaultSecret> secrets = new ArrayList<>();
String secretVal;
String secretName = generateResourceId("listSecretVersion");
for (int i = 1; i < 5; i++) {
secretVal = "listSecretVersionVal" + i;
secrets.add(new KeyVaultSecret(secretName, secretVal)
.setProperties(new SecretProperties()
.setExpiresOn(OffsetDateTime.of(2090, 5, i, 0, 0, 0, 0, ZoneOffset.UTC))));
}
testRunner.accept(secrets);
}
/**
* Helper method to verify that the Response matches what was expected. This method assumes a response status of 200.
*
* @param expected Secret expected to be returned by the service
* @param response Response returned by the service, the body should contain a Secret
*/
static void assertSecretEquals(KeyVaultSecret expected, Response<KeyVaultSecret> response) {
assertSecretEquals(expected, response, 200);
}
/**
* Helper method to verify that the RestResponse matches what was expected.
*
* @param expected ConfigurationSetting expected to be returned by the service
* @param response RestResponse returned from the service, the body should contain a ConfigurationSetting
* @param expectedStatusCode Expected HTTP status code returned by the service
*/
static void assertSecretEquals(KeyVaultSecret expected, Response<KeyVaultSecret> response, final int expectedStatusCode) {
assertNotNull(response);
assertEquals(expectedStatusCode, response.getStatusCode());
assertSecretEquals(expected, response.getValue());
}
/**
* Helper method to verify that the returned ConfigurationSetting matches what was expected.
*
* @param expected ConfigurationSetting expected to be returned by the service
* @param actual ConfigurationSetting contained in the RestResponse body
*/
static void assertSecretEquals(KeyVaultSecret expected, KeyVaultSecret actual) {
assertEquals(expected.getName(), actual.getName());
assertEquals(expected.getValue(), actual.getValue());
assertEquals(expected.getProperties().getExpiresOn(), actual.getProperties().getExpiresOn());
assertEquals(expected.getProperties().getNotBefore(), actual.getProperties().getNotBefore());
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http://localhost:8080"
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
public void sleepInRecordMode(long millis) {
if (interceptorManager.isPlaybackMode()) {
return;
}
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
// when this issues is closed, the newer version of junit will have better support for
// cartesian product of arguments - https://github.com/junit-team/junit5/issues/1427
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(SecretServiceVersion.values()).filter(SecretClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
/**
* Returns whether the given service version match the rules of test framework.
*
* <ul>
* <li>Using latest service version as default if no environment variable is set.</li>
* <li>If it's set to ALL, all Service versions in {@link SecretServiceVersion} will be tested.</li>
* <li>Otherwise, Service version string should match env variable.</li>
* </ul>
*
* Environment values currently supported are: "ALL", "${version}".
* Use comma to separate http clients want to test.
* e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}
*
* @param serviceVersion ServiceVersion needs to check
* @return Boolean indicates whether filters out the service version or not.
*/
private static boolean shouldServiceVersionBeTested(SecretServiceVersion serviceVersion) {
if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
return SecretServiceVersion.getLatest().equals(serviceVersion);
}
if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
return true;
}
String[] configuredServiceVersionList = SERVICE_VERSION_FROM_ENV.split(",");
return Arrays.stream(configuredServiceVersionList).anyMatch(configuredServiceVersion ->
serviceVersion.getVersion().equals(configuredServiceVersion.trim()));
}
}
|
[
"\"ARM_CLIENTID\"",
"\"ARM_CLIENTKEY\"",
"\"AZURE_TENANT_ID\"",
"\"AZURE_KEYVAULT_ENDPOINT\""
] |
[] |
[
"ARM_CLIENTID",
"ARM_CLIENTKEY",
"AZURE_TENANT_ID",
"AZURE_KEYVAULT_ENDPOINT"
] |
[]
|
["ARM_CLIENTID", "ARM_CLIENTKEY", "AZURE_TENANT_ID", "AZURE_KEYVAULT_ENDPOINT"]
|
java
| 4 | 0 | |
protokube/cmd/protokube/main.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"flag"
"fmt"
"io"
"net"
"os"
"path"
"strings"
"k8s.io/kops/dns-controller/pkg/dns"
"k8s.io/kops/dnsprovider/pkg/dnsprovider"
"k8s.io/kops/protokube/pkg/gossip"
gossipdns "k8s.io/kops/protokube/pkg/gossip/dns"
"k8s.io/kops/protokube/pkg/gossip/mesh"
"k8s.io/kops/protokube/pkg/protokube"
// Load DNS plugins
"github.com/golang/glog"
"github.com/spf13/pflag"
_ "k8s.io/kops/dnsprovider/pkg/dnsprovider/providers/aws/route53"
k8scoredns "k8s.io/kops/dnsprovider/pkg/dnsprovider/providers/coredns"
_ "k8s.io/kops/dnsprovider/pkg/dnsprovider/providers/google/clouddns"
)
var (
flags = pflag.NewFlagSet("", pflag.ExitOnError)
// BuildVersion is overwritten during build. This can be used to resolve issues.
BuildVersion = "0.1"
)
func main() {
fmt.Printf("protokube version %s\n", BuildVersion)
if err := run(); err != nil {
glog.Errorf("Error: %v", err)
os.Exit(1)
}
os.Exit(0)
}
// run is responsible for running the protokube service controller
func run() error {
var zones []string
var applyTaints, initializeRBAC, containerized, master, tlsAuth bool
var cloud, clusterID, dnsServer, dnsProviderID, dnsInternalSuffix, gossipSecret, gossipListen string
var flagChannels, tlsCert, tlsKey, tlsCA, peerCert, peerKey, peerCA string
var etcdBackupImage, etcdBackupStore, etcdImageSource, etcdElectionTimeout, etcdHeartbeatInterval string
flag.BoolVar(&applyTaints, "apply-taints", applyTaints, "Apply taints to nodes based on the role")
flag.BoolVar(&containerized, "containerized", containerized, "Set if we are running containerized.")
flag.BoolVar(&initializeRBAC, "initialize-rbac", initializeRBAC, "Set if we should initialize RBAC")
flag.BoolVar(&master, "master", master, "Whether or not this node is a master")
flag.StringVar(&cloud, "cloud", "aws", "CloudProvider we are using (aws,digitalocean,gce)")
flag.StringVar(&clusterID, "cluster-id", clusterID, "Cluster ID")
flag.StringVar(&dnsInternalSuffix, "dns-internal-suffix", dnsInternalSuffix, "DNS suffix for internal domain names")
flag.StringVar(&dnsServer, "dns-server", dnsServer, "DNS Server")
flag.StringVar(&flagChannels, "channels", flagChannels, "channels to install")
flag.StringVar(&gossipListen, "gossip-listen", "0.0.0.0:3999", "address:port on which to bind for gossip")
flag.StringVar(&peerCA, "peer-ca", peerCA, "Path to a file containing the peer ca in PEM format")
flag.StringVar(&peerCert, "peer-cert", peerCert, "Path to a file containing the peer certificate")
flag.StringVar(&peerKey, "peer-key", peerKey, "Path to a file containing the private key for the peers")
flag.BoolVar(&tlsAuth, "tls-auth", tlsAuth, "Indicates the peers and client should enforce authentication via CA")
flag.StringVar(&tlsCA, "tls-ca", tlsCA, "Path to a file containing the ca for client certificates")
flag.StringVar(&tlsCert, "tls-cert", tlsCert, "Path to a file containing the certificate for etcd server")
flag.StringVar(&tlsKey, "tls-key", tlsKey, "Path to a file containing the private key for etcd server")
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
flags.StringVar(&dnsProviderID, "dns", "aws-route53", "DNS provider we should use (aws-route53, google-clouddns, coredns, digitalocean)")
flags.StringVar(&etcdBackupImage, "etcd-backup-image", "", "Set to override the image for (experimental) etcd backups")
flags.StringVar(&etcdBackupStore, "etcd-backup-store", "", "Set to enable (experimental) etcd backups")
flags.StringVar(&etcdImageSource, "etcd-image", "k8s.gcr.io/etcd:2.2.1", "Etcd Source Container Registry")
flags.StringVar(&etcdElectionTimeout, "etcd-election-timeout", etcdElectionTimeout, "time in ms for an election to timeout")
flags.StringVar(&etcdHeartbeatInterval, "etcd-heartbeat-interval", etcdHeartbeatInterval, "time in ms of a heartbeat interval")
flags.StringVar(&gossipSecret, "gossip-secret", gossipSecret, "Secret to use to secure gossip")
manageEtcd := false
flag.BoolVar(&manageEtcd, "manage-etcd", manageEtcd, "Set to manage etcd (deprecated in favor of etcd-manager)")
// Trick to avoid 'logging before flag.Parse' warning
flag.CommandLine.Parse([]string{})
flag.Set("logtostderr", "true")
flags.AddGoFlagSet(flag.CommandLine)
flags.Parse(os.Args)
var volumes protokube.Volumes
var internalIP net.IP
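// Discover volumes, the cluster ID and the node's internal IP from the
// configured cloud provider; each provider has its own discovery path.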
if cloud == "aws" {
awsVolumes, err := protokube.NewAWSVolumes()
if err != nil {
glog.Errorf("Error initializing AWS: %q", err)
os.Exit(1)
}
volumes = awsVolumes
if clusterID == "" {
clusterID = awsVolumes.ClusterID()
}
if internalIP == nil {
internalIP = awsVolumes.InternalIP()
}
} else if cloud == "digitalocean" {
if clusterID == "" {
glog.Error("digitalocean requires --cluster-id")
os.Exit(1)
}
doVolumes, err := protokube.NewDOVolumes(clusterID)
if err != nil {
glog.Errorf("Error initializing DigitalOcean: %q", err)
os.Exit(1)
}
volumes = doVolumes
if internalIP == nil {
internalIP, err = protokube.GetDropletInternalIP()
if err != nil {
glog.Errorf("Error getting droplet internal IP: %s", err)
os.Exit(1)
}
}
} else if cloud == "gce" {
gceVolumes, err := protokube.NewGCEVolumes()
if err != nil {
glog.Errorf("Error initializing GCE: %q", err)
os.Exit(1)
}
volumes = gceVolumes
if clusterID == "" {
clusterID = gceVolumes.ClusterID()
}
if internalIP == nil {
internalIP = gceVolumes.InternalIP()
}
} else if cloud == "vsphere" {
glog.Info("Initializing vSphere volumes")
vsphereVolumes, err := protokube.NewVSphereVolumes()
if err != nil {
glog.Errorf("Error initializing vSphere: %q", err)
os.Exit(1)
}
volumes = vsphereVolumes
if internalIP == nil {
internalIP = vsphereVolumes.InternalIp()
}
} else if cloud == "baremetal" {
if internalIP == nil {
ip, err := findInternalIP()
if err != nil {
glog.Errorf("error finding internal IP: %v", err)
os.Exit(1)
}
internalIP = ip
}
} else {
glog.Errorf("Unknown cloud %q", cloud)
os.Exit(1)
}
if clusterID == "" {
return fmt.Errorf("cluster-id is required (cannot be determined from cloud)")
}
glog.Infof("cluster-id: %s", clusterID)
if internalIP == nil {
glog.Errorf("Cannot determine internal IP")
os.Exit(1)
}
if dnsInternalSuffix == "" {
// TODO: Maybe only master needs DNS?
dnsInternalSuffix = ".internal." + clusterID
glog.Infof("Setting dns-internal-suffix to %q", dnsInternalSuffix)
}
// Make sure it's actually a suffix (starts with .)
if !strings.HasPrefix(dnsInternalSuffix, ".") {
dnsInternalSuffix = "." + dnsInternalSuffix
}
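// Illustrative example (values are hypothetical): with cluster-id "example.com" the computed
// dnsInternalSuffix becomes ".internal.example.com"; a bare "internal.example.com" would be
// normalized to ".internal.example.com" by the prefix check above.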
rootfs := "/"
if containerized {
rootfs = "/rootfs/"
}
protokube.RootFS = rootfs
protokube.Containerized = containerized
var dnsProvider protokube.DNSProvider
if dnsProviderID == "gossip" {
dnsTarget := &gossipdns.HostsFile{
Path: path.Join(rootfs, "etc/hosts"),
}
var gossipSeeds gossip.SeedProvider
var err error
var gossipName string
if cloud == "aws" {
gossipSeeds, err = volumes.(*protokube.AWSVolumes).GossipSeeds()
if err != nil {
return err
}
gossipName = volumes.(*protokube.AWSVolumes).InstanceID()
} else if cloud == "gce" {
gossipSeeds, err = volumes.(*protokube.GCEVolumes).GossipSeeds()
if err != nil {
return err
}
gossipName = volumes.(*protokube.GCEVolumes).InstanceName()
} else {
glog.Fatalf("seed provider for %q not yet implemented", cloud)
}
id := os.Getenv("HOSTNAME")
if id == "" {
glog.Warningf("Unable to fetch HOSTNAME for use as node identifier")
}
channelName := "dns"
gossipState, err := mesh.NewMeshGossiper(gossipListen, channelName, gossipName, []byte(gossipSecret), gossipSeeds)
if err != nil {
glog.Errorf("Error initializing gossip: %v", err)
os.Exit(1)
}
go func() {
err := gossipState.Start()
if err != nil {
glog.Fatalf("gossip exited unexpectedly: %v", err)
} else {
glog.Fatalf("gossip exited unexpectedly, but without error")
}
}()
dnsView := gossipdns.NewDNSView(gossipState)
zoneInfo := gossipdns.DNSZoneInfo{
Name: gossipdns.DefaultZoneName,
}
if _, err := dnsView.AddZone(zoneInfo); err != nil {
glog.Fatalf("error creating zone: %v", err)
}
go func() {
gossipdns.RunDNSUpdates(dnsTarget, dnsView)
glog.Fatalf("RunDNSUpdates exited unexpectedly")
}()
dnsProvider = &protokube.GossipDnsProvider{DNSView: dnsView, Zone: zoneInfo}
} else {
var dnsScope dns.Scope
var dnsController *dns.DNSController
{
var file io.Reader
if dnsProviderID == k8scoredns.ProviderName {
var lines []string
lines = append(lines, "etcd-endpoints = "+dnsServer)
lines = append(lines, "zones = "+zones[0])
config := "[global]\n" + strings.Join(lines, "\n") + "\n"
file = bytes.NewReader([]byte(config))
}
dnsProvider, err := dnsprovider.GetDnsProvider(dnsProviderID, file)
if err != nil {
return fmt.Errorf("Error initializing DNS provider %q: %v", dnsProviderID, err)
}
if dnsProvider == nil {
return fmt.Errorf("DNS provider %q could not be initialized", dnsProviderID)
}
zoneRules, err := dns.ParseZoneRules(zones)
if err != nil {
return fmt.Errorf("unexpected zone flags: %q", err)
}
dnsController, err = dns.NewDNSController([]dnsprovider.Interface{dnsProvider}, zoneRules)
if err != nil {
return err
}
dnsScope, err = dnsController.CreateScope("protokube")
if err != nil {
return err
}
// We don't really use readiness - our records are simple
dnsScope.MarkReady()
}
dnsProvider = &protokube.KopsDnsProvider{
DNSScope: dnsScope,
DNSController: dnsController,
}
}
modelDir := "model/etcd"
var channels []string
if flagChannels != "" {
channels = strings.Split(flagChannels, ",")
}
k := &protokube.KubeBoot{
ApplyTaints: applyTaints,
Channels: channels,
DNS: dnsProvider,
ManageEtcd: manageEtcd,
EtcdBackupImage: etcdBackupImage,
EtcdBackupStore: etcdBackupStore,
EtcdImageSource: etcdImageSource,
EtcdElectionTimeout: etcdElectionTimeout,
EtcdHeartbeatInterval: etcdHeartbeatInterval,
InitializeRBAC: initializeRBAC,
InternalDNSSuffix: dnsInternalSuffix,
InternalIP: internalIP,
Kubernetes: protokube.NewKubernetesContext(),
Master: master,
ModelDir: modelDir,
PeerCA: peerCA,
PeerCert: peerCert,
PeerKey: peerKey,
TLSAuth: tlsAuth,
TLSCA: tlsCA,
TLSCert: tlsCert,
TLSKey: tlsKey,
}
k.Init(volumes)
if dnsProvider != nil {
go dnsProvider.Run()
}
k.RunSyncLoop()
return fmt.Errorf("Unexpected exit")
}
// findInternalIP attempts to discover the internal IP address by inspecting the network interfaces
func findInternalIP() (net.IP, error) {
var ips []net.IP
networkInterfaces, err := net.Interfaces()
if err != nil {
return nil, fmt.Errorf("error querying interfaces to determine internal ip: %v", err)
}
for i := range networkInterfaces {
networkInterface := &networkInterfaces[i]
flags := networkInterface.Flags
name := networkInterface.Name
if (flags & net.FlagLoopback) != 0 {
glog.V(2).Infof("Ignoring interface %s - loopback", name)
continue
}
// Not a lot else to go on...
if !strings.HasPrefix(name, "eth") && !strings.HasPrefix(name, "en") {
glog.V(2).Infof("Ignoring interface %s - name does not look like ethernet device", name)
continue
}
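// Illustrative: interface names such as eth0, ens3 or enp0s31f6 pass this filter,
// while names like docker0, cni0 or veth devices are skipped as non-ethernet interfaces.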
addrs, err := networkInterface.Addrs()
if err != nil {
return nil, fmt.Errorf("error querying network interface %s for IP addresses: %v", name, err)
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
return nil, fmt.Errorf("error parsing address %s on network interface %s: %v", addr.String(), name, err)
}
if ip.IsLoopback() {
glog.V(2).Infof("Ignoring address %s (loopback)", ip)
continue
}
if ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() {
glog.V(2).Infof("Ignoring address %s (link-local)", ip)
continue
}
ips = append(ips, ip)
}
}
if len(ips) == 0 {
return nil, fmt.Errorf("unable to determine internal ip (no addresses found)")
}
if len(ips) == 1 {
return ips[0], nil
}
var ipv4s []net.IP
for _, ip := range ips {
if ip.To4() != nil {
ipv4s = append(ipv4s, ip)
}
}
glog.Warningf("Found multiple internal IPs")
for _, ip := range ips {
glog.Warningf("\tip: %s", ip.String())
}
if len(ipv4s) != 0 {
// TODO: sort?
if len(ipv4s) == 1 {
glog.Warningf("choosing IPv4 address: %s", ipv4s[0].String())
} else {
glog.Warningf("arbitrarily choosing IPv4 address: %s", ipv4s[0].String())
}
return ipv4s[0], nil
}
glog.Warningf("arbitrarily choosing address: %s", ips[0].String())
return ips[0], nil
}
|
[
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
go
| 1 | 0 | |
src/virtualenv/create/creator.py
|
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
import os
import shutil
import sys
from abc import ABCMeta, abstractmethod
from argparse import ArgumentTypeError
from ast import literal_eval
from collections import OrderedDict
from stat import S_IWUSR
import six
from six import add_metaclass
from virtualenv.discovery.cached_py_info import LogCmd
from virtualenv.info import WIN_CPYTHON_2
from virtualenv.pyenv_cfg import PyEnvCfg
from virtualenv.util.path import Path
from virtualenv.util.subprocess import run_cmd
from virtualenv.util.zipapp import ensure_file_on_disk
from virtualenv.version import __version__
HERE = Path(__file__).absolute().parent
DEBUG_SCRIPT = HERE / "debug.py"
@add_metaclass(ABCMeta)
class Creator(object):
"""A class that given a python Interpreter creates a virtual environment"""
def __init__(self, options, interpreter):
"""Construct a new virtual environment creator.
:param options: the CLI option as parsed from :meth:`add_parser_arguments`
:param interpreter: the interpreter to create virtual environment from
"""
self.interpreter = interpreter
self._debug = None
self.dest = Path(options.dest)
self.clear = options.clear
self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)
def __repr__(self):
return six.ensure_str(self.__unicode__())
def __unicode__(self):
return "{}({})".format(self.__class__.__name__, ", ".join("{}={}".format(k, v) for k, v in self._args()))
def _args(self):
return [
("dest", six.ensure_text(str(self.dest))),
("clear", self.clear),
]
@classmethod
def can_create(cls, interpreter):
"""Determine if we can create a virtual environment.
:param interpreter: the interpreter in question
:return: ``None`` if we can't create, any other object otherwise that will be forwarded to \
:meth:`add_parser_arguments`
"""
return True
@classmethod
def add_parser_arguments(cls, parser, interpreter, meta):
"""Add CLI arguments for the creator.
:param parser: the CLI parser
:param interpreter: the interpreter we're asked to create virtual environment for
:param meta: value as returned by :meth:`can_create`
"""
parser.add_argument(
"dest", help="directory to create virtualenv at", type=cls.validate_dest, default="venv", nargs="?",
)
parser.add_argument(
"--clear",
dest="clear",
action="store_true",
help="remove the destination directory if exist before starting (will overwrite files otherwise)",
default=False,
)
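# Illustrative (hypothetical invocation): with the two arguments registered above, a CLI call such as
#   virtualenv --clear ./my-venv
# would set options.dest to "./my-venv" and options.clear to True; omitting dest falls back to "venv".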
@abstractmethod
def create(self):
"""Perform the virtual environment creation."""
raise NotImplementedError
@classmethod
def validate_dest(cls, raw_value):
"""No path separator in the path, valid chars and must be write-able"""
def non_write_able(dest, value):
common = Path(*os.path.commonprefix([value.parts, dest.parts]))
raise ArgumentTypeError(
"the destination {} is not write-able at {}".format(dest.relative_to(common), common)
)
# the file system must be able to encode
# note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/
encoding = sys.getfilesystemencoding()
refused = OrderedDict()
kwargs = {"errors": "ignore"} if encoding != "mbcs" else {}
for char in six.ensure_text(raw_value):
try:
trip = char.encode(encoding, **kwargs).decode(encoding)
if trip == char:
continue
raise ValueError(trip)
except ValueError:
refused[char] = None
if refused:
raise ArgumentTypeError(
"the file system codec ({}) cannot handle characters {!r} within {!r}".format(
encoding, "".join(refused.keys()), raw_value
)
)
for char in (i for i in (os.pathsep, os.altsep) if i is not None):
if char in raw_value:
raise ArgumentTypeError(
"destination {!r} must not contain the path separator ({}) as this would break "
"the activation scripts".format(raw_value, char)
)
value = Path(raw_value)
if value.exists() and value.is_file():
raise ArgumentTypeError("the destination {} already exists and is a file".format(value))
if (3, 3) <= sys.version_info <= (3, 6):
# pre 3.6 resolve is always strict, i.e. the path must exist; sidestep by using an os.path operation
dest = Path(os.path.realpath(raw_value))
else:
dest = value.resolve()
value = dest
while dest:
if dest.exists():
if os.access(six.ensure_text(str(dest)), os.W_OK):
break
else:
non_write_able(dest, value)
base, _ = dest.parent, dest.name
if base == dest:
non_write_able(dest, value) # pragma: no cover
dest = base
return str(value)
def run(self):
if self.dest.exists() and self.clear:
logging.debug("delete %s", self.dest)
def onerror(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, S_IWUSR)
func(path)
else:
raise
shutil.rmtree(str(self.dest), ignore_errors=True, onerror=onerror)
self.create()
self.set_pyenv_cfg()
def set_pyenv_cfg(self):
self.pyenv_cfg.content = OrderedDict()
self.pyenv_cfg["home"] = self.interpreter.system_exec_prefix
self.pyenv_cfg["implementation"] = self.interpreter.implementation
self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info)
self.pyenv_cfg["virtualenv"] = __version__
@property
def debug(self):
"""
:return: debug information about the virtual environment (only valid after :meth:`create` has run)
"""
if self._debug is None and self.exe is not None:
self._debug = get_env_debug_info(self.exe, self.debug_script())
return self._debug
# noinspection PyMethodMayBeStatic
def debug_script(self):
return DEBUG_SCRIPT
def get_env_debug_info(env_exe, debug_script):
env = os.environ.copy()
env.pop(str("PYTHONPATH"), None)
with ensure_file_on_disk(debug_script) as debug_script:
cmd = [str(env_exe), str(debug_script)]
if WIN_CPYTHON_2:
cmd = [six.ensure_text(i) for i in cmd]
logging.debug(str("debug via %r"), LogCmd(cmd))
code, out, err = run_cmd(cmd)
# noinspection PyBroadException
try:
if code != 0:
result = literal_eval(out)
else:
result = json.loads(out)
if err:
result["err"] = err
except Exception as exception:
return {"out": out, "err": err, "returncode": code, "exception": repr(exception)}
if "sys" in result and "path" in result["sys"]:
del result["sys"]["path"][0]
return result
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/test/java/io/gate/impl/GateApiAsyncRestClientImplTest.java
|
package io.gate.impl;
import io.gate.GateApiAsyncRestClient;
import io.gate.GateApiClientFactory;
import io.gate.domain.account.Transaction;
import io.gate.domain.general.Asset;
import io.gate.domain.market.MarketInfo;
import io.gate.domain.market.MarketTicker;
import io.gate.domain.market.OrderBook;
import io.gate.security.ApiCredentials;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.concurrent.ExecutionException;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.*;
import static org.junit.jupiter.api.Assertions.assertNotNull;
public class GateApiAsyncRestClientImplTest {
private GateApiAsyncRestClient gateApiAsyncRestClient;
@BeforeEach
public void setUp() {
String apiKey = System.getenv("API_KEY");
String secret = System.getenv("SECRET");
ApiCredentials apiCredentials = new ApiCredentials(apiKey, secret);
this.gateApiAsyncRestClient = GateApiClientFactory.newInstance(apiCredentials).newAsyncRestClient();
}
@Test
public void getAssets_ShouldReturnAssets() throws ExecutionException, InterruptedException {
List<Asset> assets = gateApiAsyncRestClient.getAssets().get();
assertThat(assets, is(not(empty())));
}
@Test
public void getMarketInfo_ShouldReturnMarketInfo() throws ExecutionException, InterruptedException {
List<MarketInfo> marketInfoList = gateApiAsyncRestClient.getMarketInfo().get();
assertThat(marketInfoList, allOf(notNullValue(), is(not(empty()))));
}
@Test
public void getMarketTickers_ShouldReturnMarketTickers() throws ExecutionException, InterruptedException {
List<MarketTicker> marketTickers = gateApiAsyncRestClient.getMarketTickers().get();
assertThat(marketTickers, allOf(notNullValue(), is(not(empty()))));
}
@Test
public void getOrderBook_ShouldReturnOrderBookForBTCUSDT() throws ExecutionException, InterruptedException {
OrderBook orderBook = gateApiAsyncRestClient.getOrderBook("BTC_USDT", 0, 10, true).get();
assertNotNull(orderBook);
assertThat(orderBook.getAsks(), is(not(empty())));
assertThat(orderBook.getBids(), is(not(empty())));
}
@Test
public void getDeposits_ShouldReturnDeposits() throws ExecutionException, InterruptedException {
List<Transaction> deposits = gateApiAsyncRestClient.getDeposits(null, null, null, null, null).get();
assertThat(deposits, is(notNullValue()));
}
}
|
[
"\"API_KEY\"",
"\"SECRET\""
] |
[] |
[
"API_KEY",
"SECRET"
] |
[]
|
["API_KEY", "SECRET"]
|
java
| 2 | 0 | |
cmd/client/shell.go
|
/*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package client
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/skydive-project/skydive/config"
shttp "github.com/skydive-project/skydive/http"
"github.com/skydive-project/skydive/logging"
)
var ShellCmd = &cobra.Command{
Use: "shell",
Short: "Shell Command Line Interface",
Long: "Skydive Shell Command Line Interface, yet another shell",
SilenceUsage: false,
Run: func(cmd *cobra.Command, args []string) {
shellMain()
},
}
var (
ErrContinue = errors.New("<continue input>")
ErrQuit = errors.New("<quit session>")
)
func actionGremlinQuery(s *Session, query string) error {
var values interface{}
queryHelper := NewGremlinQueryHelper(&s.authenticationOpts)
if err := queryHelper.Query(query, &values); err != nil {
return err
}
printJSON(values)
return nil
}
func actionSetVarUsername(s *Session, arg string) error {
s.authenticationOpts.Username = arg
return nil
}
func actionSetVarPassword(s *Session, arg string) error {
s.authenticationOpts.Password = arg
return nil
}
func actionSetVarAnalyzer(s *Session, arg string) error {
s.analyzerAddr = arg
config.GetConfig().Set("analyzers", s.analyzerAddr)
return nil
}
var vocaGremlinBase = []string{
"V(",
"Context(",
"Flows(",
}
var vocaGremlinExt = []string{
"Has(",
"Dedup()",
"ShortestPathTo(", // 1 or 2
"Both()",
"Count()",
"Range(", // 2
"Limit(", // 1
"Sort(",
"Out()",
"OutV()",
"OutE()",
"In()",
"InV()",
"InE()",
}
func completeG(s *Session, prefix string) []string {
if prefix == "" {
return vocaGremlinBase
}
return vocaGremlinExt
}
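// Illustrative: an empty gremlin suffix is completed with the base vocabulary (V(, Context(, Flows(),
// while any non-empty suffix is offered the extended vocabulary (Has(, Dedup(), Sort(, ...).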
type command struct {
name string
action func(*Session, string) error
complete func(*Session, string) []string
arg string
document string
}
var commands = []command{
{
name: "g",
action: actionGremlinQuery,
complete: completeG,
arg: "<gremlin expression>",
document: "evaluate a gremlin expression",
},
{
name: "username",
action: actionSetVarUsername,
complete: nil,
arg: "<username>",
document: "set the analyzer connection username",
},
{
name: "password",
action: actionSetVarPassword,
complete: nil,
arg: "<password>",
document: "set the analyzer connection password",
},
{
name: "analyzer",
action: actionSetVarAnalyzer,
complete: nil,
arg: "<address:port>",
document: "set the analyzer connection address",
},
}
func (s *Session) completeWord(line string, pos int) (string, []string, string) {
if strings.HasPrefix(line, "g") {
// complete commands
if !strings.Contains(line[0:pos], ".") {
pre, post := line[0:pos], line[pos:]
result := []string{}
for _, command := range commands {
name := command.name
if strings.HasPrefix(name, pre) {
// having complete means that this command takes an argument (for now)
if !strings.HasPrefix(post, ".") && command.arg != "" {
name = name + "."
}
result = append(result, name)
}
}
return "", result, post
}
// complete command arguments
for _, command := range commands {
if command.complete == nil {
continue
}
cmdPrefix := command.name + "."
if strings.HasPrefix(line, cmdPrefix) && pos >= len(cmdPrefix) {
complLine := ""
if len(line)-len(cmdPrefix) > 0 {
complLine = line[len(cmdPrefix) : len(line)-len(cmdPrefix)]
}
return line, command.complete(s, complLine), ""
}
}
return "", nil, ""
}
if strings.HasPrefix(line, ":") {
// complete commands
if !strings.Contains(line[0:pos], " ") {
pre, post := line[0:pos], line[pos:]
result := []string{}
for _, command := range commands {
name := ":" + command.name
if strings.HasPrefix(name, pre) {
// having complete means that this command takes an argument (for now)
if !strings.HasPrefix(post, " ") && command.arg != "" {
name = name + " "
}
result = append(result, name)
}
}
return "", result, post
}
// complete command arguments
for _, command := range commands {
if command.complete == nil {
continue
}
cmdPrefix := ":" + command.name + " "
if strings.HasPrefix(line, cmdPrefix) && pos >= len(cmdPrefix) {
return cmdPrefix, command.complete(s, line[len(cmdPrefix):pos]), ""
}
}
return "", nil, ""
}
return "", nil, ""
}
func shellMain() {
s, err := NewSession()
if err != nil {
panic(err)
}
rl := newContLiner()
defer rl.Close()
var historyFile string
home, err := homeDir()
if err != nil {
logging.GetLogger().Errorf("home: %s", err)
} else {
historyFile = filepath.Join(home, "history")
f, err := os.Open(historyFile)
if err != nil {
if !os.IsNotExist(err) {
logging.GetLogger().Errorf("%s", err)
}
} else {
_, err := rl.ReadHistory(f)
if err != nil {
logging.GetLogger().Errorf("while reading history: %s", err)
}
}
}
rl.SetWordCompleter(s.completeWord)
for {
in, err := rl.Prompt()
if err != nil {
if err == io.EOF {
break
}
fmt.Fprintf(os.Stderr, "fatal: %s", err)
os.Exit(1)
}
if in == "" {
continue
}
if err := rl.Reindent(); err != nil {
fmt.Fprintf(os.Stderr, "error: %s\n", err)
rl.Clear()
continue
}
err = s.Eval(in)
if err != nil {
if err == ErrContinue {
continue
} else if err == ErrQuit {
break
}
fmt.Println(err)
}
rl.Accepted()
}
if historyFile != "" {
err := os.MkdirAll(filepath.Dir(historyFile), 0755)
if err != nil {
logging.GetLogger().Errorf("%s", err)
} else {
f, err := os.Create(historyFile)
if err != nil {
logging.GetLogger().Errorf("%s", err)
} else {
_, err := rl.WriteHistory(f)
if err != nil {
logging.GetLogger().Errorf("while saving history: %s", err)
}
}
}
}
}
func homeDir() (home string, err error) {
home = os.Getenv("SKYDIVE_HOME")
if home != "" {
return
}
home, err = homedir.Dir()
if err != nil {
return
}
home = filepath.Join(home, ".skydive")
return
}
type Session struct {
authenticationOpts shttp.AuthenticationOpts
analyzerAddr string
}
func NewSession() (*Session, error) {
s := &Session{
analyzerAddr: "localhost:8082",
authenticationOpts: shttp.AuthenticationOpts{
Username: "admin",
Password: "password",
},
}
config.GetConfig().Set("analyzers", s.analyzerAddr)
return s, nil
}
func (s *Session) Eval(in string) error {
logging.GetLogger().Debugf("eval >>> %q", in)
for _, command := range commands {
if command.name == "g" && strings.HasPrefix(in, command.name) {
err := command.action(s, in)
if err != nil {
logging.GetLogger().Errorf("%s: %s", command.name, err)
}
return nil
}
arg := strings.TrimPrefix(in, ":"+command.name)
if arg == in {
continue
}
if arg == "" || strings.HasPrefix(arg, " ") {
arg = strings.TrimSpace(arg)
err := command.action(s, arg)
if err != nil {
if err == ErrQuit {
return err
}
logging.GetLogger().Errorf("%s: %s", command.name, err)
}
return nil
}
}
return nil
}
|
[
"\"SKYDIVE_HOME\""
] |
[] |
[
"SKYDIVE_HOME"
] |
[]
|
["SKYDIVE_HOME"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"fmt"
"os"
"path/filepath"
"github.com/bitrise-io/go-utils/command"
"github.com/bitrise-io/go-utils/log"
"github.com/bitrise-io/go-utils/pathutil"
"github.com/bitrise-tools/go-steputils/input"
)
// ConfigsModel ...
type ConfigsModel struct {
RecordID string
RemoveFrames string
EmulatorSerial string
}
type adbModel struct {
adbBinPath string
serial string
}
func createConfigsModelFromEnvs() ConfigsModel {
return ConfigsModel{
RecordID: os.Getenv("record_id"),
EmulatorSerial: os.Getenv("emulator_serial"),
RemoveFrames: os.Getenv("remove_frames"),
}
}
func (configs ConfigsModel) print() {
log.Infof("Configs:")
log.Printf("- RecordID: %s", configs.RecordID)
log.Printf("- RemoveFrames: %s", configs.RemoveFrames)
log.Printf("- EmulatorSerial: %s", configs.EmulatorSerial)
}
func (configs ConfigsModel) validate() error {
if err := input.ValidateIfNotEmpty(configs.RecordID); err != nil {
return fmt.Errorf("RecordID, error: %s", err)
}
if err := input.ValidateIfNotEmpty(configs.RemoveFrames); err != nil {
return fmt.Errorf("RemoveFrames, error: %s", err)
}
if err := input.ValidateIfNotEmpty(configs.EmulatorSerial); err != nil {
return fmt.Errorf("EmulatorSerial, error: %s", err)
}
return nil
}
func (model adbModel) pull(commands ...string) (string, error) {
cmd := command.New(model.adbBinPath, append([]string{"-s", model.serial, "pull"}, commands...)...)
return cmd.RunAndReturnTrimmedCombinedOutput()
}
func (model adbModel) shell(commands ...string) (string, error) {
cmd := command.New(model.adbBinPath, append([]string{"-s", model.serial, "exec-out"}, commands...)...)
return cmd.RunAndReturnTrimmedCombinedOutput()
}
func (model adbModel) shellDetached(commands ...string) (string, error) {
cmd := command.New(model.adbBinPath, append([]string{"-s", model.serial, "shell"}, commands...)...)
rCmd := cmd.GetCmd()
var b bytes.Buffer
rCmd.Stdout = &b
rCmd.Stderr = &b
err := rCmd.Start()
return b.String(), err
}
func mainE() error {
// Input validation
configs := createConfigsModelFromEnvs()
fmt.Println()
configs.print()
if err := configs.validate(); err != nil {
log.Errorf("Issue with input: %s", err)
os.Exit(1)
}
fmt.Println()
//
// Main
log.Infof("Checking compability")
androidHome := os.Getenv("ANDROID_HOME")
if androidHome == "" {
return fmt.Errorf("no ANDROID_HOME set")
}
adbBinPath := filepath.Join(androidHome, "platform-tools/adb")
exists, err := pathutil.IsPathExists(adbBinPath)
if err != nil {
return fmt.Errorf("failed to check if path exists: %s, error: %s", adbBinPath, err)
}
if !exists {
return fmt.Errorf("adb binary doesn't exist at: %s", adbBinPath)
}
adb := adbModel{adbBinPath: adbBinPath, serial: configs.EmulatorSerial}
out, err := adb.shell("echo \"$(which screenrecord)\"")
if err != nil {
return fmt.Errorf("failed to run adb command, error: %s, output: %s", err, out)
}
if out == "" {
return fmt.Errorf("screenrecord binary is not available on the device")
}
log.Donef("- Done")
fmt.Println()
log.Infof("Stop recording")
_, err = adb.shell("killall -INT screenrecord && while [ \"$(pgrep screenrecord)\" != \"\" ]; do sleep 1; done")
if err != nil {
return fmt.Errorf("failed to run adb command, error: %s, output: %s", err, out)
}
log.Printf("- Check if screen recording stopped")
out, err = adb.shell("echo \"$(pgrep screenrecord)\"")
if err != nil {
return fmt.Errorf("failed to run adb command, error: %s, output: %s", err, out)
}
if out != "" {
return fmt.Errorf("screenrecord still running, output: %s", out)
}
log.Donef("- Stopped")
fmt.Println()
log.Infof("Pulling video")
deployDir := os.Getenv("BITRISE_DEPLOY_DIR")
exportedPath := filepath.Join(deployDir, fmt.Sprintf("%s.mp4", configs.RecordID))
out, err = adb.pull(fmt.Sprintf("/data/local/tmp/%s.mp4", configs.RecordID), exportedPath)
if err != nil {
return fmt.Errorf("failed to run adb command, error: %s, output: %s", err, out)
}
log.Donef("- Done")
if configs.RemoveFrames == "true" {
fmt.Println()
log.Infof("Remove duplicated frames")
trimmedExportedPath := filepath.Join(deployDir, fmt.Sprintf("%s_trimmed.mp4", configs.RecordID))
trimCommand := command.New("ffmpeg", "-i", exportedPath, "-vf", "mpdecimate,setpts=N/FRAME_RATE/TB", trimmedExportedPath)
trimCommand.SetStdout(os.Stdout)
trimCommand.SetStderr(os.Stderr)
err = trimCommand.Run()
if err != nil {
return fmt.Errorf("failed to run ffmpeg command, error: %s", err)
}
err = os.RemoveAll(exportedPath)
if err != nil {
return fmt.Errorf("failed to remove file(%s), error: %s", exportedPath, err)
}
log.Donef("- Done")
}
return nil
}
func main() {
err := mainE()
if err != nil {
log.Errorf("Error: %v", err)
os.Exit(1)
}
}
|
[
"\"record_id\"",
"\"emulator_serial\"",
"\"remove_frames\"",
"\"ANDROID_HOME\"",
"\"BITRISE_DEPLOY_DIR\""
] |
[] |
[
"ANDROID_HOME",
"remove_frames",
"BITRISE_DEPLOY_DIR",
"record_id",
"emulator_serial"
] |
[]
|
["ANDROID_HOME", "remove_frames", "BITRISE_DEPLOY_DIR", "record_id", "emulator_serial"]
|
go
| 5 | 0 | |
config.go
|
package main
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"github.com/tkanos/gonfig"
)
var (
config rabbitExporterConfig
defaultConfig = rabbitExporterConfig{
RabbitURL: "http://127.0.0.1:15672",
RabbitUsername: "guest",
RabbitPassword: "guest",
PublishPort: "9419",
PublishAddr: "",
OutputFormat: "TTY", //JSON
CAFile: "ca.pem",
CertFile: "client-cert.pem",
KeyFile: "client-key.pem",
InsecureSkipVerify: false,
ExcludeMetrics: []string{},
SkipQueues: regexp.MustCompile("^$"),
IncludeQueues: regexp.MustCompile(".*"),
SkipVHost: regexp.MustCompile("^$"),
IncludeVHost: regexp.MustCompile(".*"),
RabbitCapabilities: parseCapabilities("no_sort,bert"),
EnabledExporters: []string{"exchange", "node", "overview", "queue"},
Timeout: 30,
MaxQueues: 0,
}
)
type rabbitExporterConfig struct {
RabbitURL string `json:"rabbit_url"`
RabbitUsername string `json:"rabbit_user"`
RabbitPassword string `json:"rabbit_pass"`
PublishPort string `json:"publish_port"`
PublishAddr string `json:"publish_addr"`
OutputFormat string `json:"output_format"`
CAFile string `json:"ca_file"`
CertFile string `json:"cert_file"`
KeyFile string `json:"key_file"`
InsecureSkipVerify bool `json:"insecure_skip_verify"`
ExcludeMetrics []string `json:"exlude_metrics"`
SkipQueues *regexp.Regexp `json:"-"`
IncludeQueues *regexp.Regexp `json:"-"`
SkipVHost *regexp.Regexp `json:"-"`
IncludeVHost *regexp.Regexp `json:"-"`
IncludeQueuesString string `json:"include_queues"`
SkipQueuesString string `json:"skip_queues"`
SkipVHostString string `json:"skip_vhost"`
IncludeVHostString string `json:"include_vhost"`
RabbitCapabilitiesString string `json:"rabbit_capabilities"`
RabbitCapabilities rabbitCapabilitySet `json:"-"`
EnabledExporters []string `json:"enabled_exporters"`
Timeout int `json:"timeout"`
MaxQueues int `json:"max_queues"`
}
type rabbitCapability string
type rabbitCapabilitySet map[rabbitCapability]bool
const (
rabbitCapNoSort rabbitCapability = "no_sort"
rabbitCapBert rabbitCapability = "bert"
)
var allRabbitCapabilities = rabbitCapabilitySet{
rabbitCapNoSort: true,
rabbitCapBert: true,
}
func initConfigFromFile(configFile string) error {
config = rabbitExporterConfig{}
err := gonfig.GetConf(configFile, &config)
if err != nil {
return err
}
if url := config.RabbitURL; url != "" {
if valid, _ := regexp.MatchString("https?://[a-zA-Z.0-9]+", strings.ToLower(url)); !valid {
panic(fmt.Errorf("rabbit URL must start with http:// or https://"))
}
}
config.SkipQueues = regexp.MustCompile(config.SkipQueuesString)
config.IncludeQueues = regexp.MustCompile(config.IncludeQueuesString)
config.SkipVHost = regexp.MustCompile(config.SkipVHostString)
config.IncludeVHost = regexp.MustCompile(config.IncludeVHostString)
config.RabbitCapabilities = parseCapabilities(config.RabbitCapabilitiesString)
return nil
}
func initConfig() {
config = defaultConfig
if url := os.Getenv("RABBIT_URL"); url != "" {
if valid, _ := regexp.MatchString("https?://[a-zA-Z.0-9]+", strings.ToLower(url)); valid {
config.RabbitURL = url
} else {
panic(fmt.Errorf("rabbit URL must start with http:// or https://"))
}
}
var user string
var pass string
if len(os.Getenv("RABBIT_USER_FILE")) != 0 {
fileContents, err := ioutil.ReadFile(os.Getenv("RABBIT_USER_FILE"))
if err != nil {
panic(err)
}
user = strings.TrimSpace(string(fileContents))
} else {
user = os.Getenv("RABBIT_USER")
}
if user != "" {
config.RabbitUsername = user
}
if len(os.Getenv("RABBIT_PASSWORD_FILE")) != 0 {
fileContents, err := ioutil.ReadFile(os.Getenv("RABBIT_PASSWORD_FILE"))
if err != nil {
panic(err)
}
pass = strings.TrimSpace(string(fileContents))
} else {
pass = os.Getenv("RABBIT_PASSWORD")
}
if pass != "" {
config.RabbitPassword = pass
}
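// Note (illustrative): RABBIT_USER_FILE / RABBIT_PASSWORD_FILE (e.g. secrets mounted as files)
// take precedence over RABBIT_USER / RABBIT_PASSWORD; file contents are trimmed of whitespace.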
if port := os.Getenv("PUBLISH_PORT"); port != "" {
if _, err := strconv.Atoi(port); err == nil {
config.PublishPort = port
} else {
panic(fmt.Errorf("the configured port is not a valid number: %v", port))
}
}
if addr := os.Getenv("PUBLISH_ADDR"); addr != "" {
config.PublishAddr = addr
}
if output := os.Getenv("OUTPUT_FORMAT"); output != "" {
config.OutputFormat = output
}
if cafile := os.Getenv("CAFILE"); cafile != "" {
config.CAFile = cafile
}
if certfile := os.Getenv("CERTFILE"); certfile != "" {
config.CertFile = certfile
}
if keyfile := os.Getenv("KEYFILE"); keyfile != "" {
config.KeyFile = keyfile
}
if insecureSkipVerify := os.Getenv("SKIPVERIFY"); insecureSkipVerify == "true" || insecureSkipVerify == "1" || insecureSkipVerify == "TRUE" {
config.InsecureSkipVerify = true
}
if ExcludeMetrics := os.Getenv("EXCLUDE_METRICS"); ExcludeMetrics != "" {
config.ExcludeMetrics = strings.Split(ExcludeMetrics, ",")
}
if SkipQueues := os.Getenv("SKIP_QUEUES"); SkipQueues != "" {
config.SkipQueues = regexp.MustCompile(SkipQueues)
}
if IncludeQueues := os.Getenv("INCLUDE_QUEUES"); IncludeQueues != "" {
config.IncludeQueues = regexp.MustCompile(IncludeQueues)
}
if SkipVHost := os.Getenv("SKIP_VHOST"); SkipVHost != "" {
config.SkipVHost = regexp.MustCompile(SkipVHost)
}
if IncludeVHost := os.Getenv("INCLUDE_VHOST"); IncludeVHost != "" {
config.IncludeVHost = regexp.MustCompile(IncludeVHost)
}
if rawCapabilities := os.Getenv("RABBIT_CAPABILITIES"); rawCapabilities != "" {
config.RabbitCapabilities = parseCapabilities(rawCapabilities)
}
if enabledExporters := os.Getenv("RABBIT_EXPORTERS"); enabledExporters != "" {
config.EnabledExporters = strings.Split(enabledExporters, ",")
}
if timeout := os.Getenv("RABBIT_TIMEOUT"); timeout != "" {
t, err := strconv.Atoi(timeout)
if err != nil {
panic(fmt.Errorf("timeout is not a number: %v", err))
}
config.Timeout = t
}
if maxQueues := os.Getenv("MAX_QUEUES"); maxQueues != "" {
m, err := strconv.Atoi(maxQueues)
if err != nil {
panic(fmt.Errorf("maxQueues is not a number: %v", err))
}
config.MaxQueues = m
}
}
func parseCapabilities(raw string) rabbitCapabilitySet {
result := make(rabbitCapabilitySet)
candidates := strings.Split(raw, ",")
for _, maybeCapStr := range candidates {
maybeCap := rabbitCapability(strings.TrimSpace(maybeCapStr))
enabled, present := allRabbitCapabilities[maybeCap]
if enabled && present {
result[maybeCap] = true
}
}
return result
}
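// Illustrative: parseCapabilities("no_sort, bert,unknown") returns a set containing only
// no_sort and bert; names outside allRabbitCapabilities are silently dropped.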
func isCapEnabled(config rabbitExporterConfig, cap rabbitCapability) bool {
enabled, present := config.RabbitCapabilities[cap]
return enabled && present
}
|
[
"\"RABBIT_URL\"",
"\"RABBIT_USER_FILE\"",
"\"RABBIT_USER_FILE\"",
"\"RABBIT_USER\"",
"\"RABBIT_PASSWORD_FILE\"",
"\"RABBIT_PASSWORD_FILE\"",
"\"RABBIT_PASSWORD\"",
"\"PUBLISH_PORT\"",
"\"PUBLISH_ADDR\"",
"\"OUTPUT_FORMAT\"",
"\"CAFILE\"",
"\"CERTFILE\"",
"\"KEYFILE\"",
"\"SKIPVERIFY\"",
"\"EXCLUDE_METRICS\"",
"\"SKIP_QUEUES\"",
"\"INCLUDE_QUEUES\"",
"\"SKIP_VHOST\"",
"\"INCLUDE_VHOST\"",
"\"RABBIT_CAPABILITIES\"",
"\"RABBIT_EXPORTERS\"",
"\"RABBIT_TIMEOUT\"",
"\"MAX_QUEUES\""
] |
[] |
[
"RABBIT_URL",
"RABBIT_USER",
"RABBIT_CAPABILITIES",
"INCLUDE_QUEUES",
"CAFILE",
"SKIP_QUEUES",
"INCLUDE_VHOST",
"EXCLUDE_METRICS",
"PUBLISH_PORT",
"RABBIT_TIMEOUT",
"MAX_QUEUES",
"SKIP_VHOST",
"RABBIT_USER_FILE",
"SKIPVERIFY",
"OUTPUT_FORMAT",
"RABBIT_EXPORTERS",
"RABBIT_PASSWORD_FILE",
"RABBIT_PASSWORD",
"CERTFILE",
"KEYFILE",
"PUBLISH_ADDR"
] |
[]
|
["RABBIT_URL", "RABBIT_USER", "RABBIT_CAPABILITIES", "INCLUDE_QUEUES", "CAFILE", "SKIP_QUEUES", "INCLUDE_VHOST", "EXCLUDE_METRICS", "PUBLISH_PORT", "RABBIT_TIMEOUT", "MAX_QUEUES", "SKIP_VHOST", "RABBIT_USER_FILE", "SKIPVERIFY", "OUTPUT_FORMAT", "RABBIT_EXPORTERS", "RABBIT_PASSWORD_FILE", "RABBIT_PASSWORD", "CERTFILE", "KEYFILE", "PUBLISH_ADDR"]
|
go
| 21 | 0 | |
samples/snippets/prediction_service/predict_image_classification_sample_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import predict_image_classification_sample
ENDPOINT_ID = "71213169107795968" # permanent_50_flowers_endpoint
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
PATH_TO_IMG = pathlib.Path(__file__).parent.absolute() / "resources/daisy.jpg"
def test_ucaip_generated_predict_image_classification_sample(capsys):
predict_image_classification_sample.predict_image_classification_sample(
filename=PATH_TO_IMG, project=PROJECT_ID, endpoint_id=ENDPOINT_ID
)
out, _ = capsys.readouterr()
assert 'deployed_model_id:' in out
|
[] |
[] |
[
"BUILD_SPECIFIC_GCLOUD_PROJECT"
] |
[]
|
["BUILD_SPECIFIC_GCLOUD_PROJECT"]
|
python
| 1 | 0 | |
sylph-runners/flink/src/main/java/ideal/sylph/runner/flink/FlinkRunnerModule.java
|
/*
* Copyright (C) 2018 The Sylph Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ideal.sylph.runner.flink;
import com.google.inject.Binder;
import com.google.inject.Inject;
import com.google.inject.Module;
import com.google.inject.Provider;
import com.google.inject.Scopes;
import ideal.sylph.runner.flink.yarn.YarnClusterConfiguration;
import ideal.sylph.spi.exception.SylphException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.Arrays;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static ideal.sylph.runner.flink.FlinkRunner.FLINK_DIST;
import static ideal.sylph.spi.exception.StandardErrorCode.CONFIG_ERROR;
import static java.util.Objects.requireNonNull;
public class FlinkRunnerModule
implements Module
{
private static final Logger logger = LoggerFactory.getLogger(FlinkRunnerModule.class);
@Override
public void configure(Binder binder)
{
binder.bind(YarnConfiguration.class).toProvider(FlinkRunnerModule::loadYarnConfiguration).in(Scopes.SINGLETON);
binder.bind(YarnClient.class).toProvider(YarnClientProvider.class).in(Scopes.SINGLETON);
binder.bind(YarnClusterConfiguration.class).toProvider(YarnClusterConfigurationProvider.class).in(Scopes.SINGLETON);
}
private static class YarnClientProvider
implements Provider<YarnClient>
{
@Inject private YarnConfiguration yarnConfiguration;
@Override
public YarnClient get()
{
YarnClient client = YarnClient.createYarnClient();
if (yarnConfiguration.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false)) {
try {
TimelineClient.createTimelineClient();
}
catch (NoClassDefFoundError e) {
logger.warn("createTimelineClient() error with {}", TimelineClient.class.getResource(TimelineClient.class.getSimpleName() + ".class"), e);
yarnConfiguration.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
}
}
client.init(yarnConfiguration);
client.start();
return client;
}
}
private static class YarnClusterConfigurationProvider
implements Provider<YarnClusterConfiguration>
{
@Inject private YarnConfiguration yarnConf;
@Override
public YarnClusterConfiguration get()
{
Path flinkJar = new Path(getFlinkJarFile().toURI());
@SuppressWarnings("ConstantConditions") final Set<Path> resourcesToLocalize = Stream
.of("conf/flink-conf.yaml", "conf/log4j.properties", "conf/logback.xml")
.map(x -> new Path(new File(System.getenv("FLINK_HOME"), x).toURI()))
.collect(Collectors.toSet());
String home = "hdfs:///tmp/sylph/apps";
return new YarnClusterConfiguration(
yarnConf,
home,
flinkJar,
resourcesToLocalize);
}
}
private static YarnConfiguration loadYarnConfiguration()
{
Configuration hadoopConf = new Configuration();
hadoopConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
Stream.of("yarn-site.xml", "core-site.xml", "hdfs-site.xml").forEach(file -> {
File site = new File(requireNonNull(System.getenv("HADOOP_CONF_DIR"), "ENV HADOOP_CONF_DIR is not set"), file);
if (site.exists() && site.isFile()) {
hadoopConf.addResource(new org.apache.hadoop.fs.Path(site.toURI()));
}
else {
throw new SylphException(CONFIG_ERROR, site + " does not exist");
}
});
YarnConfiguration yarnConf = new YarnConfiguration(hadoopConf);
// try (PrintWriter pw = new PrintWriter(new FileWriter(yarnSite))) { //write local file
// yarnConf.writeXml(pw);
// }
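// Illustrative: with HADOOP_CONF_DIR=/etc/hadoop/conf (hypothetical path) this loads
// /etc/hadoop/conf/{yarn-site.xml,core-site.xml,hdfs-site.xml} into the configuration,
// failing fast if any of the three files is missing.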
return yarnConf;
}
private static File getFlinkJarFile()
{
String flinkHome = requireNonNull(System.getenv("FLINK_HOME"), "FLINK_HOME env is not set");
if (!new File(flinkHome).exists()) {
throw new IllegalArgumentException("FLINK_HOME " + flinkHome + " not exists");
}
String errorMessage = "error not search " + FLINK_DIST + "*.jar";
File[] files = requireNonNull(new File(flinkHome, "lib").listFiles(), errorMessage);
Optional<File> file = Arrays.stream(files)
.filter(f -> f.getName().startsWith(FLINK_DIST)).findFirst();
return file.orElseThrow(() -> new IllegalArgumentException(errorMessage));
}
}
|
[
"\"FLINK_HOME\"",
"\"HADOOP_CONF_DIR\"",
"\"FLINK_HOME\""
] |
[] |
[
"FLINK_HOME",
"HADOOP_CONF_DIR"
] |
[]
|
["FLINK_HOME", "HADOOP_CONF_DIR"]
|
java
| 2 | 0 | |
middleware/jwt.go
|
package middleware
import (
"os"
echo "github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
func JWTMiddleware() echo.MiddlewareFunc {
return middleware.JWTWithConfig(middleware.JWTConfig{
SigningMethod: "HS256",
SigningKey: []byte(os.Getenv("JWT_SECRET")),
})
}
|
[
"\"JWT_SECRET\""
] |
[] |
[
"JWT_SECRET"
] |
[]
|
["JWT_SECRET"]
|
go
| 1 | 0 | |
sanity_check_raf_db.py
|
import os
import click
import psycopg2
import s3fs
from dotenv import load_dotenv, find_dotenv
from config.internal_config import ENVIRONMENT
from voucher_opt.constants import COUNTRY, CONTROL, ACTION_CODE, ACTION_GENERATED_DATE, EXPLORATION, LOGPROB, \
MODEL_VERSION, PROJECT, SEGMENT, ACTION_DESC
from tools.monitor_rok import validate_model_in_rok
from tools.monitor_utils import get_latest_model_ids, export_to_influx, plural_suffix
rok_DB_CONN_TRIES = 3
ACTION_DESCRIPTION = 'action_description'
rok_TO_PREDICTION_COLUMNS = {
ACTION_CODE: ACTION_CODE,
ACTION_DESCRIPTION: ACTION_DESC,
ACTION_GENERATED_DATE: ACTION_GENERATED_DATE,
CONTROL: CONTROL,
COUNTRY: COUNTRY,
EXPLORATION: EXPLORATION,
LOGPROB: LOGPROB,
MODEL_VERSION: MODEL_VERSION,
PROJECT: PROJECT,
SEGMENT: SEGMENT
}
load_dotenv(find_dotenv())
@click.command()
@click.argument('execution_date', type=str)
def main(execution_date):
print("rok DB sanity check")
print(f'environment = {ENVIRONMENT.name}')
print(f'execution_date = {execution_date}')
rok_conn = psycopg2.connect(dbname=os.getenv('rok_DB'), host=os.getenv('rok_HOST'), user=os.getenv('rok_USER'),
password=os.getenv('rok_PASSWORD'))
s3 = s3fs.S3FileSystem()
errors = {}
latest_model_ids = get_latest_model_ids(execution_date, s3)
for model_id in latest_model_ids:
validate_model_in_rok(model_id, errors, rok_conn, s3)
print()
print(f'Found in total {sum(len(model_errors) for model_errors in errors.values())} '
f'error{plural_suffix(errors)} related to the rok customer tracking table:\n')
print()
for model_id, model_errors in errors.items():
if len(model_errors):
print(f'Errors for {model_id}:')
print('*' * 150)
print(*model_errors, sep='\n')
print('*' * 150 + '\n')
export_to_influx('rok', errors, execution_date)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"rok_PASSWORD",
"rok_USER",
"rok_HOST",
"rok_DB"
] |
[]
|
["rok_PASSWORD", "rok_USER", "rok_HOST", "rok_DB"]
|
python
| 4 | 0 | |
vendor/github.com/intel/multus-cni/multus/multus.go
|
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This is a "Multi-plugin".The delegate concept refered from CNI project
// It reads other plugin netconf, and then invoke them, e.g.
// flannel or sriov plugin.
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
cniversion "github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ns"
k8s "github.com/intel/multus-cni/k8sclient"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/types"
"github.com/vishvananda/netlink"
"k8s.io/apimachinery/pkg/util/wait"
)
var version = "master@git"
var commit = "unknown commit"
var date = "unknown date"
var defaultReadinessBackoff = wait.Backoff{
Steps: 4,
Duration: 250 * time.Millisecond,
Factor: 4.0,
Jitter: 0.1,
}
func printVersionString() string {
return fmt.Sprintf("multus-cni version:%s, commit:%s, date:%s",
version, commit, date)
}
func saveScratchNetConf(containerID, dataDir string, netconf []byte) error {
logging.Debugf("saveScratchNetConf: %s, %s, %s", containerID, dataDir, string(netconf))
if err := os.MkdirAll(dataDir, 0700); err != nil {
return logging.Errorf("failed to create the multus data directory(%q): %v", dataDir, err)
}
path := filepath.Join(dataDir, containerID)
err := ioutil.WriteFile(path, netconf, 0600)
if err != nil {
return logging.Errorf("failed to write container data in the path(%q): %v", path, err)
}
return err
}
func consumeScratchNetConf(containerID, dataDir string) ([]byte, error) {
logging.Debugf("consumeScratchNetConf: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
defer os.Remove(path)
return ioutil.ReadFile(path)
}
func getIfname(delegate *types.DelegateNetConf, argif string, idx int) string {
logging.Debugf("getIfname: %v, %s, %d", delegate, argif, idx)
if delegate.IfnameRequest != "" {
return delegate.IfnameRequest
}
if delegate.MasterPlugin {
// master plugin always uses the CNI-provided interface name
return argif
}
// Otherwise construct a unique interface name from the delegate's
// position in the delegate list
return fmt.Sprintf("net%d", idx)
}
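// Illustrative: with CNI-provided ifname "eth0", the master plugin keeps "eth0", an explicit
// IfnameRequest is honored as-is, and e.g. the delegate at index 2 is named "net2".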
func saveDelegates(containerID, dataDir string, delegates []*types.DelegateNetConf) error {
logging.Debugf("saveDelegates: %s, %s, %v", containerID, dataDir, delegates)
delegatesBytes, err := json.Marshal(delegates)
if err != nil {
return logging.Errorf("error serializing delegate netconf: %v", err)
}
if err = saveScratchNetConf(containerID, dataDir, delegatesBytes); err != nil {
return logging.Errorf("error in saving the delegates : %v", err)
}
return err
}
func validateIfName(nsname string, ifname string) error {
logging.Debugf("validateIfName: %s, %s", nsname, ifname)
podNs, err := ns.GetNS(nsname)
if err != nil {
return logging.Errorf("no netns: %v", err)
}
err = podNs.Do(func(_ ns.NetNS) error {
_, err := netlink.LinkByName(ifname)
if err != nil {
if err.Error() == "Link not found" {
return nil
}
return err
}
return logging.Errorf("ifname %s is already exist", ifname)
})
return err
}
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s, %s", rt, string(rawnetconflist), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append(binDirs, binDir)
cniNet := libcni.NewCNIConfig(binDirs, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return nil, logging.Errorf("error in converting the raw bytes to conflist: %v", err)
}
result, err := cniNet.AddNetworkList(confList, rt)
if err != nil {
return nil, logging.Errorf("error in getting result from AddNetworkList: %v", err)
}
return result, nil
}
func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s, %s", rt, string(rawnetconflist), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append(binDirs, binDir)
cniNet := libcni.NewCNIConfig(binDirs, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return logging.Errorf("error in converting the raw bytes to conflist: %v", err)
}
err = cniNet.DelNetworkList(confList, rt)
if err != nil {
return logging.Errorf("error in getting result from DelNetworkList: %v", err)
}
return err
}
func delegateAdd(exec invoke.Exec, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v, %s", exec, ifName, delegate, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return nil, logging.Errorf("Multus: error in setting CNI_IFNAME")
}
if err := validateIfName(os.Getenv("CNI_NETNS"), ifName); err != nil {
return nil, logging.Errorf("cannot set %q ifname to %q: %v", delegate.Conf.Type, ifName, err)
}
if delegate.MacRequest != "" || delegate.IPRequest != "" {
if cniArgs != "" {
cniArgs = fmt.Sprintf("%s;IgnoreUnknown=true", cniArgs)
} else {
cniArgs = "IgnoreUnknown=true"
}
if delegate.MacRequest != "" {
// validate Mac address
_, err := net.ParseMAC(delegate.MacRequest)
if err != nil {
return nil, logging.Errorf("failed to parse mac address %q", delegate.MacRequest)
}
cniArgs = fmt.Sprintf("%s;MAC=%s", cniArgs, delegate.MacRequest)
logging.Debugf("Set MAC address %q to %q", delegate.MacRequest, ifName)
}
if delegate.IPRequest != "" {
// validate IP address
if strings.Contains(delegate.IPRequest, "/") {
_, _, err := net.ParseCIDR(delegate.IPRequest)
if err != nil {
return nil, logging.Errorf("failed to parse CIDR %q", delegate.MacRequest)
}
} else if net.ParseIP(delegate.IPRequest) == nil {
return nil, logging.Errorf("failed to parse IP address %q", delegate.IPRequest)
}
cniArgs = fmt.Sprintf("%s;IP=%s", cniArgs, delegate.IPRequest)
logging.Debugf("Set IP address %q to %q", delegate.IPRequest, ifName)
}
if os.Setenv("CNI_ARGS", cniArgs) != nil {
return nil, logging.Errorf("cannot set %q mac to %q and ip to %q", delegate.Conf.Type, delegate.MacRequest, delegate.IPRequest)
}
}
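// Illustrative: a MacRequest of "0a:58:0a:f4:01:02" and an IPRequest of "10.244.1.2/24" (hypothetical
// values) result in CNI_ARGS like "IgnoreUnknown=true;MAC=0a:58:0a:f4:01:02;IP=10.244.1.2/24".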
if delegate.ConfListPlugin != false {
result, err := conflistAdd(rt, delegate.Bytes, binDir, exec)
if err != nil {
return nil, logging.Errorf("Multus: error in invoke Conflist add - %q: %v", delegate.ConfList.Name, err)
}
return result, nil
}
result, err := invoke.DelegateAdd(delegate.Conf.Type, delegate.Bytes, exec)
if err != nil {
return nil, logging.Errorf("Multus: error in invoke Delegate add - %q: %v", delegate.Conf.Type, err)
}
return result, nil
}
func delegateDel(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delegateDel: %v, %s, %v, %v, %s", exec, ifName, delegateConf, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("Multus: error in setting CNI_IFNAME")
}
if delegateConf.ConfListPlugin != false {
err := conflistDel(rt, delegateConf.Bytes, binDir, exec)
if err != nil {
return logging.Errorf("Multus: error in invoke Conflist Del - %q: %v", delegateConf.ConfList.Name, err)
}
return err
}
if err := invoke.DelegateDel(delegateConf.Conf.Type, delegateConf.Bytes, exec); err != nil {
return logging.Errorf("Multus: error in invoke Delegate del - %q: %v", delegateConf.Conf.Type, err)
}
return nil
}
func delPlugins(exec invoke.Exec, argIfname string, delegates []*types.DelegateNetConf, lastIdx int, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delPlugins: %v, %s, %v, %d, %v, %s", exec, argIfname, delegates, lastIdx, rt, binDir)
if os.Setenv("CNI_COMMAND", "DEL") != nil {
return logging.Errorf("Multus: error in setting CNI_COMMAND to DEL")
}
var errorstrings []string
for idx := lastIdx; idx >= 0; idx-- {
ifName := getIfname(delegates[idx], argIfname, idx)
rt.IfName = ifName
// Attempt to delete all but do not error out, instead, collect all errors.
if err := delegateDel(exec, ifName, delegates[idx], rt, binDir); err != nil {
errorstrings = append(errorstrings, err.Error())
}
}
// Check if we had any errors, and send them all back.
if len(errorstrings) > 0 {
return fmt.Errorf(strings.Join(errorstrings, " / "))
}
return nil
}
func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cnitypes.Result, error) {
logging.Debugf("cmdAdd: %v, %v, %v", args, exec, kubeClient)
n, err := types.LoadNetConf(args.StdinData)
if err != nil {
return nil, logging.Errorf("err in loading netconf: %v", err)
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return nil, logging.Errorf("Multus: Err in getting k8s args: %v", err)
}
wait.ExponentialBackoff(defaultReadinessBackoff, func() (bool, error) {
_, err := os.Stat(n.ReadinessIndicatorFile)
switch {
case err == nil:
return true, nil
default:
return false, nil
}
})
if n.ClusterNetwork != "" {
err = k8s.GetDefaultNetworks(k8sArgs, n, kubeClient)
if err != nil {
return nil, logging.Errorf("Multus: Failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
n.Delegates[0].MasterPlugin = true
}
numK8sDelegates, kc, err := k8s.TryLoadPodDelegates(k8sArgs, n, kubeClient)
if err != nil {
return nil, logging.Errorf("Multus: Err in loading K8s Delegates k8s args: %v", err)
}
if numK8sDelegates == 0 {
// cache the multus config if we have only Multus delegates
if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
return nil, logging.Errorf("Multus: Err in saving the delegates: %v", err)
}
}
var result, tmpResult cnitypes.Result
var netStatus []*types.NetworkStatus
var rt *libcni.RuntimeConf
lastIdx := 0
cniArgs := os.Getenv("CNI_ARGS")
for idx, delegate := range n.Delegates {
lastIdx = idx
ifName := getIfname(delegate, args.IfName, idx)
rt, _ = types.LoadCNIRuntimeConf(args, k8sArgs, ifName, n.RuntimeConfig)
tmpResult, err = delegateAdd(exec, ifName, delegate, rt, n.BinDir, cniArgs)
if err != nil {
break
}
// Master plugin result is always used if present
if delegate.MasterPlugin || result == nil {
result = tmpResult
}
// create the network status, only when Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if kc.Podnamespace != "kube-system" {
delegateNetStatus, err := types.LoadNetworkStatus(tmpResult, delegate.Conf.Name, delegate.MasterPlugin)
if err != nil {
return nil, logging.Errorf("Multus: Err in setting networks status: %v", err)
}
netStatus = append(netStatus, delegateNetStatus)
}
}
}
if err != nil {
// Ignore errors; DEL must be idempotent anyway
_ = delPlugins(exec, args.IfName, n.Delegates, lastIdx, rt, n.BinDir)
return nil, logging.Errorf("Multus: Err in tearing down failed plugins: %v", err)
}
// set the network status annotation in the apiserver, only when Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if kc.Podnamespace != "kube-system" {
err = k8s.SetNetworkStatus(kc, netStatus)
if err != nil {
return nil, logging.Errorf("Multus: Err set the networks status: %v", err)
}
}
}
return result, nil
}
func cmdGet(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cnitypes.Result, error) {
logging.Debugf("cmdGet: %v, %v, %v", args, exec, kubeClient)
in, err := types.LoadNetConf(args.StdinData)
if err != nil {
return nil, err
}
// FIXME: call all delegates
return in.PrevResult, nil
}
func cmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) error {
logging.Debugf("cmdDel: %v, %v, %v", args, exec, kubeClient)
in, err := types.LoadNetConf(args.StdinData)
if err != nil {
return err
}
if args.Netns == "" {
return nil
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
// The netns may already have been removed by the Cloud Orchestration Engine, or DEL may be called multiple times,
// so don't return an error if the device is already removed.
// https://github.com/kubernetes/kubernetes/issues/43014#issuecomment-287164444
_, ok := err.(ns.NSPathNotExistErr)
if ok {
return nil
}
return fmt.Errorf("failed to open netns %q: %v", netns, err)
}
defer netns.Close()
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return logging.Errorf("Multus: Err in getting k8s args: %v", err)
}
numK8sDelegates, kc, err := k8s.TryLoadPodDelegates(k8sArgs, in, kubeClient)
if err != nil {
return err
}
if numK8sDelegates == 0 {
// re-read the scratch multus config if we have only Multus delegates
netconfBytes, err := consumeScratchNetConf(args.ContainerID, in.CNIDir)
if err != nil {
if os.IsNotExist(err) {
// Per spec should ignore error if resources are missing / already removed
return nil
}
return logging.Errorf("Multus: Err in reading the delegates: %v", err)
}
if err := json.Unmarshal(netconfBytes, &in.Delegates); err != nil {
return logging.Errorf("Multus: failed to load netconf: %v", err)
}
}
// unset the network status annotation in the apiserver, only when Multus has a kubeconfig
if in.Kubeconfig != "" && kc != nil {
if kc.Podnamespace != "kube-system" {
err := k8s.SetNetworkStatus(kc, nil)
if err != nil {
return logging.Errorf("Multus: Err unset the networks status: %v", err)
}
}
}
rt, _ := types.LoadCNIRuntimeConf(args, k8sArgs, "", in.RuntimeConfig)
return delPlugins(exec, args.IfName, in.Delegates, len(in.Delegates)-1, rt, in.BinDir)
}
func main() {
// Init command line flags to clear vendored packages' one, especially in init()
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
// add version flag
versionOpt := false
flag.BoolVar(&versionOpt, "version", false, "Show application version")
flag.BoolVar(&versionOpt, "v", false, "Show application version")
flag.Parse()
if versionOpt {
fmt.Printf("%s\n", printVersionString())
return
}
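// skel.PluginMain wires the ADD, GET, and DEL entry points into the CNI skeleton.
// exec and kubeClient are passed as nil here, so the handlers are expected to build
// their own defaults (the standard executor and a Kubernetes client from the
// configured kubeconfig) when they need them.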
skel.PluginMain(
func(args *skel.CmdArgs) error {
result, err := cmdAdd(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error {
result, err := cmdGet(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error { return cmdDel(args, nil, nil) },
cniversion.All, "meta-plugin that delegates to other CNI plugins")
}
|
[
"\"CNI_PATH\"",
"\"CNI_PATH\"",
"\"CNI_NETNS\"",
"\"CNI_ARGS\""
] |
[] |
[
"CNI_ARGS",
"CNI_NETNS",
"CNI_PATH"
] |
[]
|
["CNI_ARGS", "CNI_NETNS", "CNI_PATH"]
|
go
| 3 | 0 | |
executor/executor_test.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"context"
"flag"
"fmt"
"math"
"net"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/golang/protobuf/proto"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/deadlockhistory"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/israce"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/rowcodec"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
"github.com/pingcap/tidb/util/timeutil"
"github.com/pingcap/tipb/go-tipb"
"github.com/tikv/client-go/v2/oracle"
"github.com/tikv/client-go/v2/testutils"
"github.com/tikv/client-go/v2/tikv"
"github.com/tikv/client-go/v2/tikvrpc"
"google.golang.org/grpc"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
logLevel := os.Getenv("log_level")
err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
if err != nil {
t.Fatal(err)
}
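// Allocate auto-increment IDs in batches of 5000 so the tests spend less time
// in the ID allocator.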
autoid.SetStep(5000)
config.UpdateGlobal(func(conf *config.Config) {
conf.Log.SlowThreshold = 30000 // 30s
conf.TiKVClient.AsyncCommit.SafeWindow = 0
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
})
tikv.EnableFailpoints()
tmpDir := config.GetGlobalConfig().TempStoragePath
_ = os.RemoveAll(tmpDir) // clean up temp files left over from the previous run.
_ = os.MkdirAll(tmpDir, 0755)
testleak.BeforeTest()
TestingT(t)
testleak.AfterTestT(t)()
}
var _ = Suite(&testSuite{&baseTestSuite{}})
var _ = Suite(&testSuiteP1{&baseTestSuite{}})
var _ = Suite(&testSuiteP2{&baseTestSuite{}})
var _ = Suite(&testSuite1{})
var _ = SerialSuites(&testSerialSuite2{})
var _ = Suite(&testSuite2{&baseTestSuite{}})
var _ = Suite(&testSuite3{&baseTestSuite{}})
var _ = Suite(&testSuite4{&baseTestSuite{}})
var _ = Suite(&testSuite5{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin1{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin2{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin3{&baseTestSuite{}})
var _ = SerialSuites(&testSuiteJoinSerial{&baseTestSuite{}})
var _ = Suite(&testSuiteAgg{baseTestSuite: &baseTestSuite{}})
var _ = Suite(&testSuite6{&baseTestSuite{}})
var _ = Suite(&testSuite7{&baseTestSuite{}})
var _ = Suite(&testSuite8{&baseTestSuite{}})
var _ = SerialSuites(&testShowStatsSuite{&baseTestSuite{}})
var _ = Suite(&testBypassSuite{})
var _ = Suite(&testUpdateSuite{})
var _ = Suite(&testPointGetSuite{})
var _ = Suite(&testBatchPointGetSuite{})
var _ = SerialSuites(&testRecoverTable{})
var _ = SerialSuites(&testMemTableReaderSuite{&testClusterTableBase{}})
var _ = SerialSuites(&testFlushSuite{})
var _ = SerialSuites(&testAutoRandomSuite{&baseTestSuite{}})
var _ = SerialSuites(&testClusterTableSuite{})
var _ = SerialSuites(&testPrepareSerialSuite{&baseTestSuite{}})
var _ = SerialSuites(&testSplitTable{&baseTestSuite{}})
var _ = Suite(&testSuiteWithData{baseTestSuite: &baseTestSuite{}})
var _ = SerialSuites(&testSerialSuite1{&baseTestSuite{}})
var _ = SerialSuites(&testSlowQuery{&baseTestSuite{}})
var _ = Suite(&partitionTableSuite{&baseTestSuite{}})
var _ = SerialSuites(&tiflashTestSuite{})
var _ = SerialSuites(&globalIndexSuite{&baseTestSuite{}})
var _ = SerialSuites(&testSerialSuite{&baseTestSuite{}})
var _ = SerialSuites(&testStaleTxnSerialSuite{&baseTestSuite{}})
var _ = Suite(&testStaleTxnSuite{&baseTestSuite{}})
var _ = SerialSuites(&testCoprCache{})
var _ = SerialSuites(&testPrepareSuite{})
var _ = SerialSuites(&testResourceTagSuite{&baseTestSuite{}})
type testSuite struct{ *baseTestSuite }
type testSuiteP1 struct{ *baseTestSuite }
type testSuiteP2 struct{ *baseTestSuite }
type testSplitTable struct{ *baseTestSuite }
type testSuiteWithData struct {
*baseTestSuite
testData testutil.TestData
}
type testSlowQuery struct{ *baseTestSuite }
type partitionTableSuite struct{ *baseTestSuite }
type globalIndexSuite struct{ *baseTestSuite }
type testSerialSuite struct{ *baseTestSuite }
type testStaleTxnSerialSuite struct{ *baseTestSuite }
type testStaleTxnSuite struct{ *baseTestSuite }
type testCoprCache struct {
store kv.Storage
dom *domain.Domain
cls testutils.Cluster
}
type testPrepareSuite struct{ testData testutil.TestData }
type testResourceTagSuite struct{ *baseTestSuite }
type baseTestSuite struct {
cluster testutils.Cluster
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context // nolint:structcheck
}
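// mockTikv controls whether the suites run against an in-memory mock TiKV
// cluster, which is the default for these executor tests.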
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
func (s *baseTestSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
store, err := mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c testutils.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.store = store
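// A zero schema lease makes DDL changes visible immediately, and disabling the
// background statistics worker keeps the tests deterministic.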
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionLog
})
}
func (s *testSuiteWithData) SetUpSuite(c *C) {
s.baseTestSuite.SetUpSuite(c)
var err error
s.testData, err = testutil.LoadTestSuiteData("testdata", "executor_suite")
c.Assert(err, IsNil)
}
func (s *testSuiteWithData) TearDownSuite(c *C) {
s.baseTestSuite.TearDownSuite(c)
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
}
func (s *testPrepareSuite) SetUpSuite(c *C) {
var err error
s.testData, err = testutil.LoadTestSuiteData("testdata", "prepare_suite")
c.Assert(err, IsNil)
}
func (s *testPrepareSuite) TearDownSuite(c *C) {
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
}
func (s *baseTestSuite) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *globalIndexSuite) SetUpSuite(c *C) {
s.baseTestSuite.SetUpSuite(c)
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = true
})
}
func (s *testSuiteP1) TestPessimisticSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, a int)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("begin PESSIMISTIC")
tk.MustQuery("select a from t where id=1 for update").Check(testkit.Rows("1"))
tk.MustExec("update t set a=a+1 where id=1")
tk.MustExec("commit")
tk.MustQuery("select a from t where id=1").Check(testkit.Rows("2"))
}
func (s *testSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuiteP1) TestBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists testbind")
tk.MustExec("create table testbind(i int, s varchar(20))")
tk.MustExec("create index index_t on testbind(i,s)")
tk.MustExec("create global binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show global bindings").Rows()), Equals, 1)
tk.MustExec("create session binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show session bindings").Rows()), Equals, 1)
tk.MustExec("drop session binding for select * from testbind")
}
func (s *testSuiteP1) TestChangePumpAndDrainer(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Changing a pump or drainer's state requires a connection to etcd,
// so these statements fail with "URL scheme must be http, https, unix, or unixs: /tmp/tidb".
err := tk.ExecToErr("change pump to node_state ='paused' for node_id 'pump1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
err = tk.ExecToErr("change drainer to node_state ='paused' for node_id 'drainer1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
}
func (s *testSuiteP1) TestLoadStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
c.Assert(tk.ExecToErr("load stats"), NotNil)
c.Assert(tk.ExecToErr("load stats ./xxx.json"), NotNil)
}
func (s *testSuiteP1) TestPlanRecreator(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx_a(a))")
tk.MustExec("plan recreator dump explain select * from t where a=10")
}
func (s *testSuiteP1) TestShow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test_show;")
tk.MustExec("use test_show")
tk.MustQuery("show engines")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
c.Assert(len(tk.MustQuery("show index in t").Rows()), Equals, 1)
c.Assert(len(tk.MustQuery("show index from t").Rows()), Equals, 1)
tk.MustQuery("show charset").Check(testkit.Rows(
"utf8 UTF-8 Unicode utf8_bin 3",
"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
"ascii US ASCII ascii_bin 1",
"latin1 Latin1 latin1_bin 1",
"binary binary binary 1"))
c.Assert(len(tk.MustQuery("show master status").Rows()), Equals, 1)
tk.MustQuery("show create database test_show").Check(testkit.Rows("test_show CREATE DATABASE `test_show` /*!40100 DEFAULT CHARACTER SET utf8mb4 */"))
tk.MustQuery("show privileges").Check(testkit.Rows("Alter Tables To alter the table",
"Alter routine Functions,Procedures To alter or drop stored functions/procedures",
"Create Databases,Tables,Indexes To create new databases and tables",
"Create routine Databases To use CREATE FUNCTION/PROCEDURE",
"Create temporary tables Databases To use CREATE TEMPORARY TABLE",
"Create view Tables To create new views",
"Create user Server Admin To create new users",
"Delete Tables To delete existing rows",
"Drop Databases,Tables To drop databases, tables, and views",
"Event Server Admin To create, alter, drop and execute events",
"Execute Functions,Procedures To execute stored routines",
"File File access on server To read and write files on the server",
"Grant option Databases,Tables,Functions,Procedures To give to other users those privileges you possess",
"Index Tables To create or drop indexes",
"Insert Tables To insert data into tables",
"Lock tables Databases To use LOCK TABLES (together with SELECT privilege)",
"Process Server Admin To view the plain text of currently executing queries",
"Proxy Server Admin To make proxy user possible",
"References Databases,Tables To have references on tables",
"Reload Server Admin To reload or refresh tables, logs and privileges",
"Replication client Server Admin To ask where the slave or master servers are",
"Replication slave Server Admin To read binary log events from the master",
"Select Tables To retrieve rows from table",
"Show databases Server Admin To see all databases with SHOW DATABASES",
"Show view Tables To see views with SHOW CREATE VIEW",
"Shutdown Server Admin To shut down the server",
"Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc.",
"Trigger Tables To use triggers",
"Create tablespace Server Admin To create/alter/drop tablespaces",
"Update Tables To update existing rows",
"Usage Server Admin No privileges - allow connect only",
"BACKUP_ADMIN Server Admin ",
"RESTORE_ADMIN Server Admin ",
"SYSTEM_USER Server Admin ",
"SYSTEM_VARIABLES_ADMIN Server Admin ",
"ROLE_ADMIN Server Admin ",
"CONNECTION_ADMIN Server Admin ",
"RESTRICTED_TABLES_ADMIN Server Admin ",
"RESTRICTED_STATUS_ADMIN Server Admin ",
"RESTRICTED_VARIABLES_ADMIN Server Admin ",
"RESTRICTED_USER_ADMIN Server Admin ",
"RESTRICTED_CONNECTION_ADMIN Server Admin ",
"RESTRICTED_REPLICA_WRITER_ADMIN Server Admin ",
))
c.Assert(len(tk.MustQuery("show table status").Rows()), Equals, 1)
}
func (s *testSuite3) TestAdmin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1))")
tk.MustExec("insert admin_test (c1) values (1),(2),(NULL)")
ctx := context.Background()
// cancel DDL jobs test
r, err := tk.Exec("admin cancel ddl jobs 1")
c.Assert(err, IsNil, Commentf("err %v", err))
req := r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row := req.GetRow(0)
c.Assert(row.Len(), Equals, 2)
c.Assert(row.GetString(0), Equals, "1")
c.Assert(row.GetString(1), Matches, "*DDL Job:1 not found")
// show ddl test;
r, err = tk.Exec("admin show ddl")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 6)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
ddlInfo, err := admin.GetDDLInfo(txn)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, ddlInfo.SchemaVer)
// TODO: Pass this test.
// rowOwnerInfos := strings.Split(row.Data[1].GetString(), ",")
// ownerInfos := strings.Split(ddlInfo.Owner.String(), ",")
// c.Assert(rowOwnerInfos[0], Equals, ownerInfos[0])
serverInfo, err := infosync.GetServerInfoByID(ctx, row.GetString(1))
c.Assert(err, IsNil)
c.Assert(row.GetString(2), Equals, serverInfo.IP+":"+
strconv.FormatUint(uint64(serverInfo.Port), 10))
c.Assert(row.GetString(3), Equals, "")
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
c.Assert(req.NumRows() == 0, IsTrue)
err = txn.Rollback()
c.Assert(err, IsNil)
// show DDL jobs test
r, err = tk.Exec("admin show ddl jobs")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
c.Assert(len(historyJobs), Greater, 1)
c.Assert(len(row.GetString(1)), Greater, 0)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
r, err = tk.Exec("admin show ddl jobs 20")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
// show DDL job queries test
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test2")
tk.MustExec("create table admin_test2 (c1 int, c2 int, c3 int default 1, index (c1))")
result := tk.MustQuery(`admin show ddl job queries 1, 1, 1`)
result.Check(testkit.Rows())
result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`)
result.Check(testkit.Rows())
historyJobs, err = admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJobs[0].ID))
result.Check(testkit.Rows(historyJobs[0].Query))
c.Assert(err, IsNil)
// check table test
tk.MustExec("create table admin_test1 (c1 int, c2 int default 1, index (c1))")
tk.MustExec("insert admin_test1 (c1) values (21),(22)")
r, err = tk.Exec("admin check table admin_test, admin_test1")
c.Assert(err, IsNil)
c.Assert(r, IsNil)
// nonexistent table name
err = tk.ExecToErr("admin check table admin_test_error")
c.Assert(err, NotNil)
// different index values
sctx := tk.Se.(sessionctx.Context)
dom := domain.GetDomain(sctx)
is := dom.InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test"))
c.Assert(err, IsNil)
c.Assert(tb.Indices(), HasLen, 1)
_, err = tb.Indices()[0].Create(mock.NewContext(), txn, types.MakeDatums(int64(10)), kv.IntHandle(1), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
errAdmin := tk.ExecToErr("admin check table admin_test")
c.Assert(errAdmin, NotNil)
if config.CheckTableBeforeDrop {
err = tk.ExecToErr("drop table admin_test")
c.Assert(err.Error(), Equals, errAdmin.Error())
// Drop the inconsistent index.
tk.MustExec("alter table admin_test drop index c1")
tk.MustExec("admin check table admin_test")
}
// checksum table test
tk.MustExec("create table checksum_with_index (id int, count int, PRIMARY KEY(id), KEY(count))")
tk.MustExec("create table checksum_without_index (id int, count int, PRIMARY KEY(id))")
r, err = tk.Exec("admin checksum table checksum_with_index, checksum_without_index")
c.Assert(err, IsNil)
res := tk.ResultSetToResult(r, Commentf("admin checksum table"))
// Mocktikv returns 1 for every table/index scan, then we will xor the checksums of a table.
// For "checksum_with_index", we have two checksums, so the result will be 1^1 = 0.
// For "checksum_without_index", we only have one checksum, so the result will be 1.
res.Sort().Check(testkit.Rows("test checksum_with_index 0 2 2", "test checksum_without_index 1 1 1"))
tk.MustExec("drop table if exists t1;")
tk.MustExec("CREATE TABLE t1 (c2 BOOL, PRIMARY KEY (c2));")
tk.MustExec("INSERT INTO t1 SET c2 = '0';")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c3 DATETIME NULL DEFAULT '2668-02-03 17:19:31';")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx2 (c3);")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c4 bit(10) default 127;")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx3 (c4);")
tk.MustExec("admin check table t1;")
// Test that `admin show ddl jobs` still reports the table name after the table has been dropped.
tk.MustExec("drop table if exists t1;")
re := tk.MustQuery("admin show ddl jobs 1")
rows := re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][2], Equals, "t1")
// Test reverse scan of history DDL jobs when the DDL history job queue spans multiple regions.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err = admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
// Split region for history ddl job queues.
m := meta.NewMeta(txn)
startKey := meta.DDLJobHistoryKey(m, 0)
endKey := meta.DDLJobHistoryKey(m, historyJobs[0].ID)
s.cluster.SplitKeys(startKey, endKey, int(historyJobs[0].ID/5))
historyJobs2, err := admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
c.Assert(historyJobs, DeepEquals, historyJobs2)
}
func (s *testSuiteP2) TestAdminShowDDLJobs(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_admin_show_ddl_jobs")
tk.MustExec("use test_admin_show_ddl_jobs")
tk.MustExec("create table t (a int);")
re := tk.MustQuery("admin show ddl jobs 1")
row := re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
jobID, err := strconv.Atoi(row[0].(string))
c.Assert(err, IsNil)
err = kv.RunInNewTxn(context.Background(), s.store, true, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
job, err := t.GetHistoryDDLJob(int64(jobID))
c.Assert(err, IsNil)
c.Assert(job, NotNil)
// Test for compatibility. Old TiDB versions don't have the SchemaName field, and BinlogInfo may be nil.
// See PR: 11561.
job.BinlogInfo = nil
job.SchemaName = ""
err = t.AddHistoryDDLJob(job, true)
c.Assert(err, IsNil)
return nil
})
c.Assert(err, IsNil)
re = tk.MustQuery("admin show ddl jobs 1")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
re = tk.MustQuery("admin show ddl jobs 1 where job_type='create table'")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
c.Assert(row[9], Equals, "<nil>")
// Test the START_TIME and END_TIME field.
re = tk.MustQuery("admin show ddl jobs where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s')")
row = re.Rows()[0]
c.Assert(row[2], Equals, "t")
c.Assert(row[9], Equals, "<nil>")
}
func (s *testSuiteP2) TestAdminChecksumOfPartitionedTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test;")
tk.MustExec("DROP TABLE IF EXISTS admin_checksum_partition_test;")
tk.MustExec("CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;")
tk.MustExec("INSERT INTO admin_checksum_partition_test VALUES (1), (2);")
r := tk.MustQuery("ADMIN CHECKSUM TABLE admin_checksum_partition_test;")
r.Check(testkit.Rows("test admin_checksum_partition_test 1 5 5"))
}
func (s *baseTestSuite) fillData(tk *testkit.TestKit, table string) {
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table))
// insert data
tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table))
tk.CheckExecResult(1, 0)
tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table))
tk.CheckExecResult(1, 0)
}
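// testCase describes a single LOAD DATA scenario: two chunks of raw input
// (data1 and data2), the rows expected in the target table, any input left
// unconsumed by the loader (restData), and the message the statement should report.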
type testCase struct {
data1 []byte
data2 []byte
expected []string
restData []byte
expectedMsg string
}
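// checkCases drives ld through each testCase: it feeds the input, inserts the
// parsed batch, commits the transaction, verifies the table contents with
// selectSQL, and cleans up with deleteSQL before the next case.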
func checkCases(tests []testCase, ld *executor.LoadDataInfo,
c *C, tk *testkit.TestKit, ctx sessionctx.Context, selectSQL, deleteSQL string) {
origin := ld.IgnoreLines
for _, tt := range tests {
ld.IgnoreLines = origin
c.Assert(ctx.NewTxn(context.Background()), IsNil)
ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
ctx.GetSessionVars().StmtCtx.BadNullAsWarning = true
ctx.GetSessionVars().StmtCtx.InLoadDataStmt = true
ctx.GetSessionVars().StmtCtx.InDeleteStmt = false
data, reachLimit, err1 := ld.InsertData(context.Background(), tt.data1, tt.data2)
c.Assert(err1, IsNil)
c.Assert(reachLimit, IsFalse)
err1 = ld.CheckAndInsertOneBatch(context.Background(), ld.GetRows(), ld.GetCurBatchCnt())
c.Assert(err1, IsNil)
ld.SetMaxRowsInBatch(20000)
if tt.restData == nil {
c.Assert(data, HasLen, 0,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
} else {
c.Assert(data, DeepEquals, tt.restData,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
}
ld.SetMessage()
tk.CheckLastMessage(tt.expectedMsg)
ctx.StmtCommit()
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
r := tk.MustQuery(selectSQL)
r.Check(testutil.RowsWithSep("|", tt.expected...))
tk.MustExec(deleteSQL)
}
}
func (s *testSuiteP1) TestSelectWithoutFrom(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("select 1 + 2*3;")
r.Check(testkit.Rows("7"))
r = tk.MustQuery(`select _utf8"string";`)
r.Check(testkit.Rows("string"))
r = tk.MustQuery("select 1 order by 1;")
r.Check(testkit.Rows("1"))
}
// TestSelectBackslashN Issue 3685.
func (s *testSuiteP1) TestSelectBackslashN(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select \N;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "NULL")
c.Assert(rs.Close(), IsNil)
sql = `select "\N";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
tk.MustExec("use test;")
tk.MustExec("create table test (`\\N` int);")
tk.MustExec("insert into test values (1);")
tk.CheckExecResult(1, 0)
sql = "select * from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = `select \N from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select (\N) from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = "select `\\N` from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = "select (`\\N`) from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = `select '\N' from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
sql = `select ('\N') from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
}
// TestSelectNull Issue #4053.
func (s *testSuiteP1) TestSelectNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select nUll;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select (null);`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select null+NULL;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `null+NULL`)
c.Assert(rs.Close(), IsNil)
}
// TestSelectStringLiteral Issue #3686.
func (s *testSuiteP1) TestSelectStringLiteral(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select 'abc';`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
c.Assert(rs.Close(), IsNil)
sql = `select (('abc'));`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
c.Assert(rs.Close(), IsNil)
sql = `select 'abc'+'def';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("0"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `'abc'+'def'`)
c.Assert(rs.Close(), IsNil)
// Below checks whether leading invalid chars are trimmed.
sql = "select '\n';"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("\n"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "")
c.Assert(rs.Close(), IsNil)
sql = "select '\t col';" // Lowercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "col")
c.Assert(rs.Close(), IsNil)
sql = "select '\t Col';" // Uppercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "Col")
c.Assert(rs.Close(), IsNil)
sql = "select '\n\t 中文 col';" // Chinese char is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "中文 col")
c.Assert(rs.Close(), IsNil)
sql = "select ' \r\n .col';" // Punctuation is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, ".col")
c.Assert(rs.Close(), IsNil)
sql = "select ' 😆col';" // Emoji is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "😆col")
c.Assert(rs.Close(), IsNil)
// Below checks whether trailing invalid chars are preserved.
sql = `select 'abc ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc ")
c.Assert(rs.Close(), IsNil)
sql = `select ' abc 123 ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc 123 ")
c.Assert(rs.Close(), IsNil)
// Issue #4239.
sql = `select 'a' ' ' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
c.Assert(rs.Close(), IsNil)
sql = `select 'a' " " "string";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
c.Assert(rs.Close(), IsNil)
sql = `select 'string' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("stringstring"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "string")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssab"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" ' ' "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" ' ' "b" ' ' "d";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b d"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
}
func (s *testSuiteP1) TestSelectLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_limit")
tk.MustExec("insert INTO select_limit VALUES (3, \"hello\");")
tk.CheckExecResult(1, 0)
tk.MustExec("insert INTO select_limit VALUES (4, \"hello\");")
tk.CheckExecResult(1, 0)
r := tk.MustQuery("select * from select_limit limit 1;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from (select * from select_limit limit 1) k where id != 1;")
r.Check(testkit.Rows())
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 1;")
r.Check(testkit.Rows("2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 3;")
r.Check(testkit.Rows("4 hello"))
err := tk.ExecToErr("select * from select_limit limit 18446744073709551616 offset 3;")
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestSelectOrderBy(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_order_test")
// Test star field
r := tk.MustQuery("select * from select_order_test where id = 1 order by id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from select_order_test order by id desc limit 1 ")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from select_order_test order by id + 1 desc limit 1 ")
r.Check(testkit.Rows("2"))
// Test limit
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test order by column position with limit
r = tk.MustQuery("select id as c1, name from select_order_test order by 2, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 100 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello"))
// Test offset overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 100;")
r.Check(testkit.Rows())
// Test limit exceeds int range.
r = tk.MustQuery("select id from select_order_test order by name, id limit 18446744073709551615;")
r.Check(testkit.Rows("1", "2"))
// Test multiple field
r = tk.MustQuery("select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit + order by
for i := 3; i <= 10; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (10086, \"hi\");")
for i := 11; i <= 20; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"hh\");", i))
}
for i := 21; i <= 30; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (1501, \"aa\");")
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 3;")
r.Check(testkit.Rows("11 hh"))
tk.MustExec("drop table select_order_test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (1, 3)")
r = tk.MustQuery("select 1-d as d from t order by d;")
r.Check(testkit.Rows("-2", "-1", "0"))
r = tk.MustQuery("select 1-d as d from t order by d + 1;")
r.Check(testkit.Rows("0", "-1", "-2"))
r = tk.MustQuery("select t.d from t order by d;")
r.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, c int)")
tk.MustExec("insert t values (1, 2, 3)")
r = tk.MustQuery("select b from (select a,b from t order by a,c) t")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select b from (select a,b from t order by a,c limit 1) t")
r.Check(testkit.Rows("2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(1, 1), (2, 2)")
tk.MustQuery("select * from t where 1 order by b").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select * from t where a between 1 and 2 order by a desc").Check(testkit.Rows("2 2", "1 1"))
// Test double read where the topN is pushed down to the first read plan.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values(1, 3, 1)")
tk.MustExec("insert into t values(2, 2, 2)")
tk.MustExec("insert into t values(3, 1, 3)")
tk.MustQuery("select * from t use index(idx) order by a desc limit 1").Check(testkit.Rows("3 1 3"))
// Test double read which needs to keep order.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key b (b))")
tk.Se.GetSessionVars().IndexLookupSize = 3
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, 10-i))
}
tk.MustQuery("select a from t use index(b) order by b").Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
}
func (s *testSuiteP1) TestOrderBy(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 int, c2 int, c3 varchar(20))")
tk.MustExec("insert into t values (1, 2, 'abc'), (2, 1, 'bcd')")
// Fix issue https://github.com/pingcap/tidb/issues/337
tk.MustQuery("select c1 as a, c1 as b from t order by c1").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select c1 as a, t.c1 as a from t order by a desc").Check(testkit.Rows("2 2", "1 1"))
tk.MustQuery("select c1 as c2 from t order by c2").Check(testkit.Rows("1", "2"))
tk.MustQuery("select sum(c1) from t order by sum(c1)").Check(testkit.Rows("3"))
tk.MustQuery("select c1 as c2 from t order by c2 + 1").Check(testkit.Rows("2", "1"))
// Order by position.
tk.MustQuery("select * from t order by 1").Check(testkit.Rows("1 2 abc", "2 1 bcd"))
tk.MustQuery("select * from t order by 2").Check(testkit.Rows("2 1 bcd", "1 2 abc"))
// Order by binary.
tk.MustQuery("select c1, c3 from t order by binary c1 desc").Check(testkit.Rows("2 bcd", "1 abc"))
tk.MustQuery("select c1, c2 from t order by binary c3").Check(testkit.Rows("1 2", "2 1"))
}
func (s *testSuiteP1) TestSelectErrorRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := tk.ExecToErr("select row(1, 1) from test")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select (select 1, 1) from test;")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having (select 1, 1);")
c.Assert(err, NotNil)
}
// TestIssue2612 is related to https://github.com/pingcap/tidb/issues/2612
func (s *testSuiteP1) TestIssue2612(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (
create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00',
finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00');`)
tk.MustExec(`insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22');`)
rs, err := tk.Exec(`select timediff(finish_at, create_at) from t;`)
c.Assert(err, IsNil)
req := rs.NewChunk()
err = rs.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(req.GetRow(0).GetDuration(0, 0).String(), Equals, "-46:09:02")
c.Assert(rs.Close(), IsNil)
}
// TestIssue345 is related to https://github.com/pingcap/tidb/issues/345
func (s *testSuiteP1) TestIssue345(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (c1 int);`)
tk.MustExec(`create table t2 (c2 int);`)
tk.MustExec(`insert into t1 values (1);`)
tk.MustExec(`insert into t2 values (2);`)
tk.MustExec(`update t1, t2 set t1.c1 = 2, t2.c2 = 1;`)
tk.MustExec(`update t1, t2 set c1 = 2, c2 = 1;`)
tk.MustExec(`update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1;`)
// Check t1 content
r := tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("2"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("1"))
tk.MustExec(`update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2;`)
// Check t1 content
r = tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("1"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("2"))
_, err := tk.Exec(`update t1 as a, t2 set t1.c1 = 10;`)
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestIssue5055(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (a int);`)
tk.MustExec(`create table t2 (a int);`)
tk.MustExec(`insert into t1 values(1);`)
tk.MustExec(`insert into t2 values(1);`)
result := tk.MustQuery("select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;")
result.Check(testkit.Rows("1 1"))
}
func (s *testSuiteWithData) TestSetOperation(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1, t2, t3`)
tk.MustExec(`create table t1(a int)`)
tk.MustExec(`create table t2 like t1`)
tk.MustExec(`create table t3 like t1`)
tk.MustExec(`insert into t1 values (1),(1),(2),(3),(null)`)
tk.MustExec(`insert into t2 values (1),(2),(null),(null)`)
tk.MustExec(`insert into t3 values (2),(3)`)
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
func (s *testSuiteWithData) TestSetOperationOnDiffColType(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1, t2, t3`)
tk.MustExec(`create table t1(a int, b int)`)
tk.MustExec(`create table t2(a int, b varchar(20))`)
tk.MustExec(`create table t3(a int, b decimal(30,10))`)
tk.MustExec(`insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null)`)
tk.MustExec(`insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3')`)
tk.MustExec(`insert into t3 values (2,2.1),(3,3)`)
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
// issue-23038: wrong key range of index scan for year column
func (s *testSuiteWithData) TestIndexScanWithYearCol(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (c1 year(4), c2 int, key(c1));")
tk.MustExec("insert into t values(2001, 1);")
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
func (s *testSuiteP2) TestUnion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
testSQL := `drop table if exists union_test; create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `drop table if exists union_test;`
tk.MustExec(testSQL)
testSQL = `create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `insert union_test values (1),(2)`
tk.MustExec(testSQL)
testSQL = `select * from (select id from union_test union select id from union_test) t order by id;`
r := tk.MustQuery(testSQL)
r.Check(testkit.Rows("1", "2"))
r = tk.MustQuery("select 1 union all select 1")
r.Check(testkit.Rows("1", "1"))
r = tk.MustQuery("select 1 union all select 1 union select 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1, 1")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from union_test union all (select 1) order by id desc")
r.Check(testkit.Rows("2", "1", "1"))
r = tk.MustQuery("select id as a from union_test union (select 1) order by a desc")
r.Check(testkit.Rows("2", "1"))
r = tk.MustQuery(`select null as a union (select "abc") order by a`)
r.Check(testkit.Rows("<nil>", "abc"))
r = tk.MustQuery(`select "abc" as a union (select 1) order by a`)
r.Check(testkit.Rows("1", "abc"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (c int, d int)")
tk.MustExec("insert t1 values (NULL, 1)")
tk.MustExec("insert t1 values (1, 1)")
tk.MustExec("insert t1 values (1, 2)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (1, 3)")
tk.MustExec("insert t2 values (1, 1)")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (c int, d int)")
tk.MustExec("insert t3 values (3, 2)")
tk.MustExec("insert t3 values (4, 3)")
r = tk.MustQuery(`select sum(c1), c2 from (select c c1, d c2 from t1 union all select d c1, c c2 from t2 union all select c c1, d c2 from t3) x group by c2 order by c2`)
r.Check(testkit.Rows("5 1", "4 2", "4 3"))
tk.MustExec("drop table if exists t1, t2, t3")
tk.MustExec("create table t1 (a int primary key)")
tk.MustExec("create table t2 (a int primary key)")
tk.MustExec("create table t3 (a int primary key)")
tk.MustExec("insert t1 values (7), (8)")
tk.MustExec("insert t2 values (1), (9)")
tk.MustExec("insert t3 values (2), (3)")
r = tk.MustQuery("select * from t1 union all select * from t2 union all (select * from t3) order by a limit 2")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert t1 values (2), (1)")
tk.MustExec("insert t2 values (3), (4)")
r = tk.MustQuery("select * from t1 union all (select * from t2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select (select * from t1 where a != t.a union all (select * from t2 where a != t.a) order by a limit 1) from t1 t")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int unsigned primary key auto_increment, c1 int, c2 int, index c1_c2 (c1, c2))")
tk.MustExec("insert into t (c1, c2) values (1, 1)")
tk.MustExec("insert into t (c1, c2) values (1, 2)")
tk.MustExec("insert into t (c1, c2) values (2, 3)")
r = tk.MustQuery("select * from (select * from t where t.c1 = 1 union select * from t where t.id = 1) s order by s.id")
r.Check(testkit.Rows("1 1 1", "2 1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (f1 DATE)")
tk.MustExec("INSERT INTO t VALUES ('1978-11-26')")
r = tk.MustQuery("SELECT f1+0 FROM t UNION SELECT f1+0 FROM t")
r.Check(testkit.Rows("19781126"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int, b int)")
tk.MustExec("INSERT INTO t VALUES ('1', '1')")
r = tk.MustQuery("select b from (SELECT * FROM t UNION ALL SELECT a, b FROM t order by a) t")
r.Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DECIMAL(4,2))")
tk.MustExec("INSERT INTO t VALUE(12.34)")
r = tk.MustQuery("SELECT 1 AS c UNION select a FROM t")
r.Sort().Check(testkit.Rows("1.00", "12.34"))
// #issue3771
r = tk.MustQuery("SELECT 'a' UNION SELECT CONCAT('a', -4)")
r.Sort().Check(testkit.Rows("a", "a-4"))
// test race
tk.MustQuery("SELECT @x:=0 UNION ALL SELECT @x:=0 UNION ALL SELECT @x")
// test result field type
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("CREATE TABLE t1 (a date)")
tk.MustExec("CREATE TABLE t2 (a date)")
tk.MustExec("SELECT a from t1 UNION select a FROM t2")
tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + " `a` date DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Move from session test.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c double);")
tk.MustExec("create table t2 (c double);")
tk.MustExec("insert into t1 value (73);")
tk.MustExec("insert into t2 value (930);")
// Setting an unspecified column flen to 0 used to cause a bug in union.
// This test prevents that bug from reappearing.
tk.MustQuery("select c from t1 union (select c from t2) order by c").Check(testkit.Rows("73", "930"))
// issue 5703
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a date)")
tk.MustExec("insert into t value ('2017-01-01'), ('2017-01-02')")
r = tk.MustQuery("(select a from t where a < 0) union (select a from t where a > 0) order by a")
r.Check(testkit.Rows("2017-01-01", "2017-01-02"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(0),(0)")
tk.MustQuery("select 1 from (select a from t union all select a from t) tmp").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select 10 as a from dual union select a from t order by a desc limit 1 ").Check(testkit.Rows("10"))
tk.MustQuery("select -10 as a from dual union select a from t order by a limit 1 ").Check(testkit.Rows("-10"))
tk.MustQuery("select count(1) from (select a from t union all select a from t) tmp").Check(testkit.Rows("4"))
err := tk.ExecToErr("select 1 from (select a from t limit 1 union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage))
err = tk.ExecToErr("select 1 from (select a from t order by a union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage))
_, err = tk.Exec("(select a from t order by a) union all select a from t limit 1 union all select a from t limit 1")
c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongUsage), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("(select a from t limit 1) union all select a from t limit 1")
c.Assert(err, IsNil)
_, err = tk.Exec("(select a from t order by a) union all select a from t order by a")
c.Assert(err, IsNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(1),(2),(3)")
tk.MustQuery("(select a from t order by a limit 2) union all (select a from t order by a desc limit 2) order by a desc limit 1,2").Check(testkit.Rows("2", "2"))
tk.MustQuery("select a from t union all select a from t order by a desc limit 5").Check(testkit.Rows("3", "3", "2", "2", "1"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select a from t group by a order by a").Check(testkit.Rows("1", "2", "2", "3", "3"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select 33 as a order by a desc limit 2").Check(testkit.Rows("33", "3"))
tk.MustQuery("select 1 union select 1 union all select 1").Check(testkit.Rows("1", "1"))
tk.MustQuery("select 1 union all select 1 union select 1").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec(`create table t1(a bigint, b bigint);`)
tk.MustExec(`create table t2(a bigint, b bigint);`)
tk.MustExec(`insert into t1 values(1, 1);`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t2 values(1, 1);`)
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustExec(`set @@sql_mode="";`)
tk.MustQuery(`select count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("128"))
tk.MustQuery(`select tmp.a, count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("1 128"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t value(1 ,2)")
tk.MustQuery("select a, b from (select a, 0 as d, b from t union all select a, 0 as d, b from t) test;").Check(testkit.Rows("1 2", "1 2"))
// #issue 8141
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("insert into t1 value(1,2),(1,1),(2,2),(2,2),(3,2),(3,2)")
tk.MustExec("set @@tidb_init_chunk_size=2;")
tk.MustQuery("select count(*) from (select a as c, a as d from t1 union all select a, b from t1) t;").Check(testkit.Rows("12"))
// #issue 8189 and #issue 8199
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustQuery("select a from t1 union select a from t1 order by (select a+1);").Check(testkit.Rows("1", "2", "3"))
// #issue 8201
for i := 0; i < 4; i++ {
tk.MustQuery("SELECT(SELECT 0 AS a FROM dual UNION SELECT 1 AS a FROM dual ORDER BY a ASC LIMIT 1) AS dev").Check(testkit.Rows("0"))
}
// #issue 8231
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE t1 (uid int(1))")
tk.MustExec("INSERT INTO t1 SELECT 150")
tk.MustQuery("SELECT 'a' UNION SELECT uid FROM t1 order by 1 desc;").Check(testkit.Rows("a", "150"))
// #issue 8196
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(3,'c'),(4,'d'),(5,'f'),(6,'e')")
tk.MustExec("analyze table t1")
tk.MustExec("analyze table t2")
_, err = tk.Exec("(select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by t1.b")
c.Assert(err.Error(), Equals, "[planner:1250]Table 't1' from one of the SELECTs cannot be used in global ORDER clause")
// #issue 9900
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b decimal(6, 3))")
tk.MustExec("insert into t values(1, 1.000)")
tk.MustQuery("select count(distinct a), sum(distinct a), avg(distinct a) from (select a from t union all select b from t) tmp;").Check(testkit.Rows("1 1.000 1.0000000"))
// #issue 23832
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bit(20), b float, c double, d int)")
tk.MustExec("insert into t values(10, 10, 10, 10), (1, -1, 2, -2), (2, -2, 1, 1), (2, 1.1, 2.1, 10.1)")
tk.MustQuery("select a from t union select 10 order by a").Check(testkit.Rows("1", "2", "10"))
}
func (s *testSuite2) TestUnionLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists union_limit")
tk.MustExec("create table union_limit (id int) partition by hash(id) partitions 30")
for i := 0; i < 60; i++ {
tk.MustExec(fmt.Sprintf("insert into union_limit values (%d)", i))
}
// Cover the code for worker count limit in the union executor.
tk.MustQuery("select * from union_limit limit 10")
}
func (s *testSuiteP1) TestNeighbouringProj(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("create table t2(a int, b int)")
tk.MustExec("insert into t1 value(1, 1), (2, 2)")
tk.MustExec("insert into t2 value(1, 1), (2, 2)")
tk.MustQuery("select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t;").Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint, b bigint, c bigint);")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3);")
rs := tk.MustQuery("select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10;")
rs.Check(testkit.Rows("1 1 1", "1 2 2", "1 3 3"))
}
func (s *testSuiteP1) TestIn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (c1 int primary key, c2 int, key c (c2));`)
for i := 0; i <= 200; i++ {
tk.MustExec(fmt.Sprintf("insert t values(%d, %d)", i, i))
}
queryStr := `select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2`
r := tk.MustQuery(queryStr)
r.Check(testkit.Rows("7", "9", "10", "17", "18", "98", "100", "106", "111", "112"))
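// '7a' is coerced to the number 7 for the comparison, so the row with c1 = 7 matches.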
queryStr = `select c2 from t where c1 in ('7a')`
tk.MustQuery(queryStr).Check(testkit.Rows("7"))
}
func (s *testSuiteP1) TestTablePKisHandleScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int PRIMARY KEY AUTO_INCREMENT)")
tk.MustExec("insert t values (),()")
tk.MustExec("insert t values (-100),(0)")
tests := []struct {
sql string
result [][]interface{}
}{
{
"select * from t",
testkit.Rows("-100", "1", "2", "3"),
},
{
"select * from t where a = 1",
testkit.Rows("1"),
},
{
"select * from t where a != 1",
testkit.Rows("-100", "2", "3"),
},
{
"select * from t where a >= '1.1'",
testkit.Rows("2", "3"),
},
{
"select * from t where a < '1.1'",
testkit.Rows("-100", "1"),
},
{
"select * from t where a > '-100.1' and a < 2",
testkit.Rows("-100", "1"),
},
{
"select * from t where a is null",
testkit.Rows(),
}, {
"select * from t where a is true",
testkit.Rows("-100", "1", "2", "3"),
}, {
"select * from t where a is false",
testkit.Rows(),
},
{
"select * from t where a in (1, 2)",
testkit.Rows("1", "2"),
},
{
"select * from t where a between 1 and 2",
testkit.Rows("1", "2"),
},
}
for _, tt := range tests {
result := tk.MustQuery(tt.sql)
result.Check(tt.result)
}
}
func (s *testSuite8) TestIndexScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (-1), (2), (3), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'")
result.Check(testkit.Rows("-1", "3", "5", "6", "7", "9"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (0)")
result = tk.MustQuery("select NULL from t ")
result.Check(testkit.Rows("<nil>"))
// Test for double read.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (5, 0)")
tk.MustExec("insert t values (4, 0)")
tk.MustExec("insert t values (3, 0)")
tk.MustExec("insert t values (2, 0)")
tk.MustExec("insert t values (1, 0)")
tk.MustExec("insert t values (0, 0)")
result = tk.MustQuery("select * from t order by a limit 3")
result.Check(testkit.Rows("0 0", "1 0", "2 0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (0, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (3, 2)")
tk.MustExec("insert t values (4, 1)")
tk.MustExec("insert t values (5, 2)")
result = tk.MustQuery("select * from t where a < 5 and b = 1 limit 2")
result.Check(testkit.Rows("0 1", "2 1"))
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (col0)")
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (col1)")
tk.MustExec("CREATE INDEX idx_tab1_3 on tab1 (col3)")
tk.MustExec("CREATE INDEX idx_tab1_4 on tab1 (col4)")
tk.MustExec("INSERT INTO tab1 VALUES(1,37,20.85,30,10.69)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42)")
result.Check(testkit.Rows())
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (a)")
tk.MustExec("INSERT INTO tab1 VALUES(1,1,1)")
tk.MustExec("INSERT INTO tab1 VALUES(2,2,1)")
tk.MustExec("INSERT INTO tab1 VALUES(3,1,2)")
tk.MustExec("INSERT INTO tab1 VALUES(4,2,2)")
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 3 AND a = 1")
result.Check(testkit.Rows("1 1 1", "3 1 2"))
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2")
result.Check(testkit.Rows("3 1 2"))
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (b, a)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE b > 1")
result.Check(testkit.Rows("3", "4"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a varchar(3), index(a))")
tk.MustExec("insert t values('aaa'), ('aab')")
result = tk.MustQuery("select * from t where a >= 'aaaa' and a < 'aabb'")
result.Check(testkit.Rows("aab"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, index(c))")
tk.MustExec("insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5)")
// Test for double read and top n.
result = tk.MustQuery("select a from t where c >= 2 order by b desc limit 1")
result.Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(50) primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values('aa', 1, 1)")
tk.MustQuery("select * from t use index(idx) where a > 'a'").Check(testkit.Rows("aa 1 1"))
// Fix for issue #9636.
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (a int, KEY (a))")
result = tk.MustQuery(`SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count`)
result.Check(testkit.Rows())
}
func (s *testSuiteP1) TestIndexReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int, index idx (b))")
tk.MustExec("insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by b desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
result = tk.MustQuery("select b from t where b <3 or (b >=6 and b < 8) order by b desc")
result.Check(testkit.Rows("7", "6", "2", "1", "0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, index idx (b, a))")
tk.MustExec("insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0)")
result = tk.MustQuery("select b, a from t order by b, a desc")
result.Check(testkit.Rows("0 2", "0 1", "0 0", "1 2", "1 1", "1 0", "2 2", "2 1", "2 0"))
}
func (s *testSuiteP1) TestTableReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int)")
tk.MustExec("insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by a desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1"))
result = tk.MustQuery("select a from t where a <3 or (a >=6 and a < 8) order by a desc")
result.Check(testkit.Rows("7", "6", "2", "1"))
}
func (s *testSuiteP1) TestDefaultNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int default 1, c int)")
tk.MustExec("insert t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("update t set b = NULL where a = 1")
tk.MustQuery("select * from t").Check(testkit.Rows("1 <nil> <nil>"))
tk.MustExec("update t set c = 1")
tk.MustQuery("select * from t ").Check(testkit.Rows("1 <nil> 1"))
tk.MustExec("delete from t where a = 1")
tk.MustExec("insert t (a) values (1)")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
}
func (s *testSuiteP1) TestUnsignedPKColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a));")
tk.MustExec("insert t values (1, 1, 1)")
result := tk.MustQuery("select * from t;")
result.Check(testkit.Rows("1 1 1"))
tk.MustExec("update t set c=2 where a=1;")
result = tk.MustQuery("select * from t where b=1;")
result.Check(testkit.Rows("1 1 2"))
}
func (s *testSuiteP1) TestJSON(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json (id int, a json)")
tk.MustExec(`insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}')`)
tk.MustExec(`insert into test_json (id, a) values (2, "null")`)
tk.MustExec(`insert into test_json (id, a) values (3, null)`)
tk.MustExec(`insert into test_json (id, a) values (4, 'true')`)
tk.MustExec(`insert into test_json (id, a) values (5, '3')`)
tk.MustExec(`insert into test_json (id, a) values (5, '4.0')`)
tk.MustExec(`insert into test_json (id, a) values (6, '"string"')`)
result := tk.MustQuery(`select tj.a from test_json tj order by tj.id`)
result.Check(testkit.Rows(`{"a": [1, "2", {"aa": "bb"}, 4], "b": true}`, "null", "<nil>", "true", "3", "4", `"string"`))
// Check json_type function
result = tk.MustQuery(`select json_type(a) from test_json tj order by tj.id`)
result.Check(testkit.Rows("OBJECT", "NULL", "<nil>", "BOOLEAN", "INTEGER", "DOUBLE", "STRING"))
// Check JSON comparison with primitive values.
result = tk.MustQuery(`select a from test_json tj where a = 3`)
result.Check(testkit.Rows("3"))
result = tk.MustQuery(`select a from test_json tj where a = 4.0`)
result.Check(testkit.Rows("4"))
result = tk.MustQuery(`select a from test_json tj where a = true`)
result.Check(testkit.Rows("true"))
result = tk.MustQuery(`select a from test_json tj where a = "string"`)
result.Check(testkit.Rows(`"string"`))
// Check cast(true/false as JSON).
result = tk.MustQuery(`select cast(true as JSON)`)
result.Check(testkit.Rows(`true`))
result = tk.MustQuery(`select cast(false as JSON)`)
result.Check(testkit.Rows(`false`))
// Check the two JSON extraction operators: ->> returns the unquoted value, while -> keeps the JSON form.
result = tk.MustQuery(`select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`bb true`))
result = tk.MustQuery(`select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`"bb" true`))
// Check some DDL limits for TEXT/BLOB/JSON columns.
var err error
var terr *terror.Error
_, err = tk.Exec(`create table test_bad_json(a json default '{}')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a blob default 'hello')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a text default 'world')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
// Check that JSON fields cannot be used as index keys.
_, err = tk.Exec(`create table test_bad_json(id int, a json, key (a))`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrJSONUsedAsKey))
// Check CAST ... AS JSON.
result = tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`)
result.Check(testkit.Rows(`3 {} <nil>`))
tk.MustQuery("select a, count(1) from test_json group by a order by a").Check(testkit.Rows(
"<nil> 1",
"null 1",
"3 1",
"4 1",
`"string" 1`,
"{\"a\": [1, \"2\", {\"aa\": \"bb\"}, 4], \"b\": true} 1",
"true 1"))
// Check casting JSON to decimal.
// NOTE: this test case is commented out because of a bug; uncomment it after the bug is fixed.
// TODO: Fix bug https://github.com/pingcap/tidb/issues/12178
// tk.MustExec("drop table if exists test_json")
// tk.MustExec("create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json );")
// tk.MustExec(`insert into test_json (b) values
// ('{"c": "1267.1"}'),
// ('{"c": "1267.01"}'),
// ('{"c": "1267.1234"}'),
// ('{"c": "1267.3456"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}');`)
//
// tk.MustQuery("select a from test_json;").Check(testkit.Rows("1267.10", "1267.01", "1267.12",
// "1267.35", "1234567890123456789012345678901234567890123456789012345.00",
// "1234567890123456789012345678901234567890123456789012345.12"))
}
func (s *testSuiteP1) TestMultiUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_mu (a int primary key, b int, c int)`)
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9)`)
// Test INSERT ... ON DUPLICATE KEY UPDATE set_lists.
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b`)
result := tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 3 3`, `4 5 6`, `7 8 9`))
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 6`, `7 8 9`))
// Test UPDATE ... set_lists.
tk.MustExec(`UPDATE test_mu SET b = 0, c = b WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 0 5`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = 8, b = c WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 8`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = b, b = c WHERE a = 7`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 8`, `7 9 8`))
}
func (s *testSuiteP1) TestGeneratedColumnWrite(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
_, err := tk.Exec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual)`)
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("c").Error())
tk.MustExec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (b+8) virtual)`)
tk.MustExec(`CREATE TABLE test_gc_write_1 (a int primary key, b int, c int)`)
tests := []struct {
stmt string
err int
}{
// Can't modify generated column by values.
{`insert into test_gc_write (a, b, c) values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
{`insert into test_gc_write values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by select clause.
{`insert into test_gc_write select 1, 1, 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by on duplicate clause.
{`insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by set.
{`insert into test_gc_write set a = 1, b = 1, c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by update clause.
{`update test_gc_write set c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by multi-table update clause.
{`update test_gc_write, test_gc_write_1 set test_gc_write.c = 1`, mysql.ErrBadGeneratedColumn},
// Can insert without generated columns.
{`insert into test_gc_write (a, b) values (1, 1)`, 0},
{`insert into test_gc_write set a = 2, b = 2`, 0},
{`insert into test_gc_write (b) select c from test_gc_write`, 0},
// Can update without generated columns.
{`update test_gc_write set b = 2 where a = 2`, 0},
{`update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4`, 0},
// The following statements are rejected, the same as in MySQL 5.7:
{`insert into test_gc_write values (1, 1)`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write select 1, 1`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (c) select a, b from test_gc_write`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (b, c) select a, b from test_gc_write`, mysql.ErrBadGeneratedColumn},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil, Commentf("sql is `%v`", tt.stmt))
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err), Commentf("sql is %v", tt.stmt))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnRead tests selecting generated columns from a table.
// They should be calculated from their generation expressions.
func (s *testSuiteP1) TestGeneratedColumnRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored, e int as (c*2))`)
result := tk.MustQuery(`SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd'`)
result.Check(testkit.Rows("`a` * `b`"))
// Insert only columns a and b, leaving c and d to be calculated from them.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read SET a = 5, b = 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 10 15 50 30`))
tk.MustExec(`REPLACE INTO test_gc_read (a, b) VALUES (5, 6)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 6 11 30 22`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 9 14 45 28`))
// Test selecting only generated columns without their dependencies.
result = tk.MustQuery(`SELECT c, d FROM test_gc_read`)
result.Check(testkit.Rows(`<nil> <nil>`, `3 2`, `7 12`, `14 45`))
// Test selecting only a virtual generated column that refers to other virtual generated columns.
result = tk.MustQuery(`SELECT e FROM test_gc_read`)
result.Check(testkit.Rows(`<nil>`, `6`, `14`, `28`))
// Test order of on duplicate key update list.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `6 6 12 36 24`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test where-conditions on virtual/stored generated columns.
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 7`)
result.Check(testkit.Rows(`3 4 7 12 14`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 64`)
result.Check(testkit.Rows(`8 8 16 64 32`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE e = 6`)
result.Check(testkit.Rows(`1 2 3 2 6`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read SET a = a + 100 WHERE c = 7`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 107`)
result.Check(testkit.Rows(`103 4 107 412 214`))
// Test update with a table alias and where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 207`)
result.Check(testkit.Rows(`203 4 207 812 414`))
tk.MustExec(`UPDATE test_gc_read SET a = a - 200 WHERE d = 812`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 12`)
result.Check(testkit.Rows(`3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read set a = 4, b = d + 1`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`,
`4 <nil> <nil> <nil> <nil>`, `8 8 16 64 32`))
tk.MustExec(`DELETE FROM test_gc_read where a = 4`)
// Test on-conditions on virtual/stored generated columns.
tk.MustExec(`CREATE TABLE test_gc_help(a int primary key, b int, c int, d int, e int)`)
tk.MustExec(`INSERT INTO test_gc_help(a, b, c, d, e) SELECT * FROM test_gc_read`)
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.e = t2.e ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test generated column in subqueries.
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`))
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b`)
result.Check(testkit.Rows(`2`, `4`, `8`))
// Test aggregation on virtual/stored generated columns.
result = tk.MustQuery(`SELECT c, sum(a) aa, max(d) dd, sum(e) ee FROM test_gc_read GROUP BY c ORDER BY aa`)
result.Check(testkit.Rows(`<nil> 0 <nil> <nil>`, `3 1 2 6`, `7 3 12 14`, `16 8 64 32`))
result = tk.MustQuery(`SELECT a, sum(c), sum(d), sum(e) FROM test_gc_read GROUP BY a ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 3 2 6`, `3 7 12 14`, `8 16 64 32`))
// Test multi-update on generated columns.
tk.MustExec(`UPDATE test_gc_read m, test_gc_read n SET m.b = m.b + 10, n.b = n.b + 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 12 13 12 26`, `3 14 17 42 34`, `8 18 26 144 52`))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(8)")
tk.MustExec("update test_gc_read set a = a+1 where a in (select a from t)")
result = tk.MustQuery("select * from test_gc_read order by a")
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 12 13 12 26`, `3 14 17 42 34`, `9 18 27 162 54`))
// Test different types between generation expression and generated column.
tk.MustExec(`CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED)`)
tk.MustExec(`INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a')`)
result = tk.MustQuery(`SELECT c, d FROM test_gc_read_cast`)
result.Check(testkit.Rows(`3 3`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b))))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_1`)
result.Check(testkit.Rows(`yellow`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a'))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}')`)
result = tk.MustQuery(`SELECT b FROM test_gc_read_cast_2`)
result.Check(testkit.Rows(`{"key": "测"}`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_3( a JSON, b JSON AS (a->>'$.a'), c INT AS (b * 3.14) )`)
tk.MustExec(`INSERT INTO test_gc_read_cast_3(a) VALUES ('{"a": "5"}')`)
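// b extracts the JSON value 5, so c is 5 * 3.14 = 15.7, which rounds to 16 in the INT column.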
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_3`)
result.Check(testkit.Rows(`16`))
_, err := tk.Exec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a')`)
c.Assert(err, NotNil)
// Test reading generated columns after dropping an irrelevant column.
tk.MustExec(`DROP TABLE IF EXISTS test_gc_read_m`)
tk.MustExec(`CREATE TABLE test_gc_read_m (a int primary key, b int, c int as (a+1), d int as (c*2))`)
tk.MustExec(`INSERT INTO test_gc_read_m(a) values (1), (2)`)
tk.MustExec(`ALTER TABLE test_gc_read_m DROP b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read_m`)
result.Check(testkit.Rows(`1 2 4`, `2 3 6`))
// Test not null generated columns.
tk.MustExec(`CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored)`)
tk.MustExec(`CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null)`)
tests := []struct {
stmt string
err int
}{
// Can't insert these records because the generated columns are NOT NULL.
{`insert into test_gc_read_1(a, b) values (1, null)`, mysql.ErrBadNull},
{`insert into test_gc_read_2(a, b) values (1, null)`, mysql.ErrBadNull},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnPointGet tests reading generated columns through point get and batch point get plans.
func (s *testSuiteP1) TestGeneratedColumnPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists tu")
tk.MustExec("CREATE TABLE tu(a int, b int, c int GENERATED ALWAYS AS (a + b) VIRTUAL, d int as (a * b) stored, " +
"e int GENERATED ALWAYS as (b * 2) VIRTUAL, PRIMARY KEY (a), UNIQUE KEY ukc (c), unique key ukd(d), key ke(e))")
tk.MustExec("insert into tu(a, b) values(1, 2)")
tk.MustExec("insert into tu(a, b) values(5, 6)")
tk.MustQuery("select * from tu for update").Check(testkit.Rows("1 2 3 2 4", "5 6 11 30 12"))
tk.MustQuery("select * from tu where a = 1").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where a in (1, 2)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where c in (1, 2, 3)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where c = 3").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select d, e from tu where c = 3").Check(testkit.Rows("2 4"))
tk.MustQuery("select * from tu where d in (1, 2, 3)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where d = 2").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select c, d from tu where d = 2").Check(testkit.Rows("3 2"))
tk.MustQuery("select d, e from tu where e = 4").Check(testkit.Rows("2 4"))
tk.MustQuery("select * from tu where e = 4").Check(testkit.Rows("1 2 3 2 4"))
tk.MustExec("update tu set a = a + 1, b = b + 1 where c = 11")
tk.MustQuery("select * from tu for update").Check(testkit.Rows("1 2 3 2 4", "6 7 13 42 14"))
tk.MustQuery("select * from tu where a = 6").Check(testkit.Rows("6 7 13 42 14"))
tk.MustQuery("select * from tu where c in (5, 6, 13)").Check(testkit.Rows("6 7 13 42 14"))
tk.MustQuery("select b, c, e, d from tu where c = 13").Check(testkit.Rows("7 13 14 42"))
tk.MustQuery("select a, e, d from tu where c in (5, 6, 13)").Check(testkit.Rows("6 14 42"))
tk.MustExec("drop table if exists tu")
}
func (s *testSuiteP2) TestToPBExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.4, 2.4)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a < 2.399999")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where a <= 1.1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b >= 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where not (b = 1)")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b&1 = a|1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b != 2 and b <=> 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b in (3)")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b not in (1, 2)")
result.Check(testkit.Rows("3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a varchar(255), b int)")
tk.MustExec("insert t values ('abc123', 1)")
tk.MustExec("insert t values ('ab123', 2)")
result = tk.MustQuery("select * from t where a like 'ab%'")
result.Check(testkit.Rows("abc123 1", "ab123 2"))
result = tk.MustQuery("select * from t where a like 'ab_12'")
result.Check(nil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustExec("insert t values (1)")
tk.MustExec("insert t values (2)")
result = tk.MustQuery("select * from t where not (a = 1)")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select * from t where not(not (a = 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select * from t where not(a != 1 and a != 2)")
result.Check(testkit.Rows("1", "2"))
}
func (s *testSuiteP2) TestDatumXAPI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.2, 2.2)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a time(3), b time, index idx_a (a))")
tk.MustExec("insert t values ('11:11:11', '11:11:11')")
tk.MustExec("insert t values ('11:11:12', '11:11:12')")
tk.MustExec("insert t values ('11:11:13', '11:11:13')")
result = tk.MustQuery("select * from t where a > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
result = tk.MustQuery("select * from t where b > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
}
func (s *testSuiteP2) TestSQLMode(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a tinyint not null)")
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert t values ()")
c.Check(err, NotNil)
_, err = tk.Exec("insert t values ('1000')")
c.Check(err, NotNil)
tk.MustExec("create table if not exists tdouble (a double(3,2))")
_, err = tk.Exec("insert tdouble values (10.23)")
c.Check(err, NotNil)
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values ()")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1364 Field 'a' doesn't have a default value"))
tk.MustExec("insert t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert ignore t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t select null")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t values (1000)")
tk.MustQuery("select * from t order by a").Check(testkit.Rows("0", "0", "0", "0", "127"))
tk.MustExec("insert tdouble values (10.23)")
tk.MustQuery("select * from tdouble").Check(testkit.Rows("9.99"))
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
tk.MustExec("set @@global.sql_mode = ''")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("drop table if exists t2")
tk2.MustExec("create table t2 (a varchar(3))")
tk2.MustExec("insert t2 values ('abcd')")
tk2.MustQuery("select * from t2").Check(testkit.Rows("abc"))
// The first session is still in strict mode.
_, err = tk.Exec("insert t2 values ('abcd')")
c.Check(err, NotNil)
// Restore original global strict mode.
tk.MustExec("set @@global.sql_mode = 'STRICT_TRANS_TABLES'")
}
func (s *testSuiteP2) TestTableDual(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
result := tk.MustQuery("Select 1")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select count(*) from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual where 1")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
tk.MustQuery("select t1.* from t t1, t t2 where t1.a=t2.a and 1=0").Check(testkit.Rows())
}
func (s *testSuiteP2) TestTableScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use information_schema")
result := tk.MustQuery("select * from schemata")
// There must be these tables: information_schema, mysql, performance_schema and test.
c.Assert(len(result.Rows()), GreaterEqual, 4)
tk.MustExec("use test")
tk.MustExec("create database mytest")
rowStr1 := fmt.Sprintf("%s %s %s %s %v", "def", "mysql", "utf8mb4", "utf8mb4_bin", nil)
rowStr2 := fmt.Sprintf("%s %s %s %s %v", "def", "mytest", "utf8mb4", "utf8mb4_bin", nil)
tk.MustExec("use information_schema")
result = tk.MustQuery("select * from schemata where schema_name = 'mysql'")
result.Check(testkit.Rows(rowStr1))
result = tk.MustQuery("select * from schemata where schema_name like 'my%'")
result.Check(testkit.Rows(rowStr1, rowStr2))
result = tk.MustQuery("select 1 from tables limit 1")
result.Check(testkit.Rows("1"))
}
func (s *testSuiteP2) TestAdapterStatement(c *C) {
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
se.GetSessionVars().TxnCtx.InfoSchema = domain.GetDomain(se).InfoSchema()
compiler := &executor.Compiler{Ctx: se}
stmtNode, err := s.ParseOneStmt("select 1", "", "")
c.Check(err, IsNil)
stmt, err := compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "select 1")
stmtNode, err = s.ParseOneStmt("create table test.t (a int)", "", "")
c.Check(err, IsNil)
stmt, err = compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "create table test.t (a int)")
}
func (s *testSuiteP2) TestIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use mysql")
ctx := tk.Se.(sessionctx.Context)
tests := map[string]bool{
"select * from help_topic where name='aaa'": false,
"select 1 from help_topic where name='aaa'": false,
"select * from help_topic where help_topic_id=1": true,
"select * from help_topic where help_category_id=1": false,
}
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
preprocessorReturn := &plannercore.PreprocessorReturn{}
err = plannercore.Preprocess(ctx, stmtNode, plannercore.WithPreprocessorReturn(preprocessorReturn))
c.Check(err, IsNil)
p, _, err := planner.Optimize(context.TODO(), ctx, stmtNode, preprocessorReturn.InfoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
func (s *testSuiteP2) TestClusteredIndexIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop database if exists test_cluster_index_is_point_get;")
tk.MustExec("create database test_cluster_index_is_point_get;")
tk.MustExec("use test_cluster_index_is_point_get;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a varchar(255), b int, c char(10), primary key (c, a));")
ctx := tk.Se.(sessionctx.Context)
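// Per the checks below, the plan is a point get only when the query has equality conditions
// on all clustered primary-key columns (c, a) and no extra filters.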
tests := map[string]bool{
"select 1 from t where a='x'": false,
"select * from t where c='x'": false,
"select * from t where a='x' and c='x'": true,
"select * from t where a='x' and c='x' and b=1": false,
}
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
preprocessorReturn := &plannercore.PreprocessorReturn{}
err = plannercore.Preprocess(ctx, stmtNode, plannercore.WithPreprocessorReturn(preprocessorReturn))
c.Check(err, IsNil)
p, _, err := planner.Optimize(context.TODO(), ctx, stmtNode, preprocessorReturn.InfoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
func (s *testSerialSuite) TestPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table point_get (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into point_get values (1, 1, 1)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
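// The reader below pauses after its first kv get (step1/step2 failpoints); the concurrent
// update from tk2 must not be visible to it, so it still reads the original c = 1.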
go func() {
ctx := context.WithValue(context.Background(), "pointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from point_get where b = 1")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait for the point get executor's first kv get.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
func (s *testSerialSuite) TestBatchPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table batch_point_get (a int, b int, c int, unique key k_b(a, b, c))`)
tk1.MustExec("insert into batch_point_get values (1, 1, 1), (2, 3, 4), (3, 4, 5)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "batchPointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from batch_point_get where (a, b, c) in ((1, 1, 1))")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait for the batch point get executor's first kv get.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update batch_point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
func (s *testSerialSuite) TestSplitRegionTimeout(c *C) {
c.Assert(failpoint.Enable("tikvclient/mockSplitRegionTimeout", `return(true)`), IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
tk.MustExec(`set @@tidb_wait_split_region_timeout=1`)
// The result "0 0" means 0 regions were split and 0 regions finished scattering before the timeout.
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("0 0"))
err := failpoint.Disable("tikvclient/mockSplitRegionTimeout")
c.Assert(err, IsNil)
// Test scatter regions timeout.
c.Assert(failpoint.Enable("tikvclient/mockScatterRegionTimeout", `return(true)`), IsNil)
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
err = failpoint.Disable("tikvclient/mockScatterRegionTimeout")
c.Assert(err, IsNil)
// Test pre-split with timeout.
tk.MustExec("drop table if exists t")
tk.MustExec("set @@global.tidb_scatter_region=1;")
c.Assert(failpoint.Enable("tikvclient/mockScatterRegionTimeout", `return(true)`), IsNil)
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
start := time.Now()
tk.MustExec("create table t (a int, b int) partition by hash(a) partitions 5;")
c.Assert(time.Since(start).Seconds(), Less, 10.0)
err = failpoint.Disable("tikvclient/mockScatterRegionTimeout")
c.Assert(err, IsNil)
}
func (s *testSuiteP2) TestRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 3)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (2, 3)")
result := tk.MustQuery("select * from t where (c, d) < (2,2)")
result.Check(testkit.Rows("1 1", "1 3", "2 1"))
result = tk.MustQuery("select * from t where (1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where row(1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where (c, d) = (select * from t where (c,d) = (1,1))")
result.Check(testkit.Rows("1 1"))
result = tk.MustQuery("select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d))")
result.Check(testkit.Rows("1 1", "1 3", "2 1", "2 3"))
result = tk.MustQuery("select (1, 2, 3) < (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 3)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 1, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) >= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) = (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) != (2, 3, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (row(1, 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 0) in (row(1, 1))")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (select 1, 1)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > row(1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > (select 1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select 1 > (select 1)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (select 1)")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("insert t1 values (1,2),(1,null)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (0,0)")
tk.MustQuery("select * from t2 where (1,2) in (select * from t1)").Check(testkit.Rows("0 0"))
tk.MustQuery("select * from t2 where (1,2) not in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (1,1) not in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (1,null) in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (null,null) in (select * from t1)").Check(testkit.Rows())
tk.MustExec("delete from t1 where a=1 and b=2")
tk.MustQuery("select (1,1) in (select * from t2) from t1").Check(testkit.Rows("0"))
tk.MustQuery("select (1,1) not in (select * from t2) from t1").Check(testkit.Rows("1"))
tk.MustQuery("select (1,1) in (select 1,1 from t2) from t1").Check(testkit.Rows("1"))
tk.MustQuery("select (1,1) not in (select 1,1 from t2) from t1").Check(testkit.Rows("0"))
// MySQL 5.7 returns 1 for these 2 queries, which is wrong.
tk.MustQuery("select (1,null) not in (select 1,1 from t2) from t1").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (t1.a,null) not in (select 1,1 from t2) from t1").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (1,null) in (select * from t1)").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (1,null) not in (select * from t1)").Check(testkit.Rows("<nil>"))
}
func (s *testSuiteP2) TestColumnName(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
// Setting sql_mode without ONLY_FULL_GROUP_BY disables the only-full-group-by check.
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
rs, err := tk.Exec("select 1 + c, count(*) from t")
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "1 + c")
c.Check(fields[0].ColumnAsName.L, Equals, "1 + c")
c.Check(fields[1].Column.Name.L, Equals, "count(*)")
c.Check(fields[1].ColumnAsName.L, Equals, "count(*)")
c.Assert(rs.Close(), IsNil)
rs, err = tk.Exec("select (c) > all (select c from t) from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.L, Equals, "(c) > all (select c from t)")
c.Check(fields[0].ColumnAsName.L, Equals, "(c) > all (select c from t)")
c.Assert(rs.Close(), IsNil)
tk.MustExec("begin")
tk.MustExec("insert t values(1,1)")
rs, err = tk.Exec("select c d, d c from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "d")
c.Check(fields[1].Column.Name.L, Equals, "d")
c.Check(fields[1].ColumnAsName.L, Equals, "c")
c.Assert(rs.Close(), IsNil)
// Test case for querying a column of a table.
// In this case, all attributes have values.
rs, err = tk.Exec("select c as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "t")
c.Check(fields[0].TableAsName.L, Equals, "t2")
c.Check(fields[0].DBName.L, Equals, "test")
c.Assert(rs.Close(), IsNil)
// Test case for querying an expression that only uses constant inputs.
// In this case, the table, org_table and database attributes will all be empty.
rs, err = tk.Exec("select hour(1) as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "a")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "")
c.Check(fields[0].TableAsName.L, Equals, "")
c.Check(fields[0].DBName.L, Equals, "")
c.Assert(rs.Close(), IsNil)
// Test case for querying a column wrapped in parentheses and unary plus.
// In this case, the column name should be its original name.
rs, err = tk.Exec("select (c), (+c), +(c), +(+(c)), ++c from t")
c.Check(err, IsNil)
fields = rs.Fields()
for i := 0; i < 5; i++ {
c.Check(fields[i].Column.Name.L, Equals, "c")
c.Check(fields[i].ColumnAsName.L, Equals, "c")
}
c.Assert(rs.Close(), IsNil)
// Test issue https://github.com/pingcap/tidb/issues/9639.
// Both a window function and an ordinary expression appear in the final result fields.
tk.MustExec("set @@tidb_enable_window_function = 1")
rs, err = tk.Exec("select 1+1, row_number() over() num from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "1+1")
c.Assert(fields[0].ColumnAsName.L, Equals, "1+1")
c.Assert(fields[1].Column.Name.L, Equals, "num")
c.Assert(fields[1].ColumnAsName.L, Equals, "num")
tk.MustExec("set @@tidb_enable_window_function = 0")
c.Assert(rs.Close(), IsNil)
rs, err = tk.Exec("select if(1,c,c) from t;")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "if(1,c,c)")
// This is a compatibility issue; it should be empty instead.
c.Assert(fields[0].ColumnAsName.L, Equals, "if(1,c,c)")
c.Assert(rs.Close(), IsNil)
}
func (s *testSuiteP2) TestSelectVar(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (d int)")
tk.MustExec("insert into t values(1), (2), (1)")
// This behavior is different from MySQL.
result := tk.MustQuery("select @a, @a := d+1 from t")
result.Check(testkit.Rows("<nil> 2", "2 3", "3 2"))
// Test for PR #10658.
tk.MustExec("select SQL_BIG_RESULT d from t group by d")
tk.MustExec("select SQL_SMALL_RESULT d from t group by d")
tk.MustExec("select SQL_BUFFER_RESULT d from t group by d")
}
func (s *testSuiteP2) TestHistoryRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists history_read")
tk.MustExec("create table history_read (a int)")
tk.MustExec("insert history_read values (1)")
// For mocktikv, the safe point is not initialized, so we manually insert it for the snapshot read to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20060102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
// Setting the snapshot to a time before the safe point will fail.
_, err := tk.Exec("set @@tidb_snapshot = '2006-01-01 15:04:05.999999'")
c.Assert(terror.ErrorEqual(err, variable.ErrSnapshotTooOld), IsTrue, Commentf("err %v", err))
// SnapshotTS is not updated if the check fails.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
// Setting snapshot to a time in the future will fail. (One day before the 2038 problem)
_, err = tk.Exec("set @@tidb_snapshot = '2038-01-18 03:14:07'")
c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time")
// SnapshotTS is not updated if the check fails.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
curVer1, _ := s.store.CurrentVersion(kv.GlobalTxnScope)
time.Sleep(time.Millisecond)
snapshotTime := time.Now()
time.Sleep(time.Millisecond)
curVer2, _ := s.store.CurrentVersion(kv.GlobalTxnScope)
tk.MustExec("insert history_read values (2)")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
ctx := tk.Se.(sessionctx.Context)
snapshotTS := ctx.GetSessionVars().SnapshotTS
c.Assert(snapshotTS, Greater, curVer1.Ver)
c.Assert(snapshotTS, Less, curVer2.Ver)
tk.MustQuery("select * from history_read").Check(testkit.Rows("1"))
_, err = tk.Exec("insert history_read values (2)")
c.Assert(err, NotNil)
_, err = tk.Exec("update history_read set a = 3 where a = 1")
c.Assert(err, NotNil)
_, err = tk.Exec("delete from history_read where a = 1")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("insert history_read values (3)")
tk.MustExec("update history_read set a = 4 where a = 3")
tk.MustExec("delete from history_read where a = 1")
time.Sleep(time.Millisecond)
snapshotTime = time.Now()
time.Sleep(time.Millisecond)
tk.MustExec("alter table history_read add column b int")
tk.MustExec("insert history_read values (8, 8), (9, 9)")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tsoStr := strconv.FormatUint(oracle.GoTimeToTS(snapshotTime), 10)
tk.MustExec("set @@tidb_snapshot = '" + tsoStr + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
}
func (s *testSuite2) TestLowResolutionTSORead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@autocommit=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists low_resolution_tso")
tk.MustExec("create table low_resolution_tso(a int)")
tk.MustExec("insert low_resolution_tso values (1)")
// Enable low-resolution TSO.
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsFalse)
_, err := tk.Exec("set @@tidb_low_resolution_tso = 'on'")
c.Assert(err, IsNil)
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsTrue)
time.Sleep(3 * time.Second)
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("1"))
_, err = tk.Exec("update low_resolution_tso set a = 2")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_low_resolution_tso = 'off'")
tk.MustExec("update low_resolution_tso set a = 2")
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("2"))
}
func (s *testSuite2) TestStaleReadFutureTime(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Setting tx_read_ts to a time in the future will fail. (One day before the 2038 problem)
_, err := tk.Exec("set @@tx_read_ts = '2038-01-18 03:14:07'")
c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time")
// TxnReadTS is not updated if the check fails.
c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0))
}
func (s *testSuite) TestScanControlSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx_b(b))")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select (select count(1) k from t s where s.b = t1.c) from t t1").Sort().Check(testkit.Rows("0", "1", "3", "3"))
}
func (s *testSuite) TestSimpleDAG(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int)")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t where a = 4").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select a from t limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select a from t order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustQuery("select a from t order by a desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t order by b desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where a < 3").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where b > 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where b > 1 and a < 3").Check(testkit.Rows())
tk.MustQuery("select count(*) from t where b > 1 and a < 3").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t").Check(testkit.Rows("4"))
tk.MustQuery("select count(*), c from t group by c order by c").Check(testkit.Rows("2 1", "1 2", "1 3"))
tk.MustQuery("select sum(c) as s from t group by b order by s").Check(testkit.Rows("3", "4"))
tk.MustQuery("select avg(a) as s from t group by b order by s").Check(testkit.Rows("2.0000", "4.0000"))
tk.MustQuery("select sum(distinct c) from t group by b").Check(testkit.Rows("3", "3"))
tk.MustExec("create index i on t(c,b)")
tk.MustQuery("select a from t where c = 1").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where c = 1 and a < 2").Check(testkit.Rows("1"))
tk.MustQuery("select a from t where c = 1 order by a limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from t where c = 1 ").Check(testkit.Rows("2"))
tk.MustExec("create index i1 on t(b)")
tk.MustQuery("select c from t where b = 2").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 2").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select count(*) from t where b = 1").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 1 and a > 1 limit 1").Check(testkit.Rows("2 1 1"))
// Test time push down.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, c1 datetime);")
tk.MustExec("insert into t values (1, '2015-06-07 12:12:12')")
tk.MustQuery("select id from t where c1 = '2015-06-07 12:12:12'").Check(testkit.Rows("1"))
// Test issue 17816
tk.MustExec("drop table if exists t0")
tk.MustExec("CREATE TABLE t0(c0 INT)")
tk.MustExec("INSERT INTO t0 VALUES (100000)")
tk.MustQuery("SELECT * FROM t0 WHERE NOT SPACE(t0.c0)").Check(testkit.Rows("100000"))
}
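// TestTimestampTimeZone checks that TIMESTAMP values are converted according to the
// session time_zone on read: the value '2017-04-27 22:40:42' written under '+00:00' is
// rendered differently under '+10:00' and '-6:00'. It also covers the
// TableReader/IndexReader/IndexLookup paths for issues #3467 and #3485.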
func (s *testSuite) TestTimestampTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (ts timestamp)")
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t values ('2017-04-27 22:40:42')")
// The timestamp will get a different value if the time_zone session variable changes.
tests := []struct {
timezone string
expect string
}{
{"+10:00", "2017-04-28 08:40:42"},
{"-6:00", "2017-04-27 16:40:42"},
}
for _, tt := range tests {
tk.MustExec(fmt.Sprintf("set time_zone = '%s'", tt.timezone))
tk.MustQuery("select * from t").Check(testkit.Rows(tt.expect))
}
// For issue https://github.com/pingcap/tidb/issues/3467
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
uid int(11) DEFAULT NULL,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
ip varchar(128) DEFAULT NULL,
PRIMARY KEY (id),
KEY i_datetime (datetime),
KEY i_userid (uid)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");`)
r := tk.MustQuery("select datetime from t1;") // Cover TableReaderExec
r.Check(testkit.Rows("2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10")) // Cover IndexReaderExec
r = tk.MustQuery("select * from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("123381351 1734 2014-03-31 08:57:10 127.0.0.1")) // Cover IndexLookupExec
// For issue https://github.com/pingcap/tidb/issues/3485
tk.MustExec("set time_zone = 'Asia/Shanghai'")
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
tk.MustExec(`alter table t1 add key i_datetime (datetime);`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery(`select * from t1;`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10"))
}
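// TestTimestampDefaultValueTimeZone verifies that a TIMESTAMP column's default value
// follows the session time zone: the default shown by `show create table` shifts with
// time_zone, while the zero default '0000-00-00 00:00:00' is unaffected. It also checks
// that a column added with default current_timestamp stays consistent across time
// zones and survives adding an index.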
func (s *testSuite) TestTimestampDefaultValueTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "2019-01-17 14:46:14")`)
tk.MustExec("insert into t set a=1")
r := tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 14:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 06:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14"))
// Test the case where the column's version is greater than ColumnInfoVersion1.
sctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(sctx).InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tb.Cols()[1].Version = model.ColumnInfoVersion1 + 1
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14", "3 2019-01-17 06:46:14"))
tk.MustExec("delete from t where a=3")
// Change time zone back.
tk.MustExec("set time_zone = '+08:00'")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 14:46:14", "2 2019-01-17 14:46:14"))
tk.MustExec("set time_zone = '-08:00'")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-16 22:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// test the zero default value in multiple time zones.
defer tk.MustExec(fmt.Sprintf("set @@sql_mode='%s'", tk.MustQuery("select @@sql_mode").Rows()[0][0]))
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION';")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "0000-00-00 00")`)
tk.MustExec("insert into t set a=1")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '-08:00'")
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 0000-00-00 00:00:00", "2 0000-00-00 00:00:00", "3 0000-00-00 00:00:00"))
// test adding a timestamp column with default current_timestamp.
tk.MustExec(`drop table if exists t`)
tk.MustExec(`set time_zone = 'Asia/Shanghai'`)
tk.MustExec(`create table t (a int)`)
tk.MustExec(`insert into t set a=1`)
tk.MustExec(`alter table t add column b timestamp not null default current_timestamp;`)
timeIn8 := tk.MustQuery("select b from t").Rows()[0][0]
tk.MustExec(`set time_zone = '+00:00'`)
timeIn0 := tk.MustQuery("select b from t").Rows()[0][0]
c.Assert(timeIn8 != timeIn0, IsTrue, Commentf("%v == %v", timeIn8, timeIn0))
datumTimeIn8, err := expression.GetTimeValue(tk.Se, timeIn8, mysql.TypeTimestamp, 0)
c.Assert(err, IsNil)
tIn8To0 := datumTimeIn8.GetMysqlTime()
timeZoneIn8, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
err = tIn8To0.ConvertTimeZone(timeZoneIn8, time.UTC)
c.Assert(err, IsNil)
c.Assert(timeIn0 == tIn8To0.String(), IsTrue, Commentf("%v != %v", timeIn0, tIn8To0.String()))
// test add index.
tk.MustExec(`alter table t add index(b);`)
tk.MustExec("admin check table t")
tk.MustExec(`set time_zone = '+05:00'`)
tk.MustExec("admin check table t")
}
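// TestTiDBCurrentTS checks that @@tidb_current_ts reports the start TS of the active
// transaction, changes across transactions, falls back to 0 outside a transaction, and
// cannot be set by the user.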
func (s *testSuite) TestTiDBCurrentTS(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
tk.MustExec("begin")
rows := tk.MustQuery("select @@tidb_current_ts").Rows()
tsStr := rows[0][0].(string)
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(tsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
tk.MustExec("begin")
rows = tk.MustQuery("select @@tidb_current_ts").Rows()
newTsStr := rows[0][0].(string)
txn, err = tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(newTsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
c.Assert(newTsStr, Not(Equals), tsStr)
tk.MustExec("commit")
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
_, err = tk.Exec("set @@tidb_current_ts = '1'")
c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err))
}
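// TestTiDBLastTxnInfo checks the JSON reported by @@tidb_last_txn_info: the start_ts
// and commit_ts of the last finished transaction, that read-only and rolled-back
// transactions do not overwrite it, and that a failed optimistic commit records
// commit_ts 0 together with the error.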
func (s *testSuite) TestTiDBLastTxnInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustQuery("select @@tidb_last_txn_info").Check(testkit.Rows(""))
tk.MustExec("insert into t values (1)")
rows1 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows1[0][0].(string), Greater, "0")
c.Assert(rows1[0][0].(string), Less, rows1[0][1].(string))
tk.MustExec("begin")
tk.MustQuery("select a from t where a = 1").Check(testkit.Rows("1"))
rows2 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), @@tidb_current_ts").Rows()
tk.MustExec("commit")
rows3 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows2[0][0], Equals, rows1[0][0])
c.Assert(rows2[0][1], Equals, rows1[0][1])
c.Assert(rows3[0][0], Equals, rows1[0][0])
c.Assert(rows3[0][1], Equals, rows1[0][1])
c.Assert(rows2[0][1], Less, rows2[0][2])
tk.MustExec("begin")
tk.MustExec("update t set a = a + 1 where a = 1")
rows4 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), @@tidb_current_ts").Rows()
tk.MustExec("commit")
rows5 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows4[0][0], Equals, rows1[0][0])
c.Assert(rows4[0][1], Equals, rows1[0][1])
c.Assert(rows4[0][2], Equals, rows5[0][0])
c.Assert(rows4[0][1], Less, rows4[0][2])
c.Assert(rows4[0][2], Less, rows5[0][1])
tk.MustExec("begin")
tk.MustExec("update t set a = a + 1 where a = 2")
tk.MustExec("rollback")
rows6 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows6[0][0], Equals, rows5[0][0])
c.Assert(rows6[0][1], Equals, rows5[0][1])
tk.MustExec("begin optimistic")
tk.MustExec("insert into t values (2)")
_, err := tk.Exec("commit")
c.Assert(err, NotNil)
rows7 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), json_extract(@@tidb_last_txn_info, '$.error')").Rows()
c.Assert(rows7[0][0], Greater, rows5[0][0])
c.Assert(rows7[0][1], Equals, "0")
c.Assert(strings.Contains(err.Error(), rows7[0][1].(string)), IsTrue)
_, err = tk.Exec("set @@tidb_last_txn_info = '{}'")
c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err))
}
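// TestTiDBLastTxnInfoCommitMode checks the txn_commit_mode field of
// @@tidb_last_txn_info under different combinations of tidb_enable_async_commit and
// tidb_enable_1pc, and uses the invalidMaxCommitTS failpoint to force the async-commit
// and 1PC paths to fall back to 2PC so the *_fallback flags can be observed.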
func (s *testSerialSuite) TestTiDBLastTxnInfoCommitMode(c *C) {
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = time.Second
})
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, v int)")
tk.MustExec("insert into t values (1, 1)")
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"async_commit"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Assert(rows[0][0], Equals, `"1pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
c.Assert(failpoint.Enable("tikvclient/invalidMaxCommitTS", "return"), IsNil)
defer func() {
c.Assert(failpoint.Disable("tikvclient/invalidMaxCommitTS"), IsNil)
}()
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "true")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "true")
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "true")
c.Assert(rows[0][2], Equals, "true")
}
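// TestTiDBLastQueryInfo checks @@tidb_last_query_info: for autocommit statements and
// ordinary pessimistic reads start_ts equals for_update_ts, while a write conflict (or
// the read-committed isolation level) forces for_update_ts to advance past start_ts.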
func (s *testSuite) TestTiDBLastQueryInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, v int)")
tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.start_ts')").Check(testkit.Rows("0 0"))
toUint64 := func(str interface{}) uint64 {
res, err := strconv.ParseUint(str.(string), 10, 64)
c.Assert(err, IsNil)
return res
}
tk.MustExec("select * from t")
rows := tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk.MustExec("insert into t values (1, 10)")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
// tidb_last_txn_info is still valid after checking query info.
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0].(string), Less, rows[0][1].(string))
tk.MustExec("begin pessimistic")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("update t set v = 11 where a = 1")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk.MustExec("update t set v = 12 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(toUint64(rows[0][0]), Less, toUint64(rows[0][1]))
tk.MustExec("commit")
tk.MustExec("set transaction isolation level read committed")
tk.MustExec("begin pessimistic")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(toUint64(rows[0][0]), Less, toUint64(rows[0][1]))
tk.MustExec("rollback")
}
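// TestSelectForUpdate checks optimistic "select ... for update" semantics: a committed
// write from another session on a row locked for update makes the first session's
// commit fail, while writes to unrelated rows (or rows only referenced inside a
// subquery) do not conflict.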
func (s *testSuite) TestSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
txn, err := tk.Se.Txn(true)
c.Assert(kv.ErrInvalidTxn.Equal(err), IsTrue)
c.Assert(txn.Valid(), IsFalse)
tk.MustExec("create table t (c1 int, c2 int, c3 int)")
tk.MustExec("insert t values (11, 2, 3)")
tk.MustExec("insert t values (12, 2, 3)")
tk.MustExec("insert t values (13, 2, 3)")
tk.MustExec("create table t1 (c1 int)")
tk.MustExec("insert t1 values (11)")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
// no conflict for subquery.
tk1.MustExec("begin")
tk1.MustQuery("select * from t where exists(select null from t1 where t1.c1=t.c1) for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=22 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict, auto commit
tk1.MustExec("set @@autocommit=1;")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
tk1.MustExec("commit")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from (select * from t for update) t join t1 for update")
tk2.MustExec("begin")
tk2.MustExec("update t1 set c1 = 13")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
func (s *testSuite) TestEmptyEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (e enum('Y', 'N'))")
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert into t values (0)")
c.Assert(terror.ErrorEqual(err, types.ErrTruncated), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("insert into t values ('abc')")
c.Assert(terror.ErrorEqual(err, types.ErrTruncated), IsTrue, Commentf("err %v", err))
tk.MustExec("set sql_mode=''")
tk.MustExec("insert into t values (0)")
tk.MustQuery("select * from t").Check(testkit.Rows(""))
tk.MustExec("insert into t values ('abc')")
tk.MustQuery("select * from t").Check(testkit.Rows("", ""))
tk.MustExec("insert into t values (null)")
tk.MustQuery("select * from t").Check(testkit.Rows("", "", "<nil>"))
}
// TestIssue4024 tests https://github.com/pingcap/tidb/issues/4024
func (s *testSuite) TestIssue4024(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test2")
tk.MustExec("use test2")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("update t, test2.t set test2.t.a=2")
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
tk.MustExec("update test.t, test2.t set test.t.a=3")
tk.MustQuery("select * from t").Check(testkit.Rows("3"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
}
const (
checkRequestOff = iota
checkRequestSyncLog
checkDDLAddIndexPriority
)
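// checkRequestClient wraps the TiKV client used by the mock store so tests can inspect
// every outgoing request. Depending on checkFlags it either verifies that the SyncLog
// flag of prewrite/commit requests matches expectations, or that scan requests carry
// the expected command priority while counting low-priority prewrites for the DDL
// priority test.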
type checkRequestClient struct {
tikv.Client
priority kvrpcpb.CommandPri
lowPriorityCnt uint32
mu struct {
sync.RWMutex
checkFlags uint32
syncLog bool
}
}
func (c *checkRequestClient) setCheckPriority(priority kvrpcpb.CommandPri) {
atomic.StoreInt32((*int32)(&c.priority), int32(priority))
}
func (c *checkRequestClient) getCheckPriority() kvrpcpb.CommandPri {
return (kvrpcpb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority)))
}
func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
resp, err := c.Client.SendRequest(ctx, addr, req, timeout)
c.mu.RLock()
checkFlags := c.mu.checkFlags
c.mu.RUnlock()
if checkFlags == checkRequestSyncLog {
switch req.Type {
case tikvrpc.CmdPrewrite, tikvrpc.CmdCommit:
c.mu.RLock()
syncLog := c.mu.syncLog
c.mu.RUnlock()
if syncLog != req.SyncLog {
return nil, errors.New("fail to set sync log")
}
}
} else if checkFlags == checkDDLAddIndexPriority {
if req.Type == tikvrpc.CmdScan {
if c.getCheckPriority() != req.Priority {
return nil, errors.New("fail to set priority")
}
} else if req.Type == tikvrpc.CmdPrewrite {
if c.getCheckPriority() == kvrpcpb.CommandPri_Low {
atomic.AddUint32(&c.lowPriorityCnt, 1)
}
}
}
return resp, err
}
type testSuiteWithCliBase struct {
store kv.Storage
dom *domain.Domain
cli *checkRequestClient
}
type testSuite1 struct {
testSuiteWithCliBase
}
type testSerialSuite2 struct {
testSuiteWithCliBase
}
func (s *testSuiteWithCliBase) SetUpSuite(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
session.SetStatsLease(0)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.dom.SetStatsUpdating(true)
}
func (s *testSuiteWithCliBase) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testSuiteWithCliBase) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
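// TestAddIndexPriority verifies that tidb_ddl_reorg_priority is propagated to the TiKV
// requests issued by "add index": the default reorg priority is low, and the
// PRIORITY_NORMAL / PRIORITY_HIGH settings map to the corresponding kvrpcpb priorities,
// as observed through a hijacked checkRequestClient.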
func (s *testSuite2) TestAddIndexPriority(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
store, err := mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer func() {
dom.Close()
err = store.Close()
c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (id int, v int)")
// Insert some data to make sure the plan builds IndexLookup for t1.
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
}
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(kvrpcpb.CommandPri_Low)
tk.MustExec("alter table t1 add index t1_index (id);")
c.Assert(atomic.LoadUint32(&cli.lowPriorityCnt) > 0, IsTrue)
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_NORMAL'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(kvrpcpb.CommandPri_Normal)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_HIGH'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(kvrpcpb.CommandPri_High)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite1) TestAlterTableComment(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_1")
tk.MustExec("create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table';")
tk.MustExec("alter table `t_1` comment 'this is table comment';")
result := tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("this is table comment"))
tk.MustExec("alter table `t_1` comment 'table t comment';")
result = tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("table t comment"))
}
func (s *testSuite) TestTimezonePushDown(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (ts timestamp)")
defer tk.MustExec("drop table t")
tk.MustExec(`insert into t values ("2018-09-13 10:02:06")`)
systemTZ := timeutil.SystemLocation()
c.Assert(systemTZ.String(), Not(Equals), "System")
c.Assert(systemTZ.String(), Not(Equals), "Local")
ctx := context.Background()
count := 0
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count += 1
dagReq := new(tipb.DAGRequest)
err := proto.Unmarshal(req.Data, dagReq)
c.Assert(err, IsNil)
c.Assert(dagReq.GetTimeZoneName(), Equals, systemTZ.String())
})
_, err := tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(err, IsNil)
tk.MustExec(`set time_zone="System"`)
_, err = tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(err, IsNil)
c.Assert(count, Equals, 2) // Make sure the hook function is called.
}
func (s *testSuite) TestNotFillCacheFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int primary key)")
defer tk.MustExec("drop table t")
tk.MustExec("insert into t values (1)")
tests := []struct {
sql string
expect bool
}{
{"select SQL_NO_CACHE * from t", true},
{"select SQL_CACHE * from t", false},
{"select * from t", false},
}
count := 0
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count++
if req.NotFillCache != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.NotFillCache)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
c.Assert(count, Equals, len(tests)) // Make sure the hook function is called.
}
func (s *testSuite1) TestSyncLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
cli := s.cli
cli.mu.Lock()
cli.mu.checkFlags = checkRequestSyncLog
cli.mu.syncLog = true
cli.mu.Unlock()
tk.MustExec("create table t (id int primary key)")
cli.mu.Lock()
cli.mu.syncLog = false
cli.mu.Unlock()
tk.MustExec("insert into t values (1)")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite) TestHandleTransfer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, index idx(a))")
tk.MustExec("insert into t values(1), (2), (4)")
tk.MustExec("begin")
tk.MustExec("update t set a = 3 where a = 4")
// test table scan read whose result needs handles.
tk.MustQuery("select * from t ignore index(idx)").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("insert into t values(4)")
// test single read whose result needs handles
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t use index(idx) order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustExec("update t set a = 5 where a = 3")
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "4", "5"))
tk.MustExec("commit")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(3, 3), (1, 1), (2, 2)")
// Then test double read.
tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1", "2 2", "3 3"))
}
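// TestBit checks range enforcement for BIT(n) columns. Numeric literals must fit in n
// bits, while a string literal is interpreted as the integer formed by its bytes; the
// width check appears to apply to the significant bits of that value, which is why
// '12345678' (8 bytes, whose leading byte 0x31 has two leading zero bits) fits in
// bit(62) but not in bit(61).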
func (s *testSuite) TestBit(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(2))")
tk.MustExec("insert into t values (0), (1), (2), (3)")
_, err := tk.Exec("insert into t values (4)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values ('a')")
c.Assert(err, NotNil)
r, err := tk.Exec("select * from t where c1 = 2")
c.Assert(err, IsNil)
req := r.NewChunk()
err = r.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(types.BinaryLiteral(req.GetRow(0).GetBytes(0)), DeepEquals, types.NewBinaryLiteralFromUint(2, -1))
r.Close()
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(31))")
tk.MustExec("insert into t values (0x7fffffff)")
_, err = tk.Exec("insert into t values (0x80000000)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values (0xffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('123')")
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345)")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(62))")
tk.MustExec("insert into t values ('12345678')")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(61))")
_, err = tk.Exec("insert into t values ('12345678')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(32))")
tk.MustExec("insert into t values (0x7fffffff)")
tk.MustExec("insert into t values (0xffffffff)")
_, err = tk.Exec("insert into t values (0x1ffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
_, err = tk.Exec("insert into t values ('123456789')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
tk.MustQuery("select * from t where c1").Check(testkit.Rows("\xff\xff\xff\xff\xff\xff\xff\xff", "12345678"))
}
func (s *testSuite) TestEnum(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c enum('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values(1), (2), (3)")
tk.MustQuery("select * from t where c").Check(testkit.Rows("a", "b", "c"))
}
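// TestSet checks SET columns. A numeric insert is treated as a bitmask over the set
// members ('a' = bit 0, 'b' = bit 1, ...), so inserting 3 yields 'a,b', and members are
// normalized to definition order ('b,a' is stored as 'a,b').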
func (s *testSuite) TestSet(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c set('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select * from t where c = 'a,b'").Check(testkit.Rows("a,b", "a,b"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values(3)")
tk.MustQuery("select * from t where c").Check(testkit.Rows("a,b"))
}
func (s *testSuite) TestSubqueryInValues(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, name varchar(20))")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (gid int)")
tk.MustExec("insert into t1 (gid) value (1)")
tk.MustExec("insert into t (id, name) value ((select gid from t1) ,'asd')")
tk.MustQuery("select * from t").Check(testkit.Rows("1 asd"))
}
func (s *testSuite) TestEnhancedRangeAccess(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b int)")
tk.MustExec("insert into t values(1, 2), (2, 1)")
tk.MustQuery("select * from t where (a = 1 and b = 2) or (a = 2 and b = 1)").Check(testkit.Rows("1 2", "2 1"))
tk.MustQuery("select * from t where (a = 1 and b = 1) or (a = 2 and b = 2)").Check(nil)
}
// TestMaxInt64Handle tests issue #4810.
func (s *testSuite) TestMaxInt64Handle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint, PRIMARY KEY (id))")
tk.MustExec("insert into t values(9223372036854775807)")
tk.MustExec("select * from t where id = 9223372036854775807")
tk.MustQuery("select * from t where id = 9223372036854775807;").Check(testkit.Rows("9223372036854775807"))
tk.MustQuery("select * from t").Check(testkit.Rows("9223372036854775807"))
_, err := tk.Exec("insert into t values(9223372036854775807)")
c.Assert(err, NotNil)
tk.MustExec("delete from t where id = 9223372036854775807")
tk.MustQuery("select * from t").Check(nil)
}
func (s *testSuite) TestTableScanWithPointRanges(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int, PRIMARY KEY (id))")
tk.MustExec("insert into t values(1), (5), (10)")
tk.MustQuery("select * from t where id in(1, 2, 10)").Check(testkit.Rows("1", "10"))
}
func (s *testSuite) TestUnsignedPk(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint unsigned primary key)")
var num1, num2 uint64 = math.MaxInt64 + 1, math.MaxInt64 + 2
tk.MustExec(fmt.Sprintf("insert into t values(%v), (%v), (1), (2)", num1, num2))
num1Str := strconv.FormatUint(num1, 10)
num2Str := strconv.FormatUint(num2, 10)
tk.MustQuery("select * from t order by id").Check(testkit.Rows("1", "2", num1Str, num2Str))
tk.MustQuery("select * from t where id not in (2)").Check(testkit.Rows(num1Str, num2Str, "1"))
tk.MustExec("drop table t")
tk.MustExec("create table t(a bigint unsigned primary key, b int, index idx(b))")
tk.MustExec("insert into t values(9223372036854775808, 1), (1, 1)")
tk.MustQuery("select * from t use index(idx) where b = 1 and a < 2").Check(testkit.Rows("1 1"))
tk.MustQuery("select * from t use index(idx) where b = 1 order by b, a").Check(testkit.Rows("1 1", "9223372036854775808 1"))
}
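// TestSignedCommonHandle checks that a clustered (common handle) primary key over
// signed integers keeps the correct sort order and range semantics for negative keys,
// i.e. the key encoding compares -100 < -50 < 0 < 1 < 3.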
func (s *testSuite) TestSignedCommonHandle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(k1 int, k2 int, primary key(k1, k2))")
tk.MustExec("insert into t(k1, k2) value(-100, 1), (-50, 1), (0, 0), (1, 1), (3, 3)")
tk.MustQuery("select k1 from t order by k1").Check(testkit.Rows("-100", "-50", "0", "1", "3"))
tk.MustQuery("select k1 from t order by k1 desc").Check(testkit.Rows("3", "1", "0", "-50", "-100"))
tk.MustQuery("select k1 from t where k1 < -51").Check(testkit.Rows("-100"))
tk.MustQuery("select k1 from t where k1 < -1").Check(testkit.Rows("-100", "-50"))
tk.MustQuery("select k1 from t where k1 <= 0").Check(testkit.Rows("-100", "-50", "0"))
tk.MustQuery("select k1 from t where k1 < 2").Check(testkit.Rows("-100", "-50", "0", "1"))
tk.MustQuery("select k1 from t where k1 < -1 and k1 > -90").Check(testkit.Rows("-50"))
}
func (s *testSuite) TestIssue5666(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@profiling=1")
tk.MustQuery("SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID;").Check(testkit.Rows("0 0"))
}
func (s *testSuite) TestIssue5341(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop table if exists test.t")
tk.MustExec("create table test.t(a char)")
tk.MustExec("insert into test.t value('a')")
tk.MustQuery("select * from test.t where a < 1 order by a limit 0;").Check(testkit.Rows())
}
func (s *testSuite) TestContainDotColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test.t1")
tk.MustExec("create table test.t1(t1.a char)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a char, t2.b int)")
tk.MustExec("drop table if exists t3")
_, err := tk.Exec("create table t3(s.a char);")
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongTableName))
}
func (s *testSuite) TestCheckIndex(c *C) {
s.ctx = mock.NewContext()
s.ctx.Store = s.store
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
defer se.Close()
_, err = se.Execute(context.Background(), "create database test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "create table t (pk int primary key, c int default 1, c1 int default 1, unique key c(c))")
c.Assert(err, IsNil)
is := s.domain.InfoSchema()
db := model.NewCIStr("test_admin")
dbInfo, ok := is.SchemaByName(db)
c.Assert(ok, IsTrue)
tblName := model.NewCIStr("t")
tbl, err := is.TableByName(db, tblName)
c.Assert(err, IsNil)
tbInfo := tbl.Meta()
alloc := autoid.NewAllocator(s.store, dbInfo.ID, false, autoid.RowIDAllocType)
tb, err := tables.TableFromMeta(autoid.NewAllocators(alloc), tbInfo)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t C")
c.Assert(err, IsNil)
// set data to:
// index data (handle, data): (1, 10), (2, 20)
// table data (handle, data): (1, 10), (2, 20)
recordVal1 := types.MakeDatums(int64(1), int64(10), int64(11))
recordVal2 := types.MakeDatums(int64(2), int64(20), int64(21))
c.Assert(s.ctx.NewTxn(context.Background()), IsNil)
_, err = tb.AddRecord(s.ctx, recordVal1)
c.Assert(err, IsNil)
_, err = tb.AddRecord(s.ctx, recordVal2)
c.Assert(err, IsNil)
txn, err := s.ctx.Txn(true)
c.Assert(err, IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
mockCtx := mock.NewContext()
idx := tb.Indices()[0]
sc := &stmtctx.StatementContext{TimeZone: time.Local}
_, err = se.Execute(context.Background(), "admin check index t idx_inexistent")
c.Assert(strings.Contains(err.Error(), "not exist"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(30)), kv.IntHandle(3), nil)
c.Assert(err, IsNil)
key := tablecodec.EncodeRowKey(tb.Meta().ID, kv.IntHandle(4).Encoded())
setColValue(c, txn, key, types.NewDatum(int64(40)))
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[executor:8133]handle 3, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:30, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:<nil>")
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(40)), kv.IntHandle(4), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 4"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(30)), kv.IntHandle(3))
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(20)), kv.IntHandle(2))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 2"), IsTrue)
// TODO: pass the case below:
// set data to:
// index data (handle, data): (1, 10), (4, 40), (2, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
}
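// setColValue writes a raw row value (columns 2 and 3) directly at the given record
// key, bypassing the table layer, so tests can fabricate inconsistencies between the
// row data and its indices.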
func setColValue(c *C, txn kv.Transaction, key kv.Key, v types.Datum) {
row := []types.Datum{v, {}}
colIDs := []int64{2, 3}
sc := &stmtctx.StatementContext{TimeZone: time.Local}
rd := rowcodec.Encoder{Enable: true}
value, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil, &rd)
c.Assert(err, IsNil)
err = txn.Set(key, value)
c.Assert(err, IsNil)
}
func (s *testSuite) TestCheckTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Test 'admin check table' when the table has a unique index with null values.
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL);")
tk.MustExec("admin check table admin_test;")
}
func (s *testSuite) TestCheckTableClusterIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, primary key (c1, c2), index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (3, 3);")
tk.MustExec("admin check table admin_test;")
}
func (s *testSuite) TestCoprocessorStreamingFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, value int, index idx(id))")
// Add some data to make statistics work.
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i))
}
tests := []struct {
sql string
expect bool
}{
{"select * from t", true}, // TableReader
{"select * from t where id = 5", true}, // IndexLookup
{"select * from t where id > 5", true}, // Filter
{"select * from t limit 3", false}, // Limit
{"select avg(id) from t", false}, // Aggregate
{"select * from t order by value limit 3", false}, // TopN
}
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
if req.Streaming != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.Streaming)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
}
func (s *testSuite) TestIncorrectLimitArg(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint);`)
tk.MustExec(`prepare stmt1 from 'select * from t limit ?';`)
tk.MustExec(`prepare stmt2 from 'select * from t limit ?, ?';`)
tk.MustExec(`set @a = -1;`)
tk.MustExec(`set @b = 1;`)
var err error
_, err = tk.Se.Execute(context.TODO(), `execute stmt1 using @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
_, err = tk.Se.Execute(context.TODO(), `execute stmt2 using @b, @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
}
func (s *testSuite) TestLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint, b bigint);`)
tk.MustExec(`insert into t values(1, 1), (2, 2), (3, 30), (4, 40), (5, 5), (6, 6);`)
tk.MustQuery(`select * from t order by a limit 1, 1;`).Check(testkit.Rows(
"2 2",
))
tk.MustQuery(`select * from t order by a limit 1, 2;`).Check(testkit.Rows(
"2 2",
"3 30",
))
tk.MustQuery(`select * from t order by a limit 1, 3;`).Check(testkit.Rows(
"2 2",
"3 30",
"4 40",
))
tk.MustQuery(`select * from t order by a limit 1, 4;`).Check(testkit.Rows(
"2 2",
"3 30",
"4 40",
"5 5",
))
// test inline projection
tk.MustQuery(`select a from t where a > 0 limit 1, 1;`).Check(testkit.Rows(
"2",
))
tk.MustQuery(`select a from t where a > 0 limit 1, 2;`).Check(testkit.Rows(
"2",
"3",
))
tk.MustQuery(`select b from t where a > 0 limit 1, 3;`).Check(testkit.Rows(
"2",
"30",
"40",
))
tk.MustQuery(`select b from t where a > 0 limit 1, 4;`).Check(testkit.Rows(
"2",
"30",
"40",
"5",
))
// test @@tidb_init_chunk_size=2
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustQuery(`select * from t where a > 0 limit 2, 1;`).Check(testkit.Rows(
"3 30",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 2;`).Check(testkit.Rows(
"3 30",
"4 40",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 3;`).Check(testkit.Rows(
"3 30",
"4 40",
"5 5",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 4;`).Check(testkit.Rows(
"3 30",
"4 40",
"5 5",
"6 6",
))
// test inline projection
tk.MustQuery(`select a from t order by a limit 2, 1;`).Check(testkit.Rows(
"3",
))
tk.MustQuery(`select b from t order by a limit 2, 2;`).Check(testkit.Rows(
"30",
"40",
))
tk.MustQuery(`select a from t order by a limit 2, 3;`).Check(testkit.Rows(
"3",
"4",
"5",
))
tk.MustQuery(`select b from t order by a limit 2, 4;`).Check(testkit.Rows(
"30",
"40",
"5",
"6",
))
}
func (s *testSuite) TestCoprocessorStreamingWarning(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a double)")
tk.MustExec("insert into t value(1.2)")
tk.MustExec("set @@session.tidb_enable_streaming = 1")
result := tk.MustQuery("select * from t where a/0 > 1")
result.Check(testkit.Rows())
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1365|Division by 0"))
}
func (s *testSuite3) TestYearTypeDeleteIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a YEAR, PRIMARY KEY(a));")
tk.MustExec("insert into t set a = '2151';")
tk.MustExec("delete from t;")
tk.MustExec("admin check table t")
}
func (s *testSuite3) TestForSelectScopeInUnion(c *C) {
// For "A union B for update", the "for update" option belongs to the union statement,
// so it should work on both A and B.
tk1 := testkit.NewTestKit(c, s.store)
tk2 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t")
tk1.MustExec("create table t(a int)")
tk1.MustExec("insert into t values (1)")
tk1.MustExec("begin")
// 'For update' would act on the second select.
tk1.MustQuery("select 1 as a union select a from t for update")
tk2.MustExec("use test")
tk2.MustExec("update t set a = a + 1")
// As tk1 uses select 'for update', it should detect the conflict and fail.
_, err := tk1.Exec("commit")
c.Assert(err, NotNil)
tk1.MustExec("begin")
tk1.MustQuery("select 1 as a union select a from t limit 5 for update")
tk1.MustQuery("select 1 as a union select a from t order by a for update")
tk2.MustExec("update t set a = a + 1")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
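// TestUnsignedDecimalOverflow checks inserts of negative values into an unsigned
// DECIMAL column: in strict SQL mode they fail with an out-of-range error, while with
// sql_mode='' the value is clamped to 0.00.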
func (s *testSuite3) TestUnsignedDecimalOverflow(c *C) {
tests := []struct {
input interface{}
hasErr bool
err string
}{{
-1,
true,
"Out of range value for column",
}, {
"-1.1e-1",
true,
"Out of range value for column",
}, {
-1.1,
true,
"Out of range value for column",
}, {
-0,
false,
"",
},
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(10,2) unsigned)")
for _, t := range tests {
res, err := tk.Exec("insert into t values (?)", t.input)
if res != nil {
defer res.Close()
}
if t.hasErr {
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), t.err), IsTrue)
} else {
c.Assert(err, IsNil)
}
if res != nil {
c.Assert(res.Close(), IsNil)
}
}
tk.MustExec("set sql_mode=''")
tk.MustExec("delete from t")
tk.MustExec("insert into t values (?)", -1)
r := tk.MustQuery("select a from t limit 1")
r.Check(testkit.Rows("0.00"))
}
func (s *testSuite3) TestIndexJoinTableDualPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists a")
tk.MustExec("create table a (f1 int, f2 varchar(32), primary key (f1))")
tk.MustExec("insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c')")
// TODO here: index join causes a data race on the txn.
tk.MustQuery("select /*+ inl_merge_join(a) */ a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1;").
Check(testkit.Rows("1 a"))
}
func (s *testSuite3) TestSortLeftJoinWithNullColumnInRightChildPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int)")
tk.MustExec("create table t2(a int)")
tk.MustExec("insert into t1(a) select 1;")
tk.MustQuery("select b.n from t1 left join (select a as a, null as n from t2) b on b.a = t1.a order by t1.a").
Check(testkit.Rows("<nil>"))
}
func (s *testSuiteP1) TestUnionAutoSignedCast(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1,t2")
tk.MustExec("create table t1 (id int, i int, b bigint, d double, dd decimal)")
tk.MustExec("create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned)")
tk.MustExec("insert into t1 values(1, -1, -1, -1.1, -1)")
tk.MustExec("insert into t2 values(2, 1, 1, 1.1, 1)")
tk.MustQuery("select * from t1 union select * from t2 order by id").
Check(testkit.Rows("1 -1 -1 -1.1 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id").
Check(testkit.Rows("1 0 0 0 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id").
Check(testkit.Rows("1 18446744073709551615", "2 1"))
tk.MustQuery("select dd from t2 union all select dd from t2").
Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t3,t4")
tk.MustExec("create table t3 (id int, v int)")
tk.MustExec("create table t4 (id int, v double unsigned)")
tk.MustExec("insert into t3 values (1, -1)")
tk.MustExec("insert into t4 values (2, 1)")
tk.MustQuery("select id, v from t3 union select id, v from t4 order by id").
Check(testkit.Rows("1 -1", "2 1"))
tk.MustQuery("select id, v from t4 union select id, v from t3 order by id").
Check(testkit.Rows("1 0", "2 1"))
tk.MustExec("drop table if exists t5,t6,t7")
tk.MustExec("create table t5 (id int, v bigint unsigned)")
tk.MustExec("create table t6 (id int, v decimal)")
tk.MustExec("create table t7 (id int, v bigint)")
tk.MustExec("insert into t5 values (1, 1)")
tk.MustExec("insert into t6 values (2, -1)")
tk.MustExec("insert into t7 values (3, -1)")
tk.MustQuery("select id, v from t5 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1"))
tk.MustQuery("select id, v from t5 union select id, v from t7 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1", "3 -1"))
}
func (s *testSuiteP1) TestUpdateClustered(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
type resultChecker struct {
check string
assert []string
}
for _, clustered := range []string{"", "clustered"} {
tests := []struct {
initSchema []string
initData []string
dml string
resultCheck []resultChecker
}{
{ // left join + update both + match & unmatched + pk
[]string{
"drop table if exists a, b",
"create table a (k1 int, k2 int, v int)",
fmt.Sprintf("create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) %s)", clustered),
},
[]string{
"insert into a values (1, 1, 1), (2, 2, 2)", // unmatched + matched
"insert into b values (2, 2, 2, 2)",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2",
[]resultChecker{
{
"select * from b",
[]string{"2 3 4 100"},
},
{
"select * from a",
[]string{"2 3 20", "3 4 20"},
},
},
},
{ // left join + update both + match & unmatched + pk
[]string{
"drop table if exists a, b",
"create table a (k1 int, k2 int, v int)",
fmt.Sprintf("create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) %s)", clustered),
},
[]string{
"insert into a values (1, 1, 1), (2, 2, 2)", // unmatched + matched
"insert into b values (2, 2, 2, 2)",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"2 3 4 100"},
},
{
"select * from a",
[]string{"2 3 20", "3 4 20"},
},
},
},
{ // left join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"12 13 20", "23 24 20"},
},
},
},
{ // right join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"12 13 20", "23 24 20"},
},
},
},
{ // inner join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"11 11 11", "23 24 20"},
},
},
},
{
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2)",
[]resultChecker{
{
"select * from b",
[]string{"22 22 22 22"},
},
{
"select * from a",
[]string{"11 11 11", "23 24 20"},
},
},
},
}
for _, test := range tests {
for _, s := range test.initSchema {
tk.MustExec(s)
}
for _, s := range test.initData {
tk.MustExec(s)
}
tk.MustExec(test.dml)
for _, checker := range test.resultCheck {
tk.MustQuery(checker.check).Check(testkit.Rows(checker.assert...))
}
tk.MustExec("admin check table a")
tk.MustExec("admin check table b")
}
}
}
func (s *testSuite6) TestUpdateJoin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7")
tk.MustExec("create table t1(k int, v int)")
tk.MustExec("create table t2(k int, v int)")
tk.MustExec("create table t3(id int auto_increment, k int, v int, primary key(id))")
tk.MustExec("create table t4(k int, v int)")
tk.MustExec("create table t5(v int, k int, primary key(k))")
tk.MustExec("insert into t1 values (1, 1)")
tk.MustExec("insert into t4 values (3, 3)")
tk.MustExec("create table t6 (id int, v longtext)")
tk.MustExec("create table t7 (x int, id int, v longtext, primary key(id))")
// test the normal case that update one row for a single table.
tk.MustExec("update t1 set v = 0 where k = 1")
tk.MustQuery("select k, v from t1 where k = 1").Check(testkit.Rows("1 0"))
// test the case where a table with auto_increment or non-null columns is the right table of a left join.
tk.MustExec("update t1 left join t3 on t1.k = t3.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 1"))
tk.MustQuery("select id, k, v from t3").Check(testkit.Rows())
// test a left join where the right table has no matching record but its columns are still assigned in the update.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test the case where the update of the left table references right-table columns while those columns are themselves being modified.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test a right join where the left table has no matching record but its columns are still assigned in the update.
tk.MustExec("update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test a statement that mixes right join and left join.
tk.MustExec("update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
tk.MustQuery("select k, v from t4").Check(testkit.Rows("3 4"))
// test a normal left join where the right table has matching rows.
tk.MustExec("insert t2 values (1, 10)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 11")
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// test repeatedly joining the same table and updating the unmatched records.
tk.MustExec("update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 111"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// test a left join where the left table has records but all of them are null.
tk.MustExec("delete from t1")
tk.MustExec("delete from t2")
tk.MustExec("insert into t1 values (null, null)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 1"))
// test the case where the right table of the left join has a primary key.
tk.MustExec("insert t5 values(0, 0)")
tk.MustExec("update t1 left join t5 on t1.k = t5.k set t1.v = 2")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 2"))
tk.MustQuery("select k, v from t5").Check(testkit.Rows("0 0"))
tk.MustExec("insert into t6 values (1, NULL)")
tk.MustExec("insert into t7 values (5, 1, 'a')")
tk.MustExec("update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5")
tk.MustQuery("select v from t6").Check(testkit.Rows("a"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(id int primary key, v int, gv int GENERATED ALWAYS AS (v * 2) STORED)")
tk.MustExec("create table t2(id int, v int)")
tk.MustExec("update t1 tt1 inner join (select count(t1.id) a, t1.id from t1 left join t2 on t1.id = t2.id group by t1.id) x on tt1.id = x.id set tt1.v = tt1.v + x.a")
}
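// TestMaxOneRow checks that a scalar subquery returning more than one row fails with the "Subquery returns more than 1 row" error.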
func (s *testSuite3) TestMaxOneRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`drop table if exists t2`)
tk.MustExec(`create table t1(a double, b double);`)
tk.MustExec(`create table t2(a double, b double);`)
tk.MustExec(`insert into t1 values(1, 1), (2, 2), (3, 3);`)
tk.MustExec(`insert into t2 values(0, 0);`)
tk.MustExec(`set @@tidb_init_chunk_size=1;`)
rs, err := tk.Exec(`select (select t1.a from t1 where t1.a > t2.a) as a from t2;`)
c.Assert(err, IsNil)
err = rs.Next(context.TODO(), rs.NewChunk())
c.Assert(err.Error(), Equals, "[executor:1242]Subquery returns more than 1 row")
c.Assert(rs.Close(), IsNil)
}
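// TestCurrentTimestampValueSelection checks that current_timestamp defaults with different fractional-second precisions are stored, compared, and updated with the expected number of digits.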
func (s *testSuiteP2) TestCurrentTimestampValueSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (id int, t0 timestamp null default current_timestamp, t1 timestamp(1) null default current_timestamp(1), t2 timestamp(2) null default current_timestamp(2) on update current_timestamp(2))")
tk.MustExec("insert into t (id) values (1)")
rs := tk.MustQuery("select t0, t1, t2 from t where id = 1")
t0 := rs.Rows()[0][0].(string)
t1 := rs.Rows()[0][1].(string)
t2 := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(t0, ".")), Equals, 1)
c.Assert(len(strings.Split(t1, ".")[1]), Equals, 1)
c.Assert(len(strings.Split(t2, ".")[1]), Equals, 2)
tk.MustQuery("select id from t where t0 = ?", t0).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t1 = ?", t1).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t2 = ?", t2).Check(testkit.Rows("1"))
time.Sleep(time.Second)
tk.MustExec("update t set t0 = now() where id = 1")
rs = tk.MustQuery("select t2 from t where id = 1")
newT2 := rs.Rows()[0][0].(string)
c.Assert(newT2 != t2, IsTrue)
tk.MustExec("create table t1 (id int, a timestamp, b timestamp(2), c timestamp(3))")
tk.MustExec("insert into t1 (id, a, b, c) values (1, current_timestamp(2), current_timestamp, current_timestamp(3))")
rs = tk.MustQuery("select a, b, c from t1 where id = 1")
a := rs.Rows()[0][0].(string)
b := rs.Rows()[0][1].(string)
d := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(a, ".")), Equals, 1)
c.Assert(strings.Split(b, ".")[1], Equals, "00")
c.Assert(len(strings.Split(d, ".")[1]), Equals, 3)
}
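// TestRowID checks reading the implicit _tidb_rowid column through a secondary index and through the primary index.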
func (s *testSuite3) TestRowID(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
tk.MustExec(`create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustQuery(`select b, _tidb_rowid from t use index(idx) where a = 'a';`).Check(testkit.Rows(
`b 1`,
`b 2`,
))
tk.MustExec(`begin;`)
tk.MustExec(`select * from t for update`)
tk.MustQuery(`select distinct b from t use index(idx) where a = 'a';`).Check(testkit.Rows(`b`))
tk.MustExec(`commit;`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(5) primary key)`)
tk.MustExec(`insert into t values('a')`)
tk.MustQuery("select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1").Check(testkit.Rows("a 1"))
}
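// TestDoSubquery verifies that a DO statement containing a subquery executes without error and returns no result set.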
func (s *testSuite3) TestDoSubquery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
_, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
tk.MustExec(`insert into t values(1)`)
r, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
c.Assert(r, IsNil, Commentf("result of DO statement should be empty"))
}
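// TestSubqueryTableAlias checks the derived-table alias and duplicate-alias errors under the default sql_mode and the relaxed behavior under sql_mode = 'oracle'.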
func (s *testSuite3) TestSubqueryTableAlias(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec("set sql_mode = ''")
tk.MustGetErrCode("select a, b from (select 1 a) ``, (select 2 b) ``;", mysql.ErrDerivedMustHaveAlias)
tk.MustGetErrCode("select a, b from (select 1 a) `x`, (select 2 b) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select a, b from (select 1 a), (select 2 b);", mysql.ErrDerivedMustHaveAlias)
// ambiguous column name
tk.MustGetErrCode("select a from (select 1 a) ``, (select 2 a) ``;", mysql.ErrDerivedMustHaveAlias)
tk.MustGetErrCode("select a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select x.a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select a from (select 1 a), (select 2 a);", mysql.ErrDerivedMustHaveAlias)
tk.MustExec("set sql_mode = 'oracle';")
tk.MustQuery("select a, b from (select 1 a) ``, (select 2 b) ``;").Check(testkit.Rows("1 2"))
tk.MustQuery("select a, b from (select 1 a) `x`, (select 2 b) `x`;").Check(testkit.Rows("1 2"))
tk.MustQuery("select a, b from (select 1 a), (select 2 b);").Check(testkit.Rows("1 2"))
// ambiguous column name
tk.MustGetErrCode("select a from (select 1 a) ``, (select 2 a) ``;", mysql.ErrNonUniq)
tk.MustGetErrCode("select a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonUniq)
tk.MustGetErrCode("select x.a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonUniq)
tk.MustGetErrCode("select a from (select 1 a), (select 2 a);", mysql.ErrNonUniq)
}
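// TestTSOFail injects a TSO fetch failure through the mockGetTSFail failpoint and expects the query to fail.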
func (s *testSerialSuite) TestTSOFail(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockGetTSFail", "return"), IsNil)
ctx := failpoint.WithHook(context.Background(), func(ctx context.Context, fpname string) bool {
return fpname == "github.com/pingcap/tidb/session/mockGetTSFail"
})
_, err := tk.Se.Execute(ctx, `select * from t`)
c.Assert(err, NotNil)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockGetTSFail"), IsNil)
}
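// TestSelectHashPartitionTable checks reads from a hash-partitioned table, including rows with negative partition keys.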
func (s *testSuite3) TestSelectHashPartitionTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th`)
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
defer tk.MustExec(`drop table if exists th`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustQuery("select b from th order by a").Check(testkit.Rows("-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5", "6", "7", "8"))
tk.MustQuery(" select * from th where a=-2;").Check(testkit.Rows("-2 -2"))
tk.MustQuery(" select * from th where a=5;").Check(testkit.Rows("5 5"))
}
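// TestSelectPartition covers SELECT ... PARTITION(...) on hash, range, and list partitioned tables: single and multiple partitions, unknown-partition errors, reads inside a transaction, and a scalar partitioning expression.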
func (s *testSuiteP1) TestSelectPartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th, tr, tl`)
tk.MustExec("set @@session.tidb_enable_list_partition = ON;")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
tk.MustExec(`create table tr (a int, b int)
partition by range (a) (
partition r0 values less than (4),
partition r1 values less than (7),
partition r3 values less than maxvalue)`)
tk.MustExec(`create table tl (a int, b int, unique index idx(a)) partition by list (a) (
partition p0 values in (3,5,6,9,17),
partition p1 values in (1,2,10,11,19,20),
partition p2 values in (4,12,13,14,18),
partition p3 values in (7,8,15,16,null));`)
defer tk.MustExec(`drop table if exists th, tr, tl`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustExec(`insert into tr values (-3,-3),(3,3),(4,4),(7,7),(8,8);`)
tk.MustExec(`insert into tl values (3,3),(1,1),(4,4),(7,7),(8,8),(null,null);`)
// select a single partition.
tk.MustQuery("select b from th partition (p0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from tl partition (p0) order by a").Check(testkit.Rows("3"))
tk.MustQuery("select b from th partition (p0,P0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0,R0,r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from tl partition (p0,P0,p0) order by a").Check(testkit.Rows("3"))
// select multiple partitions.
tk.MustQuery("select b from th partition (P2,p0) order by a").Check(testkit.Rows("-8", "-6", "-5", "-3", "-2", "0", "2", "3", "5", "6", "8"))
tk.MustQuery("select b from tr partition (r1,R3) order by a").Check(testkit.Rows("4", "7", "8"))
tk.MustQuery("select b from tl partition (p0,P3) order by a").Check(testkit.Rows("<nil>", "3", "7", "8"))
// test the error when selecting an unknown partition
err := tk.ExecToErr("select b from th partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'th'")
err = tk.ExecToErr("select b from tr partition (r1,r4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'r4' in table 'tr'")
err = tk.ExecToErr("select b from tl partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'tl'")
// test selecting from a partitioned table inside a transaction.
tk.MustExec("begin")
tk.MustExec("insert into th values (10,10),(11,11)")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
tk.MustExec("commit")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
// test a partitioning function that is a scalar expression
tk.MustExec("drop table if exists tscalar")
tk.MustExec(`create table tscalar (c1 int) partition by range (c1 % 30) (
partition p0 values less than (0),
partition p1 values less than (10),
partition p2 values less than (20),
partition pm values less than (maxvalue));`)
tk.MustExec("insert into tscalar values(0), (10), (40), (50), (55)")
// test IN expression
tk.MustExec("insert into tscalar values(-0), (-10), (-40), (-50), (-55)")
tk.MustQuery("select * from tscalar where c1 in (55, 55)").Check(testkit.Rows("55"))
tk.MustQuery("select * from tscalar where c1 in (40, 40)").Check(testkit.Rows("40"))
tk.MustQuery("select * from tscalar where c1 in (40)").Check(testkit.Rows("40"))
tk.MustQuery("select * from tscalar where c1 in (-40)").Check(testkit.Rows("-40"))
tk.MustQuery("select * from tscalar where c1 in (-40, -40)").Check(testkit.Rows("-40"))
tk.MustQuery("select * from tscalar where c1 in (-1)").Check(testkit.Rows())
}
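// TestDeletePartition checks that DELETE ... PARTITION(...) only removes rows from the named partitions.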
func (s *testSuiteP1) TestDeletePartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`create table t1 (a int) partition by range (a) (
partition p0 values less than (10),
partition p1 values less than (20),
partition p2 values less than (30),
partition p3 values less than (40),
partition p4 values less than MAXVALUE
)`)
tk.MustExec("insert into t1 values (1),(11),(21),(31)")
tk.MustExec("delete from t1 partition (p4)")
tk.MustQuery("select * from t1 order by a").Check(testkit.Rows("1", "11", "21", "31"))
tk.MustExec("delete from t1 partition (p0) where a > 10")
tk.MustQuery("select * from t1 order by a").Check(testkit.Rows("1", "11", "21", "31"))
tk.MustExec("delete from t1 partition (p0,p1,p2)")
tk.MustQuery("select * from t1").Check(testkit.Rows("31"))
}
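// TestSelectView checks view resolution after the underlying table is dropped, recreated, and altered, and a view defined over window functions.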
func (s *testSuite) TestSelectView(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table view_t (a int,b int)")
tk.MustExec("insert into view_t values(1,2)")
tk.MustExec("create definer='root'@'localhost' view view1 as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view2(c,d) as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view3(c,d) as select a,b from view_t")
tk.MustExec("create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(c int,d int)")
err := tk.ExecToErr("select * from view1")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view2")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view3")
c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view3").Error())
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(a int,b int,c int)")
tk.MustExec("insert into view_t values(1,2,3)")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("alter table view_t drop column a")
tk.MustExec("alter table view_t add column a int after b")
tk.MustExec("update view_t set a=1;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("drop view view1,view2,view3,view4;")
tk.MustExec("set @@tidb_enable_window_function = 1")
defer func() {
tk.MustExec("set @@tidb_enable_window_function = 0")
}()
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t values (1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t")
result := tk.MustQuery("select * from v")
result.Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2"))
tk.MustExec("drop view v;")
}
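// The suites below share an identical TearDownTest that drops every table, view, and sequence left behind in the test schema.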
type testSuite2 struct {
*baseTestSuite
}
func (s *testSuite2) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite3 struct {
*baseTestSuite
}
func (s *testSuite3) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite4 struct {
*baseTestSuite
}
func (s *testSuite4) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite5 struct {
*baseTestSuite
}
func (s *testSuite5) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite6 struct {
*baseTestSuite
}
func (s *testSuite6) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite7 struct {
*baseTestSuite
}
func (s *testSuite7) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite8 struct {
*baseTestSuite
}
func (s *testSuite8) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSerialSuite1 struct {
*baseTestSuite
}
func (s *testSerialSuite1) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
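// TestStrToDateBuiltin checks STR_TO_DATE parsing with various format specifiers, two-digit and four-digit years, literal separators, and %r/%T time formats.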
func (s *testSuiteP2) TestStrToDateBuiltin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%!') from dual`).Check(testkit.Rows("2019-01-01"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%f') from dual`).Check(testkit.Rows("2019-01-01 00:00:00.000000"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%H%i%s') from dual`).Check(testkit.Rows("2019-01-01 00:00:00"))
tk.MustQuery(`select str_to_date('18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("0018-10-22"))
tk.MustQuery(`select str_to_date('2018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%Y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('20188/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018510522','%Y5%m5%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018^10^22','%Y^%m^%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018@10@22','%Y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018%10%22','%Y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018(10(22','%Y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018\10\22','%Y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018=10=22','%Y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018+10+22','%Y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018_10_22','%Y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('69510522','%y5%m5%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('69^10^22','%y^%m^%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('18@10@22','%y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18%10%22','%y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18(10(22','%y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18\10\22','%y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18+10+22','%y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18=10=22','%y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18_10_22','%y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 11:22:33 PM', '%Y-%m-%d %r')`).Check(testkit.Rows("2020-07-04 23:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 12:22:33 AM', '%Y-%m-%d %r')`).Check(testkit.Rows("2020-07-04 00:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 12:22:33', '%Y-%m-%d %T')`).Check(testkit.Rows("2020-07-04 12:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 00:22:33', '%Y-%m-%d %T')`).Check(testkit.Rows("2020-07-04 00:22:33"))
}
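// TestAddDateBuiltinWithWarnings checks that DATE_ADD on a zero-day date under NO_ZERO_DATE returns NULL and raises an "Incorrect datetime value" warning.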
func (s *testSuiteP2) TestAddDateBuiltinWithWarnings(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@sql_mode='NO_ZERO_DATE'")
result := tk.MustQuery(`select date_add('2001-01-00', interval -2 hour);`)
result.Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Incorrect datetime value: '2001-01-00'"))
}
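// TestStrToDateBuiltinWithWarnings checks that STR_TO_DATE on a zero-year date under NO_ZERO_DATE returns NULL and raises an "Incorrect datetime value" warning.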
func (s *testSuiteP2) TestStrToDateBuiltinWithWarnings(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@sql_mode='NO_ZERO_DATE'")
tk.MustExec("use test")
tk.MustQuery(`SELECT STR_TO_DATE('0000-1-01', '%Y-%m-%d');`).Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1411 Incorrect datetime value: '0000-1-01' for function str_to_date"))
}
func (s *testSuiteP2) TestReadPartitionedTable(c *C) {
// Test the three reader types (table reader, index reader, index lookup) on a partitioned table.
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists pt")
tk.MustExec("create table pt (a int, b int, index i_b(b)) partition by range (a) (partition p1 values less than (2), partition p2 values less than (4), partition p3 values less than (6))")
for i := 0; i < 6; i++ {
tk.MustExec(fmt.Sprintf("insert into pt values(%d, %d)", i, i))
}
// Table reader
tk.MustQuery("select * from pt order by a").Check(testkit.Rows("0 0", "1 1", "2 2", "3 3", "4 4", "5 5"))
// Index reader
tk.MustQuery("select b from pt where b = 3").Check(testkit.Rows("3"))
// Index lookup
tk.MustQuery("select a from pt where b = 3").Check(testkit.Rows("3"))
}
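// TestSplitRegion exercises SPLIT TABLE and SPLIT TABLE INDEX syntax, their argument-validation errors, and region splitting on partitioned tables.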
func (s *testSplitTable) TestSplitRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
_, err := tk.Exec(`split table t index idx1 by ("abcd");`)
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.WarnDataTruncated))
// Test for split index region.
// First do a valid split, then check the error when the lower values are greater than the upper values.
tk.MustExec(`split table t index idx1 between (0) and (1000000000) regions 10`)
tk.MustGetErrCode(`split table t index idx1 between (2,'a') and (1,'c') regions 10`, errno.ErrInvalidSplitRegionRanges)
// Check min value is invalid.
_, err = tk.Exec(`split table t index idx1 between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region lower value count should more than 0")
// Check max value is invalid.
_, err = tk.Exec(`split table t index idx1 between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region upper value count should more than 0")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t index idx1 between ("aa") and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column 'b'")
// Test for split table region.
tk.MustExec(`split table t between (0) and (1000000000) regions 10`)
// Check the lower value is more than the upper value.
tk.MustGetErrCode(`split table t between (2) and (1) regions 10`, errno.ErrInvalidSplitRegionRanges)
// Check the lower value is invalid.
_, err = tk.Exec(`split table t between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region lower value count should be 1")
// Check upper value is invalid.
_, err = tk.Exec(`split table t between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region upper value count should be 1")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t between ("aa") and (1000000000) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column '_tidb_rowid'")
// Test split table region step is too small.
tk.MustGetErrCode(`split table t between (0) and (100) regions 10`, errno.ErrInvalidSplitRegionRanges)
// Test split region by syntax.
tk.MustExec(`split table t by (0),(1000),(1000000)`)
// Test split region twice to test for multiple batch split region requests.
tk.MustExec("create table t1(a int, b int)")
tk.MustQuery("split table t1 between(0) and (10000) regions 10;").Check(testkit.Rows("9 1"))
tk.MustQuery("split table t1 between(10) and (10010) regions 5;").Check(testkit.Rows("4 1"))
// Test split region for partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (1000000) regions 5;").Check(testkit.Rows("20 1"))
// Test for `split for region` syntax.
tk.MustQuery("split region for partition table t between (1000000) and (100000000) regions 10;").Check(testkit.Rows("45 1"))
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p1,p2) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
// Test for `split for region` syntax.
tk.MustQuery("split region for partition table t partition (p3,p4) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
}
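// TestSplitRegionEdgeCase checks SPLIT TABLE at the int64 boundaries and the out-of-range error for a narrower integer handle.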
func (s *testSplitTable) TestSplitRegionEdgeCase(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a bigint(20) auto_increment primary key);")
tk.MustExec("split table t between (-9223372036854775808) and (9223372036854775807) regions 16;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int(20) auto_increment primary key);")
tk.MustGetErrCode("split table t between (-9223372036854775808) and (9223372036854775807) regions 16;", errno.ErrDataOutOfRange)
}
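// TestClusterIndexSplitTableIntegration checks SPLIT TABLE argument validation and successful splits on clustered-index tables with composite primary keys.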
func (s *testSplitTable) TestClusterIndexSplitTableIntegration(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop database if exists test_cluster_index_index_split_table_integration;")
tk.MustExec("create database test_cluster_index_index_split_table_integration;")
tk.MustExec("use test_cluster_index_index_split_table_integration;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a varchar(255), b double, c int, primary key (a, b));")
// Value list length does not match.
lowerMsg := "Split table region lower value count should be 2"
upperMsg := "Split table region upper value count should be 2"
tk.MustGetErrMsg("split table t between ('aaa') and ('aaa', 100.0) regions 10;", lowerMsg)
tk.MustGetErrMsg("split table t between ('aaa', 1.0) and ('aaa', 100.0, 11) regions 10;", upperMsg)
// Value type does not match.
errMsg := "[types:1265]Incorrect value: 'aaa' for column 'b'"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and (100.0, 'aaa') regions 10;", errMsg)
// lower bound >= upper bound.
errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (aaa,0) should less than the upper value (aaa,0)"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.0) regions 10;", errMsg)
errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (bbb,0) should less than the upper value (aaa,0)"
tk.MustGetErrMsg("split table t between ('bbb', 0.0) and ('aaa', 0.0) regions 10;", errMsg)
// Exceed the limit of 1000 regions.
errMsg = "Split table region num exceeded the limit 1000"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.1) regions 100000;", errMsg)
// Split on null values.
errMsg = "[planner:1048]Column 'a' cannot be null"
tk.MustGetErrMsg("split table t between (null, null) and (null, null) regions 1000;", errMsg)
tk.MustGetErrMsg("split table t by (null, null);", errMsg)
// Success.
tk.MustExec("split table t between ('aaa', 0.0) and ('aaa', 100.0) regions 10;")
tk.MustExec("split table t by ('aaa', 0.0), ('aaa', 20.0), ('aaa', 100.0);")
tk.MustExec("split table t by ('aaa', 100.0), ('qqq', 20.0), ('zzz', 100.0), ('zzz', 1000.0);")
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int, c int, d int, primary key(a, c, d));")
tk.MustQuery("split table t between (0, 0, 0) and (0, 0, 1) regions 1000;").Check(testkit.Rows("999 1"))
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int, c int, d int, primary key(d, a, c));")
tk.MustQuery("split table t by (0, 0, 0), (1, 2, 3), (65535, 65535, 65535);").Check(testkit.Rows("3 1"))
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a varchar(255), b decimal, c int, primary key (a, b));")
errMsg = "[types:1265]Incorrect value: '' for column 'b'"
tk.MustGetErrMsg("split table t by ('aaa', '')", errMsg)
}
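// TestClusterIndexShowTableRegion checks the region start keys reported by SHOW TABLE REGIONS for a clustered-index table and for an int-handle table.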
func (s *testSplitTable) TestClusterIndexShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set global tidb_scatter_region = 1")
tk.MustExec("drop database if exists cluster_index_regions;")
tk.MustExec("create database cluster_index_regions;")
tk.MustExec("use cluster_index_regions;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a int, b int, c int, primary key(a, b));")
tk.MustExec("insert t values (1, 1, 1), (2, 2, 2);")
tk.MustQuery("split table t between (1, 0) and (2, 3) regions 2;").Check(testkit.Rows("1 1"))
rows := tk.MustQuery("show table t regions").Rows()
tbl := testGetTableByName(c, tk.Se, "cluster_index_regions", "t")
// Check the region start key.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_r_03800000000000000183800000000000", tbl.Meta().ID))
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int);")
tk.MustQuery("split table t between (0) and (100000) regions 2;").Check(testkit.Rows("1 1"))
rows = tk.MustQuery("show table t regions").Rows()
tbl = testGetTableByName(c, tk.Se, "cluster_index_regions", "t")
// Check the region start key is int64.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_r_50000", tbl.Meta().ID))
}
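// TestClusterIndexOuterJoinElimination checks that a left outer join on the complete clustered primary key is eliminated from the plan.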
func (s *testSuiteWithData) TestClusterIndexOuterJoinElimination(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a int, b int, c int, primary key(a,b))")
rows := tk.MustQuery(`explain format = 'brief' select t1.a from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b`).Rows()
rowStrs := s.testData.ConvertRowsToStrings(rows)
for _, row := range rowStrs {
// outer join has been eliminated.
c.Assert(strings.Index(row, "Join"), Equals, -1)
}
}
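// TestShowTableRegion covers SHOW TABLE [INDEX|PARTITION] REGIONS and SPLIT TABLE on normal, pre-split, temporary, and partitioned tables.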
func (s *testSplitTable) TestShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_regions")
tk.MustExec("set global tidb_scatter_region = 1")
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("create table t_regions (a int key, b int, c int, index idx(b), index idx2(c))")
_, err := tk.Exec("split partition table t_regions partition (p1,p2) index idx between (0) and (20000) regions 2;")
c.Assert(err.Error(), Equals, plannercore.ErrPartitionClauseOnNonpartitioned.Error())
// Test show table regions.
tk.MustQuery(`split table t_regions between (-10000) and (10000) regions 4;`).Check(testkit.Rows("4 1"))
re := tk.MustQuery("show table t_regions regions")
// Test show table regions and split table on temporary table.
tk.MustExec("drop table if exists t_regions_temporary_table")
tk.MustExec("set tidb_enable_global_temporary_table=true")
tk.MustExec("create global temporary table t_regions_temporary_table (a int key, b int, c int, index idx(b), index idx2(c)) ON COMMIT DELETE ROWS;")
// Test show table regions.
_, err = tk.Exec("show table t_regions_temporary_table regions")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("show table regions").Error())
// Test split table.
_, err = tk.Exec("split table t_regions_temporary_table between (-10000) and (10000) regions 4;")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("split table").Error())
_, err = tk.Exec("split partition table t_regions_temporary_table partition (p1,p2) index idx between (0) and (20000) regions 2;")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("split table").Error())
tk.MustExec("drop table if exists t_regions_temporary_table")
// Test pre-split regions on a temporary table.
_, err = tk.Exec("create global temporary table temporary_table_pre_split(id int ) pre_split_regions=2 ON COMMIT DELETE ROWS;")
c.Assert(err.Error(), Equals, ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions").Error())
rows := re.Rows()
// Table t_regions should have 5 regions now.
// 4 regions to store record data.
// 1 region to store index data.
c.Assert(len(rows), Equals, 5)
c.Assert(len(rows[0]), Equals, 11)
tbl := testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx between (-1000) and (1000) regions 4;`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d.*", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 9 regions now.
// 4 regions to store record data.
// 4 regions to store index idx data.
// 1 region to store index idx2 data.
c.Assert(len(rows), Equals, 9)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[7][2], Equals, fmt.Sprintf("t_%d_i_2_", tbl.Meta().ID))
c.Assert(rows[8][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test unsigned primary key and wait for scatter to finish.
tk.MustExec("drop table if exists t_regions")
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("create table t_regions (a int unsigned key, b int, index idx(b))")
// Test show table regions.
tk.MustExec(`set @@session.tidb_wait_split_region_finish=1;`)
tk.MustQuery(`split table t_regions by (2500),(5000),(7500);`).Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Matches, "t_.*")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2500", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_7500", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx by (250),(500),(750);`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
// Test show table regions for a partitioned table when splitting regions at table creation is disabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Matches, "t_.*")
// Test show table regions for a partitioned table when splitting regions at table creation is enabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set @@global.tidb_scatter_region=1;")
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 3)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
// Test splitting partition regions when adding new partitions.
tk.MustExec("drop table if exists partition_t;")
tk.MustExec(`create table partition_t (a int, b int,index(a)) PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (10),
PARTITION p1 VALUES LESS THAN (20),
PARTITION p2 VALUES LESS THAN (30));`)
tk.MustExec(`alter table partition_t add partition ( partition p3 values less than (40), partition p4 values less than (50) );`)
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 5)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef = tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[3].ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[4].ID))
// Test pre-splitting table regions when creating a table.
tk.MustExec("drop table if exists t_pre")
tk.MustExec("create table t_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2;")
re = tk.MustQuery("show table t_pre regions")
rows = re.Rows()
// Table t_pre should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_pre")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID))
// Test pre-splitting regions when creating a partitioned table.
tk.MustExec("drop table if exists pt_pre")
tk.MustExec("create table pt_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2 partition by hash(a) partitions 3;")
re = tk.MustQuery("show table pt_pre regions")
rows = re.Rows()
// Table pt_pre should have 12 regions now (4 per partition).
c.Assert(len(rows), Equals, 12)
tbl = testGetTableByName(c, tk.Se, "test", "pt_pre")
pi := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(len(pi), Equals, 3)
for i, p := range pi {
c.Assert(rows[1+4*i][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", p.ID))
c.Assert(rows[2+4*i][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", p.ID))
c.Assert(rows[3+4*i][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", p.ID))
}
defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
// Test split partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("15 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 20)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i, p := range tbl.Meta().GetPartitionInfo().Definitions {
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) between (1000000) and (2000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 24)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_1200000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_1400000", p.ID))
c.Assert(rows[i*4+4][1], Equals, fmt.Sprintf("t_%d_r_1600000", p.ID))
c.Assert(rows[i*4+5][1], Equals, fmt.Sprintf("t_%d_r_1800000", p.ID))
c.Assert(rows[i*4+6][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+7][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test for show table partition regions.
for i := 0; i < 4; i++ {
re = tk.MustQuery(fmt.Sprintf("show table t partition (p%v) regions", i))
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
re = tk.MustQuery("show table t partition (p0, p4) regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 12)
p := tbl.Meta().GetPartitionInfo().Definitions[0]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
p = tbl.Meta().GetPartitionInfo().Definitions[4]
c.Assert(rows[4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[5][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[6][1], Equals, fmt.Sprintf("t_%d_r_1200000", p.ID))
c.Assert(rows[7][1], Equals, fmt.Sprintf("t_%d_r_1400000", p.ID))
c.Assert(rows[8][1], Equals, fmt.Sprintf("t_%d_r_1600000", p.ID))
c.Assert(rows[9][1], Equals, fmt.Sprintf("t_%d_r_1800000", p.ID))
c.Assert(rows[10][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[11][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
// Test for duplicate partition names.
re = tk.MustQuery("show table t partition (p0, p0, p0) regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p = tbl.Meta().GetPartitionInfo().Definitions[0]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
// Test split partition table index.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int,index idx(a)) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
tk.MustQuery("split table t index idx between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 40)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
// Test split index region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) index idx between (0) and (1000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 44)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+8][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+9][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+10][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+11][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
// Test show table partition regions on an unknown partition.
err = tk.QueryToErr("show table t partition (p_unknown) index idx regions")
c.Assert(terror.ErrorEqual(err, table.ErrUnknownPartition), IsTrue)
// Test show table partition index.
for i := 0; i < 4; i++ {
re = tk.MustQuery(fmt.Sprintf("show table t partition (p%v) index idx regions", i))
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
re = tk.MustQuery("show table t partition (p3,p4) index idx regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 12)
p = tbl.Meta().GetPartitionInfo().Definitions[3]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
p = tbl.Meta().GetPartitionInfo().Definitions[4]
c.Assert(rows[4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[8][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[9][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[10][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[11][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
// Test split for the second index.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int,index idx(a), index idx2(b))")
tk.MustQuery("split table t index idx2 between (0) and (4000000) regions 2;").Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_3_", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_2_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_2_.*", tbl.Meta().ID))
// Test show table partition regions on a non-partitioned table.
err = tk.QueryToErr("show table t partition (p3,p4) index idx regions")
c.Assert(terror.ErrorEqual(err, plannercore.ErrPartitionClauseOnNonpartitioned), IsTrue)
}
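// testGetTableByName reloads the info schema and returns the table object for
// the given database and table name, failing the test on any error.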
func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table {
dom := domain.GetDomain(ctx)
// Make sure the table schema is the new schema.
err := dom.Reload()
c.Assert(err, IsNil)
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table))
c.Assert(err, IsNil)
return tbl
}
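// TestIssue10435 checks that a window function used both in the select list
// and in the ORDER BY clause is evaluated correctly.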
func (s *testSuiteP2) TestIssue10435(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(i int, j int, k int)")
tk.MustExec("insert into t1 VALUES (1,1,1),(2,2,2),(3,3,3),(4,4,4)")
tk.MustExec("INSERT INTO t1 SELECT 10*i,j,5*j FROM t1 UNION SELECT 20*i,j,5*j FROM t1 UNION SELECT 30*i,j,5*j FROM t1")
tk.MustExec("set @@session.tidb_enable_window_function=1")
tk.MustQuery("SELECT SUM(i) OVER W FROM t1 WINDOW w AS (PARTITION BY j ORDER BY i) ORDER BY 1+SUM(i) OVER w").Check(
testkit.Rows("1", "2", "3", "4", "11", "22", "31", "33", "44", "61", "62", "93", "122", "124", "183", "244"),
)
}
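// TestUnsignedFeedback checks the scan range reported by explain analyze for
// an unsigned primary key when statistics feedback is enabled.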
func (s *testSerialSuite2) TestUnsignedFeedback(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1,1),(2,2)")
tk.MustExec("analyze table t")
tk.MustQuery("select count(distinct b) from t").Check(testkit.Rows("2"))
result := tk.MustQuery("explain analyze select count(distinct b) from t")
c.Assert(result.Rows()[2][4], Equals, "table:t")
c.Assert(result.Rows()[2][6], Equals, "range:[0,+inf], keep order:false")
}
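// TestIssue23567 makes sure collecting statistics feedback without NDV
// information does not panic.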
func (s *testSerialSuite2) TestIssue23567(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
failpoint.Enable("github.com/pingcap/tidb/statistics/feedbackNoNDVCollect", `return("")`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1, 1), (2, 2)")
tk.MustExec("analyze table t")
// The SQL should not panic.
tk.MustQuery("select count(distinct b) from t")
failpoint.Disable("github.com/pingcap/tidb/statistics/feedbackNoNDVCollect")
}
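// TestSummaryFailedUpdate checks that an update cancelled by the memory quota
// is still recorded in information_schema.statements_summary with the correct
// statement type.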
func (s *testSuite) TestSummaryFailedUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int as(-a))")
tk.MustExec("insert into t(a) values(1), (3), (7)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("set @@tidb_mem_quota_query=1")
err := tk.ExecToErr("update t set t.a = t.a - 1 where t.a in (select a from t where a < 4)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=1000000000")
tk.MustQuery("select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? )'").Check(testkit.Rows("Update"))
}
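// TestOOMPanicAction exercises the cancel-on-OOM action for select, insert,
// replace, delete and update statements under a tight tidb_mem_quota_query.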
func (s *testSuite) TestOOMPanicAction(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b double);")
tk.MustExec("insert into t values (1,1)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
tk.MustExec("set @@tidb_mem_quota_query=1;")
err := tk.QueryToErr("select sum(b) from t group by a;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
// Test OOM panic for insert, replace and insert/replace from select.
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (a bigint);")
tk.MustExec("create table t1 (a bigint);")
tk.MustExec("set @@tidb_mem_quota_query=200;")
_, err = tk.Exec("insert into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t1 values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=10;")
_, err = tk.Exec("insert into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
// Set the memory quota to 244 to make this SQL panic during the DeleteExec
// instead of the TableReaderExec.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete from t")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000;")
tk.MustExec("delete from t1")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete t, t1 from t join t1 on t.a = t1.a")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=100000;")
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(1),(2),(3)")
// Set the memory quota to make the SQL panic during UpdateExec instead
// of TableReaderExec.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("update t set a = 4")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
}
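// testRecoverTable groups the mock store, domain, cluster and client hijacker
// shared by the recover/flashback table tests below.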
type testRecoverTable struct {
store kv.Storage
dom *domain.Domain
cluster testutils.Cluster
cli *regionProperityClient
}
func (s *testRecoverTable) SetUpSuite(c *C) {
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
mockstore.WithClusterInspector(func(c testutils.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testRecoverTable) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
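// mockGC disables the emulator GC and returns a timestamp 48 hours in the
// past, one 48 hours in the future, the SQL template for setting the GC safe
// point, and a closure that restores the original GC setting.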
func (s *testRecoverTable) mockGC(tk *testkit.TestKit) (string, string, string, func()) {
originGC := ddl.IsEmulatorGCEnable()
resetGC := func() {
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}
// Disable emulator GC.
// Otherwise emulator GC will delete the table record as soon as possible after the drop table DDL is executed.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
timeAfterDrop := time.Now().Add(48 * 60 * 60 * time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
return timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC
}
func (s *testRecoverTable) TestRecoverTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
err := failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange")
c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create table t_recover (a int);")
timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC := s.mockGC(tk)
defer resetGC()
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
// Recover should fail if the GC safe point does not exist in mysql.tidb.
_, err := tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "can not get 'tikv_gc_safe_point'")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Should recover, and we can drop it straight away.
tk.MustExec("recover table t_recover")
tk.MustExec("drop table t_recover")
err = gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// Recover should fail if the drop job is earlier than the GC safe point.
tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop))
_, err = tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), "Can't find dropped/truncated table 't_recover' in GC safe point"), Equals, true)
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If a new table with the same name exists, recover should fail.
tk.MustExec("create table t_recover (a int);")
_, err = tk.Exec("recover table t_recover")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error())
// Rename the new table with the same name out of the way, then recover the table.
tk.MustExec("rename table t_recover to t_recover2")
// do recover table.
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_recover;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Recover table by a non-existent job ID should fail.
_, err = tk.Exec(fmt.Sprintf("recover table by job %d", 10000000))
c.Assert(err, NotNil)
// Disable GC manually first; after recover table, GC should remain disabled.
err = gcutil.DisableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("delete from t_recover where a > 1")
tk.MustExec("drop table t_recover")
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (7),(8),(9)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9"))
// Recover a truncated table.
tk.MustExec("truncate table t_recover")
tk.MustExec("rename table t_recover to t_recover_new")
tk.MustExec("recover table t_recover")
tk.MustExec("insert into t_recover values (10)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9", "10"))
// Test recovering the same table multiple times.
tk.MustExec("drop table t_recover")
tk.MustExec("flashback table t_recover to t_recover_tmp")
_, err = tk.Exec("recover table t_recover")
c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue)
gcEnable, err := gcutil.CheckGCEnable(tk.Se)
c.Assert(err, IsNil)
c.Assert(gcEnable, Equals, false)
}
func (s *testRecoverTable) TestFlashbackTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_flashback")
tk.MustExec("use test_flashback")
tk.MustExec("drop table if exists t_flashback")
tk.MustExec("create table t_flashback (a int);")
timeBeforeDrop, _, safePointSQL, resetGC := s.mockGC(tk)
defer resetGC()
// Set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Set GC enable.
err := gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("insert into t_flashback values (1),(2),(3)")
tk.MustExec("drop table t_flashback")
// Test flashback table with a non-existent table name.
_, err = tk.Exec("flashback table t_not_exists")
c.Assert(err.Error(), Equals, "Can't find dropped/truncated table: t_not_exists in DDL history jobs")
// Test that flashback table fails when a new table with the same name already exists.
tk.MustExec("create table t_flashback (a int);")
_, err = tk.Exec("flashback table t_flashback")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_flashback").Error())
// Rename the new table with the same name out of the way, then flashback the table.
tk.MustExec("rename table t_flashback to t_flashback_tmp")
// Test for flashback table.
tk.MustExec("flashback table t_flashback")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback values (4),(5),(6)")
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Test for flashback to new table.
tk.MustExec("drop table t_flashback")
tk.MustExec("create table t_flashback (a int);")
tk.MustExec("flashback table t_flashback to t_flashback2")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback2 values (7),(8),(9)")
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback2;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003"))
// Test flashing back the same table multiple times.
_, err = tk.Exec("flashback table t_flashback to t_flashback4")
c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue)
// Test for flashback truncated table to new table.
tk.MustExec("truncate table t_flashback2")
tk.MustExec("flashback table t_flashback2 to t_flashback3")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback3 values (10),(11)")
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback3;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003", "10 15001", "11 15002"))
// Test flashback of a dropped partitioned table.
tk.MustExec("drop table if exists t_p_flashback")
tk.MustExec("create table t_p_flashback (a int) partition by hash(a) partitions 4;")
tk.MustExec("insert into t_p_flashback values (1),(2),(3)")
tk.MustExec("drop table t_p_flashback")
tk.MustExec("flashback table t_p_flashback")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback order by a;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback values (4),(5)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002"))
// Test flashback of a truncated partitioned table.
tk.MustExec("truncate table t_p_flashback")
tk.MustExec("flashback table t_p_flashback to t_p_flashback1")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback1 order by a;").Check(testkit.Rows("1", "2", "3", "4", "5"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback1 values (6)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback1 order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 10001"))
tk.MustExec("drop database if exists Test2")
tk.MustExec("create database Test2")
tk.MustExec("use Test2")
tk.MustExec("create table t (a int);")
tk.MustExec("insert into t values (1),(2)")
tk.MustExec("drop table t")
tk.MustExec("flashback table t")
tk.MustQuery("select a from t order by a").Check(testkit.Rows("1", "2"))
tk.MustExec("drop table t")
tk.MustExec("drop database if exists Test3")
tk.MustExec("create database Test3")
tk.MustExec("use Test3")
tk.MustExec("create table t (a int);")
tk.MustExec("drop table t")
tk.MustExec("drop database Test3")
tk.MustExec("use Test2")
tk.MustExec("flashback table t")
tk.MustExec("insert into t values (3)")
tk.MustQuery("select a from t order by a").Check(testkit.Rows("1", "2", "3"))
}
func (s *testRecoverTable) TestRecoverTempTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("set tidb_enable_global_temporary_table=true")
tk.MustExec("create global temporary table t_recover (a int) on commit delete rows;")
timeBeforeDrop, _, safePointSQL, resetGC := s.mockGC(tk)
defer resetGC()
// Set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("drop table t_recover")
tk.MustGetErrCode("recover table t_recover;", errno.ErrUnsupportedDDLOperation)
tk.MustGetErrCode("flashback table t_recover;", errno.ErrUnsupportedDDLOperation)
}
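// TestPointGetPreparedPlan runs point-get queries through prepared statements
// on the primary key and a unique key, and checks that schema changes
// invalidate the cached plan.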
func (s *testSuiteP2) TestPointGetPreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
pspk2Id, _, _, err := tk1.Se.PrepareStmt("select * from t where ? = a ")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk2Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
// unique index
psuk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where b = ? ")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[psuk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// Test that a schema change invalidates the cached plan.
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec(`insert into t values(4, 3, 3, 11)`)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10", "4 3 3 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec("delete from t where a = 4")
tk1.MustExec("alter table t add index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// use pk again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
}
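// TestPointGetPreparedPlanWithCommitMode checks prepared point-get behaviour
// inside an explicit (non-autocommit) transaction, including a write conflict
// on commit.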
func (s *testSuiteP2) TestPointGetPreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use ps_text")
tk2.MustExec("update t set c = c + 10 where c = 1")
// try to point get again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// try to update in session 1
tk1.MustExec("update t set c = c + 10 where c = 1")
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 11"))
}
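// TestPointUpdatePreparedPlan runs point updates through prepared statements
// on the primary key and a unique key, and checks that index and schema
// changes invalidate the cached plan.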
func (s *testSuiteP2) TestPointUpdatePreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test")
defer tk1.MustExec("drop database if exists pu_test")
tk1.MustExec("create database pu_test")
tk1.MustExec("use pu_test")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
updateID1, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updateID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(pc, Equals, 1)
updateID2, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 2 where ? = a`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updateID2].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(pc, Equals, 1)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// updateID2
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 8"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
// unique index
updUkID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 10 where b = ?`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updUkID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 20"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 30"))
// Test that a schema change invalidates the cached plan.
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 31 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 32 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 42 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 52 10"))
tk1.MustExec("alter table t add unique index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 62 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 72 10"))
tk1.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1 10"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2 10"))
}
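// TestPointUpdatePreparedPlanWithCommitMode checks prepared point updates in
// an explicit transaction, including a write conflict with another session.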
func (s *testSuiteP2) TestPointUpdatePreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test2")
defer tk1.MustExec("drop database if exists pu_test2")
tk1.MustExec("create database pu_test2")
tk1.MustExec("use pu_test2")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
ctx := context.Background()
updateID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updateID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use pu_test2")
tk2.MustExec(`prepare pu2 from "update t set c = c + 2 where ? = a "`)
tk2.MustExec("set @p3 = 3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 7"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// try to update in session 1
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// Start another non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
tk1.MustExec("commit")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
}
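// TestPartitionHashCode runs concurrent full-table reads against a
// hash-partitioned table.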
func (s *testSuite1) TestPartitionHashCode(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec(`create table t(c1 bigint, c2 bigint, c3 bigint, primary key(c1))
partition by hash (c1) partitions 4;`)
wg := sync.WaitGroup{}
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
tk1 := testkit.NewTestKitWithInit(c, s.store)
for i := 0; i < 5; i++ {
tk1.MustExec("select * from t")
}
}()
}
wg.Wait()
}
func (s *testSuite1) TestAlterDefaultValue(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(a int, primary key(a))")
tk.MustExec("insert into t(a) values(1)")
tk.MustExec("alter table t add column b int default 1")
tk.MustExec("alter table t alter b set default 2")
tk.MustQuery("select b from t where a = 1").Check(testkit.Rows("1"))
}
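// testClusterTableSuite extends the base suite with a local RPC server so the
// cluster memory tables can be queried in tests.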
type testClusterTableSuite struct {
testSuiteWithCliBase
rpcserver *grpc.Server
listenAddr string
}
func (s *testClusterTableSuite) SetUpSuite(c *C) {
s.testSuiteWithCliBase.SetUpSuite(c)
s.rpcserver, s.listenAddr = s.setUpRPCService(c, "127.0.0.1:0")
}
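// setUpRPCService starts a TiDB RPC server on the given address, points the
// global status port at it, and returns the server and its actual address.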
func (s *testClusterTableSuite) setUpRPCService(c *C, addr string) (*grpc.Server, string) {
sm := &mockSessionManager1{}
sm.PS = append(sm.PS, &util.ProcessInfo{
ID: 1,
User: "root",
Host: "127.0.0.1",
Command: mysql.ComQuery,
})
lis, err := net.Listen("tcp", addr)
c.Assert(err, IsNil)
srv := server.NewRPCServer(config.GetGlobalConfig(), s.dom, sm)
port := lis.Addr().(*net.TCPAddr).Port
addr = fmt.Sprintf("127.0.0.1:%d", port)
go func() {
err = srv.Serve(lis)
c.Assert(err, IsNil)
}()
config.UpdateGlobal(func(conf *config.Config) {
conf.Status.StatusPort = uint(port)
})
return srv, addr
}
func (s *testClusterTableSuite) TearDownSuite(c *C) {
if s.rpcserver != nil {
s.rpcserver.Stop()
s.rpcserver = nil
}
s.testSuiteWithCliBase.TearDownSuite(c)
}
func (s *testSuiteP1) TestPrepareLoadData(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustGetErrCode(`prepare stmt from "load data local infile '/tmp/load_data_test.csv' into table test";`, mysql.ErrUnsupportedPs)
}
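// TestSlowQuery writes several rotated slow-log files and checks that the
// slow_query and cluster_slow_query tables return the same results for
// various time-range filters and time zones.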
func (s *testClusterTableSuite) TestSlowQuery(c *C) {
logData0 := ""
logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;`
logData2 := `
# Time: 2020-02-16T18:00:01.000000+08:00
select 3;
# Time: 2020-02-16T18:00:05.000000+08:00
select 4;`
logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 5;
# Time: 2020-02-17T18:00:05.000000+08:00
select 6;`
logData4 := `
# Time: 2020-05-14T19:03:54.314615176+08:00
select 7;`
logData := []string{logData0, logData1, logData2, logData3, logData4}
fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log"
fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log"
fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log"
fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log"
fileName4 := "tidb-slow.log"
fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
prepareLogs(c, logData, fileNames)
defer func() {
removeFiles(fileNames)
}()
tk := testkit.NewTestKitWithInit(c, s.store)
loc, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
tk.Se.GetSessionVars().TimeZone = loc
tk.MustExec("use information_schema")
cases := []struct {
prepareSQL string
sql string
result []string
}{
{
sql: "select count(*),min(time),max(time) from %s where time > '2019-01-26 21:51:00' and time < now()",
result: []string{"7|2020-02-15 18:00:01.000000|2020-05-14 19:03:54.314615"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-15 19:00:00' and time < '2020-02-16 18:00:02'",
result: []string{"2|2020-02-15 19:00:05.000000|2020-02-16 18:00:01.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 17:00:00'",
result: []string{"2|2020-02-16 18:00:05.000000|2020-02-16 19:00:00.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 20:00:00'",
result: []string{"3|2020-02-16 18:00:05.000000|2020-02-17 18:00:05.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s",
result: []string{"1|2020-05-14 19:03:54.314615|2020-05-14 19:03:54.314615"},
},
{
sql: "select count(*),min(time) from %s where time > '2020-02-16 20:00:00'",
result: []string{"1|2020-02-17 18:00:05.000000"},
},
{
sql: "select count(*) from %s where time > '2020-02-17 20:00:00'",
result: []string{"0"},
},
{
sql: "select query from %s where time > '2019-01-26 21:51:00' and time < now()",
result: []string{"select 1;", "select 2;", "select 3;", "select 4;", "select 5;", "select 6;", "select 7;"},
},
// Test for different time zones.
{
prepareSQL: "set @@time_zone = '+00:00'",
sql: "select time from %s where time = '2020-02-17 10:00:05.000000'",
result: []string{"2020-02-17 10:00:05.000000"},
},
{
prepareSQL: "set @@time_zone = '+02:00'",
sql: "select time from %s where time = '2020-02-17 12:00:05.000000'",
result: []string{"2020-02-17 12:00:05.000000"},
},
// Test for issue 17224
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from %s where time = '2020-05-14 19:03:54.314615'",
result: []string{"2020-05-14 19:03:54.314615"},
},
}
for _, cas := range cases {
if len(cas.prepareSQL) > 0 {
tk.MustExec(cas.prepareSQL)
}
sql := fmt.Sprintf(cas.sql, "slow_query")
tk.MustQuery(sql).Check(testutil.RowsWithSep("|", cas.result...))
sql = fmt.Sprintf(cas.sql, "cluster_slow_query")
tk.MustQuery(sql).Check(testutil.RowsWithSep("|", cas.result...))
}
}
func (s *testClusterTableSuite) TestIssue20236(c *C) {
logData0 := ""
logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;
# Time: 2020-02-15T20:00:05.000000+08:00`
logData2 := `select 3;
# Time: 2020-02-16T18:00:01.000000+08:00
select 4;
# Time: 2020-02-16T18:00:05.000000+08:00
select 5;`
logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 6;
# Time: 2020-02-17T18:00:05.000000+08:00
select 7;
# Time: 2020-02-17T19:00:00.000000+08:00`
logData4 := `select 8;
# Time: 2020-02-17T20:00:00.000000+08:00
select 9
# Time: 2020-05-14T19:03:54.314615176+08:00
select 10;`
logData := []string{logData0, logData1, logData2, logData3, logData4}
fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log"
fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log"
fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log"
fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log"
fileName4 := "tidb-slow.log"
fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
prepareLogs(c, logData, fileNames)
defer func() {
removeFiles(fileNames)
}()
tk := testkit.NewTestKitWithInit(c, s.store)
loc, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
tk.Se.GetSessionVars().TimeZone = loc
tk.MustExec("use information_schema")
cases := []struct {
prepareSQL string
sql string
result []string
}{
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000'",
result: []string{"2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time",
result: []string{"2020-02-15 18:00:01.000000", "2020-02-15 19:00:05.000000", "2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time desc",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000", "2020-02-15 19:00:05.000000", "2020-02-15 18:00:01.000000"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where time > '2020-02-15 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
result: []string{"9"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where (time > '2020-02-16 18:00:00' and time < '2020-05-14 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-17 20:00:00')",
result: []string{"6"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-02-17 20:00:00.000000' order by time desc",
result: []string{"5"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc limit 3",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
},
}
for _, cas := range cases {
if len(cas.prepareSQL) > 0 {
tk.MustExec(cas.prepareSQL)
}
tk.MustQuery(cas.sql).Check(testutil.RowsWithSep("|", cas.result...))
}
}
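// TestSQLDigestTextRetriever checks that SQLDigestTextRetriever maps a digest
// back to its normalized SQL for statements recorded locally and leaves
// unknown digests empty.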
func (s *testClusterTableSuite) TestSQLDigestTextRetriever(c *C) {
tkInit := testkit.NewTestKitWithInit(c, s.store)
tkInit.MustExec("set global tidb_enable_stmt_summary = 1")
tkInit.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1"))
tkInit.MustExec("drop table if exists test_sql_digest_text_retriever")
tkInit.MustExec("create table test_sql_digest_text_retriever (id int primary key, v int)")
tk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("insert into test_sql_digest_text_retriever values (1, 1)")
insertNormalized, insertDigest := parser.NormalizeDigest("insert into test_sql_digest_text_retriever values (1, 1)")
_, updateDigest := parser.NormalizeDigest("update test_sql_digest_text_retriever set v = v + 1 where id = 1")
r := &expression.SQLDigestTextRetriever{
SQLDigestsMap: map[string]string{
insertDigest.String(): "",
updateDigest.String(): "",
},
}
err := r.RetrieveLocal(context.Background(), tk.Se)
c.Assert(err, IsNil)
c.Assert(r.SQLDigestsMap[insertDigest.String()], Equals, insertNormalized)
c.Assert(r.SQLDigestsMap[updateDigest.String()], Equals, "")
}
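// TestFunctionDecodeSQLDigests checks tidb_decode_sql_digests against normal
// digests, truncation lengths, empty arrays, NULL, malformed input and wrong
// argument counts.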
func (s *testClusterTableSuite) TestFunctionDecodeSQLDigests(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("set global tidb_enable_stmt_summary = 1")
tk.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists test_func_decode_sql_digests")
tk.MustExec("create table test_func_decode_sql_digests(id int primary key, v int)")
q1 := "begin"
norm1, digest1 := parser.NormalizeDigest(q1)
q2 := "select @@tidb_current_ts"
norm2, digest2 := parser.NormalizeDigest(q2)
q3 := "select id, v from test_func_decode_sql_digests where id = 1 for update"
norm3, digest3 := parser.NormalizeDigest(q3)
// The TIDB_DECODE_SQL_DIGESTS function doesn't actually do "decoding"; instead it queries `statements_summary` and its
// variants for the corresponding statements.
// Execute the statements so that the queries will be saved into statements_summary table.
tk.MustExec(q1)
// Save the ts to query the transaction from tidb_trx.
ts, err := strconv.ParseUint(tk.MustQuery(q2).Rows()[0][0].(string), 10, 64)
c.Assert(err, IsNil)
c.Assert(ts, Greater, uint64(0))
tk.MustExec(q3)
tk.MustExec("rollback")
// Test decoding the digests, with and without statement truncation.
decoded := fmt.Sprintf(`["%s","%s","%s"]`, norm1, norm2, norm3)
digests := fmt.Sprintf(`["%s","%s","%s"]`, digest1, digest2, digest3)
tk.MustQuery("select tidb_decode_sql_digests(?, 0)", digests).Check(testkit.Rows(decoded))
// The three queries are shorter than, equal to, and longer than the truncate length, respectively.
tk.MustQuery("select tidb_decode_sql_digests(?, ?)", digests, len(norm2)).Check(testkit.Rows(
"[\"begin\",\"select @@tidb_current_ts\",\"select `id` , `v` from `...\"]"))
// Empty array.
tk.MustQuery("select tidb_decode_sql_digests('[]')").Check(testkit.Rows("[]"))
// NULL
tk.MustQuery("select tidb_decode_sql_digests(null)").Check(testkit.Rows("<nil>"))
// Array containing wrong types and non-existent digests (mapped to null).
tk.MustQuery("select tidb_decode_sql_digests(?)", fmt.Sprintf(`["%s",1,null,"%s",{"a":1},[2],"%s","","abcde"]`, digest1, digest2, digest3)).
Check(testkit.Rows(fmt.Sprintf(`["%s",null,null,"%s",null,null,"%s",null,null]`, norm1, norm2, norm3)))
// Not a JSON array (produces warnings).
tk.MustQuery(`select tidb_decode_sql_digests('{"a":1}')`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: '{"a":1}'`))
tk.MustQuery(`select tidb_decode_sql_digests('aabbccdd')`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: 'aabbccdd'`))
// Invalid argument count.
tk.MustGetErrCode("select tidb_decode_sql_digests('a', 1, 2)", 1582)
tk.MustGetErrCode("select tidb_decode_sql_digests()", 1582)
}
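// TestFunctionDecodeSQLDigestsPrivilege verifies that tidb_decode_sql_digests
// requires the PROCESS privilege.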
func (s *testClusterTableSuite) TestFunctionDecodeSQLDigestsPrivilege(c *C) {
dropUserTk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(dropUserTk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("create user 'testuser'@'localhost'")
defer dropUserTk.MustExec("drop user 'testuser'@'localhost'")
c.Assert(tk.Se.Auth(&auth.UserIdentity{
Username: "testuser",
Hostname: "localhost",
}, nil, nil), IsTrue)
err := tk.ExecToErr("select tidb_decode_sql_digests('[\"aa\"]')")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[expression:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation")
tk = testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("create user 'testuser2'@'localhost'")
defer dropUserTk.MustExec("drop user 'testuser2'@'localhost'")
tk.MustExec("grant process on *.* to 'testuser2'@'localhost'")
c.Assert(tk.Se.Auth(&auth.UserIdentity{
Username: "testuser2",
Hostname: "localhost",
}, nil, nil), IsTrue)
_ = tk.MustQuery("select tidb_decode_sql_digests('[\"aa\"]')")
}
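// prepareLogs writes each log chunk to the corresponding file name, creating
// or truncating the file as needed.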
func prepareLogs(c *C, logData []string, fileNames []string) {
writeFile := func(file string, data string) {
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
c.Assert(err, IsNil)
_, err = f.Write([]byte(data))
c.Assert(err, IsNil)
c.Assert(f.Close(), IsNil)
}
for i, log := range logData {
writeFile(fileNames[i], log)
}
}
func removeFiles(fileNames []string) {
for _, fileName := range fileNames {
os.Remove(fileName)
}
}
func (s *testSuite1) TestIssue15718(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table tt(a decimal(10, 0), b varchar(1), c time);")
tk.MustExec("insert into tt values(0, '2', null), (7, null, '1122'), (NULL, 'w', null), (NULL, '2', '3344'), (NULL, NULL, '0'), (7, 'f', '33');")
tk.MustQuery("select a and b as d, a or c as e from tt;").Check(testkit.Rows("0 <nil>", "<nil> 1", "0 <nil>", "<nil> 1", "<nil> <nil>", "0 1"))
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table tt(a decimal(10, 0), b varchar(1), c time);")
tk.MustExec("insert into tt values(0, '2', '123'), (7, null, '1122'), (null, 'w', null);")
tk.MustQuery("select a and b as d, a, b from tt order by d limit 1;").Check(testkit.Rows("<nil> 7 <nil>"))
tk.MustQuery("select b or c as d, b, c from tt order by d limit 1;").Check(testkit.Rows("<nil> w <nil>"))
tk.MustExec("drop table if exists t0;")
tk.MustExec("CREATE TABLE t0(c0 FLOAT);")
tk.MustExec("INSERT INTO t0(c0) VALUES (NULL);")
tk.MustQuery("SELECT * FROM t0 WHERE NOT(0 OR t0.c0);").Check(testkit.Rows())
}
func (s *testSuite1) TestIssue15767(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table t(a int, b char);")
tk.MustExec("insert into t values (1,'s'),(2,'b'),(1,'c'),(2,'e'),(1,'a');")
tk.MustExec("insert into t select * from t;")
tk.MustExec("insert into t select * from t;")
tk.MustExec("insert into t select * from t;")
tk.MustQuery("select b, count(*) from ( select b from t order by a limit 20 offset 2) as s group by b order by b;").Check(testkit.Rows("a 6", "c 7", "s 7"))
}
func (s *testSuite1) TestIssue16025(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t0;")
tk.MustExec("CREATE TABLE t0(c0 NUMERIC PRIMARY KEY);")
tk.MustExec("INSERT IGNORE INTO t0(c0) VALUES (NULL);")
tk.MustQuery("SELECT * FROM t0 WHERE c0;").Check(testkit.Rows())
}
func (s *testSuite1) TestIssue16854(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("CREATE TABLE `t` ( `a` enum('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL)")
tk.MustExec("insert into t values(1),(2),(3),(4),(5),(6),(7);")
for i := 0; i < 7; i++ {
tk.MustExec("insert into t select * from t;")
}
tk.MustExec("set @@tidb_max_chunk_size=100;")
tk.MustQuery("select distinct a from t order by a").Check(testkit.Rows("WAITING", "PRINTED", "STOCKUP", "CHECKED", "OUTSTOCK", "PICKEDUP", "WILLBACK"))
tk.MustExec("drop table t")
tk.MustExec("CREATE TABLE `t` ( `a` set('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL)")
tk.MustExec("insert into t values(1),(2),(3),(4),(5),(6),(7);")
for i := 0; i < 7; i++ {
tk.MustExec("insert into t select * from t;")
}
tk.MustExec("set @@tidb_max_chunk_size=100;")
tk.MustQuery("select distinct a from t order by a").Check(testkit.Rows("WAITING", "PRINTED", "WAITING,PRINTED", "STOCKUP", "WAITING,STOCKUP", "PRINTED,STOCKUP", "WAITING,PRINTED,STOCKUP"))
tk.MustExec("drop table t")
}
func (s *testSuite) TestIssue16921(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a float);")
tk.MustExec("create index a on t(a);")
tk.MustExec("insert into t values (1.0), (NULL), (0), (2.0);")
tk.MustQuery("select `a` from `t` use index (a) where !`a`;").Check(testkit.Rows("0"))
tk.MustQuery("select `a` from `t` ignore index (a) where !`a`;").Check(testkit.Rows("0"))
tk.MustQuery("select `a` from `t` use index (a) where `a`;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select `a` from `t` ignore index (a) where `a`;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not a is true;").Check(testkit.Rows("<nil>", "0"))
tk.MustQuery("select a from t use index (a) where not not a is true;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not not a;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not not not a is true;").Check(testkit.Rows("<nil>", "0"))
tk.MustQuery("select a from t use index (a) where not not not a;").Check(testkit.Rows("0"))
}
func (s *testSuite) TestIssue19100(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1 (c decimal);")
tk.MustExec("create table t2 (c decimal, key(c));")
tk.MustExec("insert into t1 values (null);")
tk.MustExec("insert into t2 values (null);")
tk.MustQuery("select count(*) from t1 where not c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t2 where not c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t1 where c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t2 where c;").Check(testkit.Rows("0"))
}
// This test is from JIRA issue #5856.
func (s *testSuite1) TestInsertValuesWithSubQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a int, b int, c int)")
defer tk.MustExec("drop table if exists t2")
// should not reference upper scope
c.Assert(tk.ExecToErr("insert into t2 values (11, 8, (select not b))"), NotNil)
c.Assert(tk.ExecToErr("insert into t2 set a = 11, b = 8, c = (select b))"), NotNil)
// subquery reference target table is allowed
tk.MustExec("insert into t2 values(1, 1, (select b from t2))")
tk.MustQuery("select * from t2").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("insert into t2 set a = 1, b = 1, c = (select b+1 from t2)")
tk.MustQuery("select * from t2").Check(testkit.Rows("1 1 <nil>", "1 1 2"))
// Inserting a value that references another column should work normally.
tk.MustExec("delete from t2")
tk.MustExec("insert into t2 values(2, 4, a)")
tk.MustQuery("select * from t2").Check(testkit.Rows("2 4 2"))
tk.MustExec("insert into t2 set a = 3, b = 5, c = b")
tk.MustQuery("select * from t2").Check(testkit.Rows("2 4 2", "3 5 5"))
}
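// TestDIVZeroInPartitionExpr checks that division by zero in a partition
// expression is tolerated with an empty sql_mode and rejected with
// ErrDivisionByZero under ERROR_FOR_DIVISION_BY_ZERO.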
func (s *testSuite1) TestDIVZeroInPartitionExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int) partition by range (10 div a) (partition p0 values less than (10), partition p1 values less than maxvalue)")
defer tk.MustExec("drop table if exists t1")
tk.MustExec("set @@sql_mode=''")
tk.MustExec("insert into t1 values (NULL), (0), (1)")
tk.MustExec("set @@sql_mode='STRICT_ALL_TABLES,ERROR_FOR_DIVISION_BY_ZERO'")
tk.MustGetErrCode("insert into t1 values (NULL), (0), (1)", mysql.ErrDivisionByZero)
}
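// TestInsertIntoGivenPartitionSet checks INSERT/REPLACE ... PARTITION(...):
// rows must land in one of the listed partitions, unknown partitions are
// rejected, and the clause is not allowed on non-partitioned tables.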
func (s *testSuite1) TestInsertIntoGivenPartitionSet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec(`create table t1(
a int(11) DEFAULT NULL,
b varchar(10) DEFAULT NULL,
UNIQUE KEY idx_a (a)) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
defer tk.MustExec("drop table if exists t1")
// insert into
tk.MustExec("insert into t1 partition(p0) values(1, 'a'), (2, 'b')")
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b"))
tk.MustExec("insert into t1 partition(p0, p1) values(3, 'c'), (4, 'd')")
tk.MustQuery("select * from t1 partition(p1)").Check(testkit.Rows())
tk.MustGetErrMsg("insert into t1 values(1, 'a')", "[kv:1062]Duplicate entry '1' for key 'idx_a'")
tk.MustGetErrMsg("insert into t1 partition(p0, p_non_exist) values(1, 'a')", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("insert into t1 partition(p0, p1) values(40, 'a')", "[table:1748]Found a row not matching the given partition set")
// replace into
tk.MustExec("replace into t1 partition(p0) values(1, 'replace')")
tk.MustExec("replace into t1 partition(p0, p1) values(3, 'replace'), (4, 'replace')")
tk.MustExec("replace into t1 values(1, 'a')")
tk.MustQuery("select * from t1 partition (p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 replace", "4 replace"))
tk.MustGetErrMsg("replace into t1 partition(p0, p_non_exist) values(1, 'a')", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("replace into t1 partition(p0, p1) values(40, 'a')", "[table:1748]Found a row not matching the given partition set")
tk.MustExec("truncate table t1")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b char(10))")
defer tk.MustExec("drop table if exists t")
// insert into general table
tk.MustGetErrMsg("insert into t partition(p0, p1) values(1, 'a')", "[planner:1747]PARTITION () clause on non partitioned table")
// insert into from select
tk.MustExec("insert into t values(1, 'a'), (2, 'b')")
tk.MustExec("insert into t1 partition(p0) select * from t")
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b"))
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(3, 'c'), (4, 'd')")
tk.MustExec("insert into t1 partition(p0, p1) select * from t")
tk.MustQuery("select * from t1 partition(p1) order by a").Check(testkit.Rows())
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 c", "4 d"))
tk.MustGetErrMsg("insert into t1 select 1, 'a'", "[kv:1062]Duplicate entry '1' for key 'idx_a'")
tk.MustGetErrMsg("insert into t1 partition(p0, p_non_exist) select 1, 'a'", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("insert into t1 partition(p0, p1) select 40, 'a'", "[table:1748]Found a row not matching the given partition set")
// replace into from select
tk.MustExec("replace into t1 partition(p0) select 1, 'replace'")
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(3, 'replace'), (4, 'replace')")
tk.MustExec("replace into t1 partition(p0, p1) select * from t")
tk.MustExec("replace into t1 select 1, 'a'")
tk.MustQuery("select * from t1 partition (p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 replace", "4 replace"))
tk.MustGetErrMsg("replace into t1 partition(p0, p_non_exist) select 1, 'a'", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("replace into t1 partition(p0, p1) select 40, 'a'", "[table:1748]Found a row not matching the given partition set")
}
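// TestUpdateGivenPartitionSet checks UPDATE ... PARTITION(...) on range and
// hash partitioned tables: updates that move a row out of the listed
// partitions or that name a non-existent partition must fail.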
func (s *testSuite1) TestUpdateGivenPartitionSet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1,t2,t3,t4")
tk.MustExec(`create table t1(
a int(11),
b varchar(10) DEFAULT NULL,
primary key idx_a (a)) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
tk.MustExec(`create table t2(
a int(11) DEFAULT NULL,
b varchar(10) DEFAULT NULL) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
tk.MustExec(`create table t3 (a int(11), b varchar(10) default null)`)
defer tk.MustExec("drop table if exists t1,t2,t3")
tk.MustExec("insert into t3 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err := tk.ExecToErr("update t3 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[planner:1747]PARTITION () clause on non partitioned table")
// update with primary key change
tk.MustExec("insert into t1 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err = tk.ExecToErr("update t1 partition(p0, p1) set a = 40")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
err = tk.ExecToErr("update t1 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
// Test a non-existent partition.
err = tk.ExecToErr("update t1 partition (p0, p_non_exist) set a = 40")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
// test join.
err = tk.ExecToErr("update t1 partition (p0), t3 set t1.a = 40 where t3.a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
tk.MustExec("update t1 partition(p0) set a = 3 where a = 2")
tk.MustExec("update t1 partition(p0, p3) set a = 33 where a = 1")
// update without partition change
tk.MustExec("insert into t2 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err = tk.ExecToErr("update t2 partition(p0, p1) set a = 40")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
err = tk.ExecToErr("update t2 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
tk.MustExec("update t2 partition(p0) set a = 3 where a = 2")
tk.MustExec("update t2 partition(p0, p3) set a = 33 where a = 1")
tk.MustExec("create table t4(a int primary key, b int) partition by hash(a) partitions 2")
tk.MustExec("insert into t4(a, b) values(1, 1),(2, 2),(3, 3);")
err = tk.ExecToErr("update t4 partition(p0) set a = 5 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
}
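// TestApplyCache checks the apply operator cache reported by `explain analyze`:
// the cache is ON with a high hit ratio when the correlated column has many
// duplicates, and OFF when every value is distinct.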
func (s *testSuiteP2) TestApplyCache(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values (1),(1),(1),(1),(1),(1),(1),(1),(1);")
tk.MustExec("analyze table t;")
result := tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;")
c.Assert(result.Rows()[1][0], Equals, "└─Apply_39")
var (
ind int
flag bool
)
value := (result.Rows()[1][5]).(string)
for ind = 0; ind < len(value)-5; ind++ {
if value[ind:ind+5] == "cache" {
flag = true
break
}
}
c.Assert(flag, Equals, true)
c.Assert(value[ind:], Equals, "cache:ON, cacheHitRatio:88.889%")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7),(8),(9);")
tk.MustExec("analyze table t;")
result = tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;")
c.Assert(result.Rows()[1][0], Equals, "└─Apply_39")
flag = false
value = (result.Rows()[1][5]).(string)
for ind = 0; ind < len(value)-5; ind++ {
if value[ind:ind+5] == "cache" {
flag = true
break
}
}
c.Assert(flag, Equals, true)
c.Assert(value[ind:], Equals, "cache:OFF")
}
// For issue 17256
func (s *testSuite) TestGenerateColumnReplace(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int as (a + 1) virtual not null, unique index idx(b));")
tk.MustExec("REPLACE INTO `t1` (`a`) VALUES (2);")
tk.MustExec("REPLACE INTO `t1` (`a`) VALUES (2);")
tk.MustQuery("select * from t1").Check(testkit.Rows("2 3"))
tk.MustExec("insert into `t1` (`a`) VALUES (2) on duplicate key update a = 3;")
tk.MustQuery("select * from t1").Check(testkit.Rows("3 4"))
}
func (s *testSlowQuery) TestSlowQueryWithoutSlowLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
newCfg.Log.SlowQueryFile = "tidb-slow-not-exist.log"
newCfg.Log.SlowThreshold = math.MaxUint64
config.StoreGlobalConfig(&newCfg)
defer func() {
config.StoreGlobalConfig(originCfg)
}()
tk.MustQuery("select query from information_schema.slow_query").Check(testkit.Rows())
tk.MustQuery("select query from information_schema.slow_query where time > '2020-09-15 12:16:39' and time < now()").Check(testkit.Rows())
}
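// TestSlowQuerySensitiveQuery checks that passwords in CREATE USER, ALTER USER
// and SET PASSWORD statements are masked in information_schema.slow_query.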
func (s *testSlowQuery) TestSlowQuerySensitiveQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
tk.MustExec("set tidb_slow_log_threshold=300;")
config.StoreGlobalConfig(originCfg)
err = os.Remove(newCfg.Log.SlowQueryFile)
c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustExec("drop user if exists user_sensitive;")
tk.MustExec("create user user_sensitive identified by '123456789';")
tk.MustExec("alter user 'user_sensitive'@'%' identified by 'abcdefg';")
tk.MustExec("set password for 'user_sensitive'@'%' = 'xyzuvw';")
tk.MustQuery("select query from `information_schema`.`slow_query` " +
"where (query like 'set password%' or query like 'create user%' or query like 'alter user%') " +
"and query like '%user_sensitive%' order by query;").
Check(testkit.Rows(
"alter user {user_sensitive@% password = ***};",
"create user {user_sensitive@% password = ***};",
"set password for user user_sensitive@%;",
))
}
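// TestSlowQueryPrepared checks that executed prepared statements appear in the
// slow log with their arguments, and are normalized when tidb_redact_log is on.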
func (s *testSlowQuery) TestSlowQueryPrepared(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
tk.MustExec("set tidb_slow_log_threshold=300;")
tk.MustExec("set tidb_redact_log=0;")
config.StoreGlobalConfig(originCfg)
os.Remove(newCfg.Log.SlowQueryFile)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustExec(`prepare mystmt1 from 'select sleep(?), 1';`)
tk.MustExec("SET @num = 0.01;")
tk.MustExec("execute mystmt1 using @num;")
tk.MustQuery("SELECT Query FROM `information_schema`.`slow_query` " +
"where query like 'select%sleep%' order by time desc limit 1").
Check(testkit.Rows(
"select sleep(?), 1 [arguments: 0.01];",
))
tk.MustExec("set tidb_redact_log=1;")
tk.MustExec(`prepare mystmt2 from 'select sleep(?), 2';`)
tk.MustExec("execute mystmt2 using @num;")
tk.MustQuery("SELECT Query FROM `information_schema`.`slow_query` " +
"where query like 'select%sleep%' order by time desc limit 1").
Check(testkit.Rows(
"select `sleep` ( ? ) , ?;",
))
}
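// TestLogSlowLogIndex checks that the indexes used by a statement are recorded
// in the index_names column of information_schema.slow_query.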
func (s *testSlowQuery) TestLogSlowLogIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.Log.SlowQueryFile = f.Name()
})
err = logutil.InitLogger(config.GetGlobalConfig().Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("use test")
tk.MustExec("create table t (a int, b int,index idx(a));")
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustQuery("select * from t use index (idx) where a in (1) union select * from t use index (idx) where a in (2,3);")
tk.MustExec("set tidb_slow_log_threshold=300;")
tk.MustQuery("select index_names from `information_schema`.`slow_query` " +
"where query like 'select%union%' limit 1").
Check(testkit.Rows(
"[t:idx]",
))
}
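// TestSlowQuery writes a slow log file by hand and checks that time-range
// filters on information_schema.slow_query return the expected entries while
// the file is parsed with a batch size of 1.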
func (s *testSlowQuery) TestSlowQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
_, err = f.WriteString(`
# Time: 2020-10-13T20:08:13.970563+08:00
select * from t;
# Time: 2020-10-16T20:08:13.970563+08:00
select * from t;
`)
c.Assert(err, IsNil)
err = f.Close()
c.Assert(err, IsNil)
executor.ParseSlowLogBatchSize = 1
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
executor.ParseSlowLogBatchSize = 64
config.StoreGlobalConfig(originCfg)
err = os.Remove(newCfg.Log.SlowQueryFile)
c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2020-10-16 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2019-10-13 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("2"))
}
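// TestKillTableReader checks that killing the session while a distsql scan is
// retrying makes the query fail with ErrQueryInterrupted.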
func (s *testSerialSuite) TestKillTableReader(c *C) {
var retry = "github.com/tikv/client-go/v2/locate/mockRetrySendReqToRegion"
defer func() {
c.Assert(failpoint.Disable(retry), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int)")
tk.MustExec("insert into t values (1),(2),(3)")
tk.MustExec("set @@tidb_distsql_scan_concurrency=1")
atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 0)
c.Assert(failpoint.Enable(retry, `return(true)`), IsNil)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(1 * time.Second)
err := tk.QueryToErr("select * from t")
c.Assert(err, NotNil)
c.Assert(int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code), Equals, int(executor.ErrQueryInterrupted.Code()))
}()
atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 1)
wg.Wait()
}
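// TestPrevStmtDesensitization checks that with tidb_redact_log enabled, the
// previous statement and duplicate-key errors are desensitized.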
func (s *testSerialSuite) TestPrevStmtDesensitization(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec(fmt.Sprintf("set @@session.%v=1", variable.TiDBRedactLog))
defer tk.MustExec(fmt.Sprintf("set @@session.%v=0", variable.TiDBRedactLog))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, unique key (a))")
tk.MustExec("begin")
tk.MustExec("insert into t values (1),(2)")
c.Assert(tk.Se.GetSessionVars().PrevStmt.String(), Equals, "insert into `t` values ( ? ) , ( ? )")
c.Assert(tk.ExecToErr("insert into t values (1)").Error(), Equals, `[kv:1062]Duplicate entry '?' for key 'a'`)
}
func (s *testSuite) TestIssue19372(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1 (c_int int, c_str varchar(40), key(c_str));")
tk.MustExec("create table t2 like t1;")
tk.MustExec("insert into t1 values (1, 'a'), (2, 'b'), (3, 'c');")
tk.MustExec("insert into t2 select * from t1;")
tk.MustQuery("select (select t2.c_str from t2 where t2.c_str <= t1.c_str and t2.c_int in (1, 2) order by t2.c_str limit 1) x from t1 order by c_int;").Check(testkit.Rows("a", "a", "a"))
}
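// TestCollectCopRuntimeStats checks that coprocessor runtime statistics
// (rpc_num, regionMiss) show up in `explain analyze` when execution info
// collection is enabled.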
func (s *testSerialSuite1) TestCollectCopRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("set tidb_enable_collect_execution_info=1;")
c.Assert(failpoint.Enable("tikvclient/tikvStoreRespResult", `return(true)`), IsNil)
rows := tk.MustQuery("explain analyze select * from t1").Rows()
c.Assert(len(rows), Equals, 2)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*rpc_num: 2, .*regionMiss:.*")
c.Assert(failpoint.Disable("tikvclient/tikvStoreRespResult"), IsNil)
}
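// TestIndexLookupRuntimeStats checks that `explain analyze` of an index lookup
// reports index_task and table_task statistics for the reader and cop_task
// statistics for its children.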
func (s *testSerialSuite1) TestIndexLookupRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int, index(a))")
tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)")
sql := "explain analyze select * from t1 use index(a) where a > 1;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 3)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*")
indexExplain := fmt.Sprintf("%v", rows[1])
tableExplain := fmt.Sprintf("%v", rows[2])
c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*")
}
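// TestHashAggRuntimeStats checks that `explain analyze` of a hash aggregation
// reports partial_worker and final_worker statistics.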
func (s *testSerialSuite1) TestHashAggRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)")
sql := "explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 5)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*")
}
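// TestIndexMergeRuntimeStats checks that `explain analyze` of an index merge
// reader reports index_task and table_task statistics, and that the query
// still works with execution info collection disabled.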
func (s *testSerialSuite1) TestIndexMergeRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("set @@tidb_enable_index_merge = 1")
tk.MustExec("create table t1(id int primary key, a int, b int, c int, d int)")
tk.MustExec("create index t1a on t1(a)")
tk.MustExec("create index t1b on t1(b)")
tk.MustExec("insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5)")
sql := "explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 4)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*")
tableRangeExplain := fmt.Sprintf("%v", rows[1])
indexExplain := fmt.Sprintf("%v", rows[2])
tableExplain := fmt.Sprintf("%v", rows[3])
c.Assert(tableRangeExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*")
tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
sql = "select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a"
tk.MustQuery(sql).Check(testkit.Rows("1 1 1 1 1", "5 5 5 5 5"))
}
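// TestCollectDMLRuntimeStats checks that runtime statistics (RPC, prefetch and
// lock-keys details) are collected for DML statements and pessimistic
// transactions.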
func (s *testSuite) TestCollectDMLRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int, unique index (a))")
testSQLs := []string{
"insert ignore into t1 values (5,5);",
"insert into t1 values (5,5) on duplicate key update a=a+1;",
"replace into t1 values (5,6),(6,7)",
"update t1 set a=a+1 where a=6;",
}
getRootStats := func() string {
info := tk.Se.ShowProcess()
c.Assert(info, NotNil)
p, ok := info.Plan.(plannercore.Plan)
c.Assert(ok, IsTrue)
stats := tk.Se.GetSessionVars().StmtCtx.RuntimeStatsColl.GetRootStats(p.ID())
return stats.String()
}
for _, sql := range testSQLs {
tk.MustExec(sql)
c.Assert(getRootStats(), Matches, "time.*loops.*Get.*num_rpc.*total_time.*")
}
// Test for lock keys stats.
tk.MustExec("begin pessimistic")
tk.MustExec("update t1 set b=b+1")
c.Assert(getRootStats(), Matches, "time.*lock_keys.*time.* region.* keys.* lock_rpc:.* rpc_count.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustQuery("select * from t1 for update").Check(testkit.Rows("5 6", "7 7"))
c.Assert(getRootStats(), Matches, "time.*lock_keys.*time.* region.* keys.* lock_rpc:.* rpc_count.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert ignore into t1 values (9,9)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:{BatchGet:{num_rpc:.*, total_time:.*}}}.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert into t1 values (10,10) on duplicate key update a=a+1")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:{BatchGet:{num_rpc:.*, total_time:.*}.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert into t1 values (1,2)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, insert:.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert ignore into t1 values(11,11) on duplicate key update `a`=`a`+1")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:.*}")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("replace into t1 values (1,4)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prefetch:.*, rpc:.*")
tk.MustExec("rollback")
}
func (s *testSuite) TestIssue13758(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (pk int(11) primary key, a int(11) not null, b int(11), key idx_b(b), key idx_a(a))")
tk.MustExec("insert into `t1` values (1,1,0),(2,7,6),(3,2,null),(4,1,null),(5,4,5)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert into t2 values (1),(null)")
tk.MustQuery("select (select a from t1 use index(idx_a) where b >= t2.a order by a limit 1) as field from t2").Check(testkit.Rows(
"4",
"<nil>",
))
}
func (s *testCoprCache) SetUpSuite(c *C) {
originConfig := config.GetGlobalConfig()
config.StoreGlobalConfig(config.NewConfig())
defer config.StoreGlobalConfig(originConfig)
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c testutils.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cls = c
}),
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testCoprCache) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
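// TestIntegrationCopCache checks the coprocessor cache: `explain analyze`
// reports a positive hit ratio when the cache is enabled and "disabled" when
// its capacity is set to 0.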
func (s *testCoprCache) TestIntegrationCopCache(c *C) {
originConfig := config.GetGlobalConfig()
config.StoreGlobalConfig(config.NewConfig())
defer config.StoreGlobalConfig(originConfig)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tid := tblInfo.Meta().ID
tk.MustExec(`insert into t values(1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12)`)
tableStart := tablecodec.GenTableRecordPrefix(tid)
s.cls.SplitKeys(tableStart, tableStart.PrefixNext(), 6)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/cophandler/mockCopCacheInUnistore", `return(123)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/cophandler/mockCopCacheInUnistore"), IsNil)
}()
rows := tk.MustQuery("explain analyze select * from t where t.a < 10").Rows()
c.Assert(rows[0][2], Equals, "9")
c.Assert(strings.Contains(rows[0][5].(string), "cop_task: {num: 5"), Equals, true)
c.Assert(strings.Contains(rows[0][5].(string), "copr_cache_hit_ratio: 0.00"), Equals, true)
rows = tk.MustQuery("explain analyze select * from t").Rows()
c.Assert(rows[0][2], Equals, "12")
c.Assert(strings.Contains(rows[0][5].(string), "cop_task: {num: 6"), Equals, true)
hitRatioIdx := strings.Index(rows[0][5].(string), "copr_cache_hit_ratio:") + len("copr_cache_hit_ratio: ")
c.Assert(hitRatioIdx >= len("copr_cache_hit_ratio: "), Equals, true)
hitRatio, err := strconv.ParseFloat(rows[0][5].(string)[hitRatioIdx:hitRatioIdx+4], 64)
c.Assert(err, IsNil)
c.Assert(hitRatio > 0, Equals, true)
// Test for cop cache disabled.
cfg := config.NewConfig()
cfg.TiKVClient.CoprCache.CapacityMB = 0
config.StoreGlobalConfig(cfg)
rows = tk.MustQuery("explain analyze select * from t where t.a < 10").Rows()
c.Assert(rows[0][2], Equals, "9")
c.Assert(strings.Contains(rows[0][5].(string), "copr_cache: disabled"), Equals, true)
}
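// TestCoprocessorOOMTicase checks that with the log OOM action the memory
// quota is exceeded (triggering the action) while cop requests still return
// complete results, for both keep-order and non-keep-order scans.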
func (s *testSerialSuite) TestCoprocessorOOMTicase(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`set @@tidb_wait_split_region_finish=1`)
// Create a table for the non-keep-order case.
tk.MustExec("drop table if exists t5")
tk.MustExec("create table t5(id int)")
tk.MustQuery(`split table t5 between (0) and (10000) regions 10`).Check(testkit.Rows("9 1"))
// Create a table for the keep-order case.
tk.MustExec("drop table if exists t6")
tk.MustExec("create table t6(id int, index(id))")
tk.MustQuery(`split table t6 between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
tk.MustQuery("split table t6 INDEX id between (0) and (10000) regions 10;").Check(testkit.Rows("10 1"))
count := 10
for i := 0; i < count; i++ {
tk.MustExec(fmt.Sprintf("insert into t5 (id) values (%v)", i))
tk.MustExec(fmt.Sprintf("insert into t6 (id) values (%v)", i))
}
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionLog
})
testcases := []struct {
name string
sql string
}{
{
name: "keep Order",
sql: "select id from t6 order by id",
},
{
name: "non keep Order",
sql: "select id from t5",
},
}
f := func() {
for _, testcase := range testcases {
c.Log(testcase.name)
// Larger than one copResponse, smaller than two copResponses.
quota := 2*copr.MockResponseSizeForTest - 100
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
tk.Se = se
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("set @@tidb_mem_quota_query=%v;", quota))
var expect []string
for i := 0; i < count; i++ {
expect = append(expect, fmt.Sprintf("%v", i))
}
tk.MustQuery(testcase.sql).Sort().Check(testkit.Rows(expect...))
// Assert that the OOM action worked: max consumed memory exceeds the quota.
c.Assert(tk.Se.GetSessionVars().StmtCtx.MemTracker.MaxConsumed(), Greater, int64(quota))
se.Close()
}
}
// ticase-4169: trigger the oom action twice after the workers have consumed all the data.
err := failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4169", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4169")
c.Assert(err, IsNil)
// ticase-4170: trigger the oom action twice after the iterator has received all the data.
err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4170", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4170")
c.Assert(err, IsNil)
// ticase-4171: trigger the oom action before any data is read or consumed.
err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4171", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4171")
c.Assert(err, IsNil)
}
func (s *testSuite) TestIssue20237(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, s")
tk.MustExec("create table t(a date, b float)")
tk.MustExec("create table s(b float)")
tk.MustExec(`insert into t values(NULL,-37), ("2011-11-04",105), ("2013-03-02",-22), ("2006-07-02",-56), (NULL,124), (NULL,111), ("2018-03-03",-5);`)
tk.MustExec(`insert into s values(-37),(105),(-22),(-56),(124),(105),(111),(-5);`)
tk.MustQuery(`select count(distinct t.a, t.b) from t join s on t.b= s.b;`).Check(testkit.Rows("4"))
}
func (s *testSerialSuite) TestIssue19148(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(16, 2));")
tk.MustExec("select * from t where a > any_value(a);")
ctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(ctx).InfoSchema()
tblInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
c.Assert(int(tblInfo.Meta().Columns[0].Flag), Equals, 0)
}
func (s *testSuite) TestIssue19667(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DATETIME)")
tk.MustExec("INSERT INTO t VALUES('1988-04-17 01:59:59')")
tk.MustQuery(`SELECT DATE_ADD(a, INTERVAL 1 SECOND) FROM t`).Check(testkit.Rows("1988-04-17 02:00:00"))
}
func issue20975Prepare(c *C, store kv.Storage) (*testkit.TestKit, *testkit.TestKit) {
tk1 := testkit.NewTestKit(c, store)
tk2 := testkit.NewTestKit(c, store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t1, t2")
tk2.MustExec("use test")
tk1.MustExec("create table t1(id int primary key, c int)")
tk1.MustExec("insert into t1 values(1, 10), (2, 20)")
return tk1, tk2
}
func (s *testSuite) TestIssue20975UpdateNoChange(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin pessimistic")
tk1.MustExec("update t1 set c=c")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdate(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdatePointGet(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateBatchPointGet(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func issue20975PreparePartitionTable(c *C, store kv.Storage) (*testkit.TestKit, *testkit.TestKit) {
tk1 := testkit.NewTestKit(c, store)
tk2 := testkit.NewTestKit(c, store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t1, t2")
tk2.MustExec("use test")
tk1.MustExec(`create table t1(id int primary key, c int) partition by range (id) (
partition p1 values less than (10),
partition p2 values less than (20)
)`)
tk1.MustExec("insert into t1 values(1, 10), (2, 20), (11, 30), (12, 40)")
return tk1, tk2
}
func (s *testSuite) TestIssue20975UpdateNoChangeWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
// Set projection concurrency to avoid data race here.
// TODO: remove this line after fixing https://github.com/pingcap/tidb/issues/25496
tk1.Se.GetSessionVars().Concurrency.SetProjectionConcurrency(0)
tk1.MustExec("begin pessimistic")
tk1.MustExec("update t1 set c=c")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdatePointGetWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=12 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=12 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateBatchPointGetWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (11, 12) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 11) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (11, 12) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 11) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20305(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t2 (a year(4))")
tk.MustExec("insert into t2 values(69)")
tk.MustQuery("select * from t2 where a <= 69").Check(testkit.Rows("2069"))
// the following test is a regression test that matches MySQL's behavior.
tk.MustExec("drop table if exists t3")
tk.MustExec("CREATE TABLE `t3` (`y` year DEFAULT NULL, `a` int DEFAULT NULL)")
tk.MustExec("INSERT INTO `t3` VALUES (2069, 70), (2010, 11), (2155, 2156), (2069, 69)")
tk.MustQuery("SELECT * FROM `t3` where y <= a").Check(testkit.Rows("2155 2156"))
}
func (s *testSuite) TestIssue22817(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (a year)")
tk.MustExec("insert into t3 values (1991), (\"1992\"), (\"93\"), (94)")
tk.MustQuery("select * from t3 where a >= NULL").Check(testkit.Rows())
}
func (s *testSuite) TestIssue13953(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (`id` int(11) DEFAULT NULL, `tp_bigint` bigint(20) DEFAULT NULL )")
tk.MustExec("insert into t values(0,1),(1,9215570218099803537)")
tk.MustQuery("select A.tp_bigint,B.id from t A join t B on A.id < B.id * 16 where A.tp_bigint = B.id;").Check(
testkit.Rows("1 1"))
}
func (s *testSuite) TestZeroDateTimeCompatibility(c *C) {
SQLs := []string{
`select YEAR(0000-00-00), YEAR("0000-00-00")`,
`select MONTH(0000-00-00), MONTH("0000-00-00")`,
`select DAYOFWEEK(0000-00-00), DAYOFWEEK("0000-00-00")`,
`select DAYOFMONTH(0000-00-00), DAYOFMONTH("0000-00-00")`,
`select DAYOFYEAR(0000-00-00), DAYOFYEAR("0000-00-00")`,
`select QUARTER(0000-00-00), QUARTER("0000-00-00")`,
`select EXTRACT(DAY FROM 0000-00-00), EXTRACT(DAY FROM "0000-00-00")`,
`select EXTRACT(MONTH FROM 0000-00-00), EXTRACT(MONTH FROM "0000-00-00")`,
`select EXTRACT(YEAR FROM 0000-00-00), EXTRACT(YEAR FROM "0000-00-00")`,
`select EXTRACT(WEEK FROM 0000-00-00), EXTRACT(WEEK FROM "0000-00-00")`,
`select EXTRACT(QUARTER FROM 0000-00-00), EXTRACT(QUARTER FROM "0000-00-00")`,
}
tk := testkit.NewTestKit(c, s.store)
for _, t := range SQLs {
fmt.Println(t)
tk.MustQuery(t).Check(testkit.Rows("0 <nil>"))
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1))
}
}
// https://github.com/pingcap/tidb/issues/24165.
func (s *testSuite) TestInvalidDateValueInCreateTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
// Test for sql mode 'NO_ZERO_IN_DATE'.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE';")
tk.MustGetErrCode("create table t (a datetime default '2999-00-00 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("create table t (a datetime);")
tk.MustGetErrCode("alter table t modify column a datetime default '2999-00-00 00:00:00';", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// Test for sql mode 'NO_ZERO_DATE'.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_DATE';")
tk.MustGetErrCode("create table t (a datetime default '0000-00-00 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("create table t (a datetime);")
tk.MustGetErrCode("alter table t modify column a datetime default '0000-00-00 00:00:00';", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// Remove NO_ZERO_DATE and NO_ZERO_IN_DATE.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES';")
// Test create table with zero datetime as a default value.
tk.MustExec("create table t (a datetime default '2999-00-00 00:00:00');")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a datetime default '0000-00-00 00:00:00');")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a datetime);")
tk.MustExec("alter table t modify column a datetime default '2999-00-00 00:00:00';")
tk.MustExec("alter table t modify column a datetime default '0000-00-00 00:00:00';")
tk.MustExec("drop table if exists t;")
// Test create table with invalid datetime(02-30) as a default value.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES';")
tk.MustGetErrCode("create table t (a datetime default '2999-02-30 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// NO_ZERO_IN_DATE and NO_ZERO_DATE have nothing to do with invalid datetime(02-30).
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE';")
tk.MustGetErrCode("create table t (a datetime default '2999-02-30 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// ALLOW_INVALID_DATES allows invalid datetime(02-30).
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES';")
tk.MustExec("create table t (a datetime default '2999-02-30 00:00:00');")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a datetime);")
tk.MustExec("alter table t modify column a datetime default '2999-02-30 00:00:00';")
tk.MustExec("drop table if exists t;")
}
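// TestOOMActionPriority checks the fallback chain of OOM actions: the first
// actions have rate-limit priority, followed by spill actions and finally a
// log action.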
func (s *testSuite) TestOOMActionPriority(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t0")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("drop table if exists t3")
tk.MustExec("drop table if exists t4")
tk.MustExec("create table t0(a int)")
tk.MustExec("insert into t0 values(1)")
tk.MustExec("create table t1(a int)")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("create table t2(a int)")
tk.MustExec("insert into t2 values(1)")
tk.MustExec("create table t3(a int)")
tk.MustExec("insert into t3 values(1)")
tk.MustExec("create table t4(a int)")
tk.MustExec("insert into t4 values(1)")
tk.MustQuery("select * from t0 join t1 join t2 join t3 join t4 order by t0.a").Check(testkit.Rows("1 1 1 1 1"))
action := tk.Se.GetSessionVars().StmtCtx.MemTracker.GetFallbackForTest()
// Check that the first 5 actions are rate-limit actions.
for i := 0; i < 5; i++ {
c.Assert(action.GetPriority(), Equals, int64(memory.DefRateLimitPriority))
action = action.GetFallback()
}
for action.GetFallback() != nil {
c.Assert(action.GetPriority(), Equals, int64(memory.DefSpillPriority))
action = action.GetFallback()
}
c.Assert(action.GetPriority(), Equals, int64(memory.DefLogPriority))
}
func (s *testSerialSuite) TestIssue21441(c *C) {
failpoint.Enable("github.com/pingcap/tidb/executor/issue21441", `return`)
defer failpoint.Disable("github.com/pingcap/tidb/executor/issue21441")
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec(`insert into t values(1),(2),(3)`)
tk.Se.GetSessionVars().InitChunkSize = 1
tk.Se.GetSessionVars().MaxChunkSize = 1
sql := `
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t`
tk.MustQuery(sql).Sort().Check(testkit.Rows(
"1", "1", "1", "1", "1", "1", "1", "1",
"2", "2", "2", "2", "2", "2", "2", "2",
"3", "3", "3", "3", "3", "3", "3", "3",
))
tk.MustQuery("select a from (" + sql + ") t order by a limit 4").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select a from (" + sql + ") t order by a limit 7, 4").Check(testkit.Rows("1", "2", "2", "2"))
tk.MustExec("set @@tidb_executor_concurrency = 2")
c.Assert(tk.Se.GetSessionVars().UnionConcurrency(), Equals, 2)
tk.MustQuery("select a from (" + sql + ") t order by a limit 4").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select a from (" + sql + ") t order by a limit 7, 4").Check(testkit.Rows("1", "2", "2", "2"))
}
func (s *testSuite) Test17780(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t0")
tk.MustExec("create table t0 (c0 double)")
tk.MustExec("insert into t0 values (1e30)")
tk.MustExec("update t0 set c0=0 where t0.c0 like 0")
// the update should not affect c0
tk.MustQuery("select count(*) from t0 where c0 = 0").Check(testkit.Rows("0"))
}
func (s *testSuite) TestIssue9918(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a year)")
tk.MustExec("insert into t values(0)")
tk.MustQuery("select cast(a as char) from t").Check(testkit.Rows("0000"))
}
func (s *testSuite) Test13004(c *C) {
tk := testkit.NewTestKit(c, s.store)
// see https://dev.mysql.com/doc/refman/5.6/en/date-and-time-literals.html, timestamp here actually produces a datetime
tk.MustQuery("SELECT TIMESTAMP '9999-01-01 00:00:00'").Check(testkit.Rows("9999-01-01 00:00:00"))
}
func (s *testSuite) Test12178(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists ta")
tk.MustExec("create table ta(id decimal(60,2))")
tk.MustExec("insert into ta values (JSON_EXTRACT('{\"c\": \"1234567890123456789012345678901234567890123456789012345\"}', '$.c'))")
tk.MustQuery("select * from ta").Check(testkit.Rows("1234567890123456789012345678901234567890123456789012345.00"))
}
func (s *testSuite) Test11883(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (f1 json)")
tk.MustExec("insert into t1(f1) values ('\"asd\"'),('\"asdf\"'),('\"asasas\"')")
tk.MustQuery("select f1 from t1 where json_extract(f1,\"$\") in (\"asd\",\"asasas\",\"asdf\")").Check(testkit.Rows("\"asd\"", "\"asdf\"", "\"asasas\""))
tk.MustQuery("select f1 from t1 where json_extract(f1, '$') = 'asd'").Check(testkit.Rows("\"asd\""))
// MySQL produces an empty result for the following SQL; that looks like a MySQL bug.
tk.MustQuery("select f1 from t1 where case json_extract(f1,\"$\") when \"asd\" then 1 else 0 end").Check(testkit.Rows("\"asd\""))
tk.MustExec("delete from t1")
tk.MustExec("insert into t1 values ('{\"a\": 1}')")
// The first value in the tuple should be interpreted as a string instead of JSON, so no row will be returned.
tk.MustQuery("select f1 from t1 where f1 in ('{\"a\": 1}', 'asdf', 'asdf')").Check(testkit.Rows())
// and if we explicitly cast it into a JSON value, the check will pass
tk.MustQuery("select f1 from t1 where f1 in (cast('{\"a\": 1}' as JSON), 'asdf', 'asdf')").Check(testkit.Rows("{\"a\": 1}"))
tk.MustQuery("select json_extract('\"asd\"', '$') = 'asd'").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('\"asd\"', '$') <=> 'asd'").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('\"asd\"', '$') <> 'asd'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"f\": 1.0}', '$.f') = 1.0").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('{\"f\": 1.0}', '$.f') = '1.0'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"n\": 1}', '$') = '{\"n\": 1}'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"n\": 1}', '$') <> '{\"n\": 1}'").Check(testkit.Rows("1"))
}
func (s *testSuite) Test15492(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int)")
tk.MustExec("insert into t values (2, 20), (1, 10), (3, 30)")
tk.MustQuery("select a + 1 as field1, a as field2 from t order by field1, field2 limit 2").Check(testkit.Rows("2 1", "3 2"))
}
func (s *testSuite) TestTrackAggMemoryUsage(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("set tidb_track_aggregate_memory_usage = off;")
rows := tk.MustQuery("explain analyze select /*+ HASH_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Equals, "N/A")
rows = tk.MustQuery("explain analyze select /*+ STREAM_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Equals, "N/A")
tk.MustExec("set tidb_track_aggregate_memory_usage = on;")
rows = tk.MustQuery("explain analyze select /*+ HASH_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Not(Equals), "N/A")
rows = tk.MustQuery("explain analyze select /*+ STREAM_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Not(Equals), "N/A")
}
func (s *testSuite) Test12201(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists e")
tk.MustExec("create table e (e enum('a', 'b'))")
tk.MustExec("insert into e values ('a'), ('b')")
tk.MustQuery("select * from e where case 1 when 0 then e end").Check(testkit.Rows())
tk.MustQuery("select * from e where case 1 when 1 then e end").Check(testkit.Rows("a", "b"))
tk.MustQuery("select * from e where case e when 1 then e end").Check(testkit.Rows("a"))
tk.MustQuery("select * from e where case 1 when e then e end").Check(testkit.Rows("a"))
}
func (s *testSuite) TestIssue21451(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (en enum('c', 'b', 'a'));")
tk.MustExec("insert into t values ('a'), ('b'), ('c');")
tk.MustQuery("select max(en) from t;").Check(testkit.Rows("c"))
tk.MustQuery("select min(en) from t;").Check(testkit.Rows("a"))
tk.MustQuery("select * from t order by en;").Check(testkit.Rows("c", "b", "a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(s set('c', 'b', 'a'));")
tk.MustExec("insert into t values ('a'), ('b'), ('c');")
tk.MustQuery("select max(s) from t;").Check(testkit.Rows("c"))
tk.MustQuery("select min(s) from t;").Check(testkit.Rows("a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(id int, en enum('c', 'b', 'a'))")
tk.MustExec("insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');")
tk.MustQuery("select id, max(en) from t where id=1 group by id;").Check(testkit.Rows("1 c"))
tk.MustQuery("select id, min(en) from t where id=1 group by id;").Check(testkit.Rows("1 a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(id int, s set('c', 'b', 'a'));")
tk.MustExec("insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');")
tk.MustQuery("select id, max(s) from t where id=1 group by id;").Check(testkit.Rows("1 c"))
tk.MustQuery("select id, min(s) from t where id=1 group by id;").Check(testkit.Rows("1 a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(e enum('e','d','c','b','a'))")
tk.MustExec("insert into t values ('e'),('d'),('c'),('b'),('a');")
tk.MustQuery("select * from t order by e limit 1;").Check(testkit.Rows("e"))
tk.MustExec("drop table t")
tk.MustExec("create table t(s set('e', 'd', 'c', 'b', 'a'))")
tk.MustExec("insert into t values ('e'),('d'),('c'),('b'),('a');")
tk.MustQuery("select * from t order by s limit 1;").Check(testkit.Rows("e"))
tk.MustExec("drop table t")
}
func (s *testSuite) TestIssue15563(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select distinct 0.7544678906163867 / 0.68234634;").Check(testkit.Rows("1.10569639842486251190"))
}
func (s *testSuite) TestIssue22231(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_issue_22231")
tk.MustExec("create table t_issue_22231(a datetime)")
tk.MustExec("insert into t_issue_22231 values('2020--05-20 01:22:12')")
tk.MustQuery("select * from t_issue_22231 where a >= '2020-05-13 00:00:00 00:00:00' and a <= '2020-05-28 23:59:59 00:00:00'").Check(testkit.Rows("2020-05-20 01:22:12"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-05-13 00:00:00 00:00:00'", "Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'"))
tk.MustQuery("select cast('2020-10-22 10:31-10:12' as datetime)").Check(testkit.Rows("2020-10-22 10:31:10"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-10-22 10:31-10:12'"))
tk.MustQuery("select cast('2020-05-28 23:59:59 00:00:00' as datetime)").Check(testkit.Rows("2020-05-28 23:59:59"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'"))
tk.MustExec("drop table if exists t_issue_22231")
}
func (s *testSuite) TestIssue22201(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustQuery("SELECT HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000)));").Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1301 Result of cast_as_binary() was larger than max_allowed_packet (67108864) - truncated"))
tk.MustQuery("SELECT HEX(WEIGHT_STRING('ab' AS char(1000000000000000000)));").Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1301 Result of weight_string() was larger than max_allowed_packet (67108864) - truncated"))
}
func (s *testSuiteP1) TestIssue22941(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists m, mp")
tk.MustExec(`CREATE TABLE m (
mid varchar(50) NOT NULL,
ParentId varchar(50) DEFAULT NULL,
PRIMARY KEY (mid),
KEY ind_bm_parent (ParentId,mid)
)`)
// mp should have more columns than m
tk.MustExec(`CREATE TABLE mp (
mpid bigint(20) unsigned NOT NULL DEFAULT '0',
mid varchar(50) DEFAULT NULL COMMENT '模块主键',
sid int,
PRIMARY KEY (mpid)
);`)
tk.MustExec(`insert into mp values("1","1","0");`)
tk.MustExec(`insert into m values("0", "0");`)
rs := tk.MustQuery(`SELECT ( SELECT COUNT(1) FROM m WHERE ParentId = c.mid ) expand, bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL, sid FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'`)
rs.Check(testkit.Rows("1 <nil> 1 0 <nil>"))
rs = tk.MustQuery(`SELECT bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'`)
rs.Check(testkit.Rows("<nil> 1 0"))
}
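// TestTxnWriteThroughputSLI checks the transaction write-throughput SLI: it
// records affected rows, write size and keys, distinguishes small transactions,
// and is marked invalid by insert/replace ... select statements.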
func (s *testSerialSuite) TestTxnWriteThroughputSLI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int key, b int)")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput", "return(true)"), IsNil)
defer func() {
err := failpoint.Disable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput")
c.Assert(err, IsNil)
}()
mustExec := func(sql string) {
tk.MustExec(sql)
tk.Se.GetTxnWriteThroughputSLI().FinishExecuteStmt(time.Second, tk.Se.AffectedRows(), tk.Se.GetSessionVars().InTxn())
}
errExec := func(sql string) {
_, err := tk.Exec(sql)
c.Assert(err, NotNil)
tk.Se.GetTxnWriteThroughputSLI().FinishExecuteStmt(time.Second, tk.Se.AffectedRows(), tk.Se.GetSessionVars().InTxn())
}
// Test insert in small txn
mustExec("insert into t values (1,3),(2,4)")
writeSLI := tk.Se.GetTxnWriteThroughputSLI()
c.Assert(writeSLI.IsInvalid(), Equals, false)
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 2, writeSize: 58, readKeys: 0, writeKeys: 2, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test insert ... select ... from
mustExec("insert into t select b, a from t")
c.Assert(writeSLI.IsInvalid(), Equals, true)
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: true, affectRow: 2, writeSize: 58, readKeys: 0, writeKeys: 2, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test for delete
mustExec("delete from t")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 4, writeSize: 76, readKeys: 0, writeKeys: 4, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test insert not in small txn
mustExec("begin")
for i := 0; i < 20; i++ {
mustExec(fmt.Sprintf("insert into t values (%v,%v)", i, i))
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
}
// Statements that affect zero rows should not be counted into the write time.
mustExec("select count(*) from t")
mustExec("select * from t")
mustExec("insert into t values (20,20)")
c.Assert(writeSLI.IsSmallTxn(), Equals, false)
mustExec("commit")
c.Assert(writeSLI.IsInvalid(), Equals, false)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 21, writeSize: 609, readKeys: 0, writeKeys: 21, writeTime: 22s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test that the SLI is marked invalid when the transaction contains a replace ... select ... from ... statement.
mustExec("delete from t")
tk.Se.GetTxnWriteThroughputSLI().Reset()
mustExec("begin")
mustExec("insert into t values (1,3),(2,4)")
mustExec("replace into t select b, a from t")
mustExec("commit")
c.Assert(writeSLI.IsInvalid(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: true, affectRow: 4, writeSize: 116, readKeys: 0, writeKeys: 4, writeTime: 3s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test that information from the last failed transaction is cleaned up.
err := failpoint.Disable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput")
c.Assert(err, IsNil)
mustExec("begin")
mustExec("insert into t values (1,3),(2,4)")
errExec("commit")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 0, writeSize: 0, readKeys: 0, writeKeys: 0, writeTime: 0s")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput", "return(true)"), IsNil)
mustExec("begin")
mustExec("insert into t values (5, 6)")
mustExec("commit")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 1, writeSize: 29, readKeys: 0, writeKeys: 1, writeTime: 2s")
// Test for reset
tk.Se.GetTxnWriteThroughputSLI().Reset()
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 0, writeSize: 0, readKeys: 0, writeKeys: 0, writeTime: 0s")
}
func (s *testSuite) TestIssue23993(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
// Real cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a double)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// Int cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a int)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// Decimal cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a decimal)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// String cast to time should not return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a varchar(255))")
tk.MustExec("insert into t_issue_23993 values('-790822912')")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("-838:59:59"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows("-790822912"))
}
func (s *testSuiteP2) TestProjectionBitType(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) clustered);")
tk.MustExec("create table t1(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) nonclustered);")
tk.MustExec("insert into t(k1) select 1;")
tk.MustExec("insert into t1(k1) select 1;")
tk.MustExec("set @@tidb_enable_vectorized_expression = 0;")
// The following SQL statements should return the same result.
tk.MustQuery("(select * from t where false) union(select * from t for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustQuery("(select * from t1 where false) union(select * from t1 for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustExec("set @@tidb_enable_vectorized_expression = 1;")
tk.MustQuery("(select * from t where false) union(select * from t for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustQuery("(select * from t1 where false) union(select * from t1 for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
}
func (s *testSuite) TestIssue23609(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE `t1` (\n `a` timestamp NULL DEFAULT NULL,\n `b` year(4) DEFAULT NULL,\n KEY `a` (`a`),\n KEY `b` (`b`)\n)")
tk.MustExec("insert into t1 values(\"2002-10-03 04:28:53\",2000), (\"2002-10-03 04:28:53\",2002), (NULL, 2002)")
tk.MustQuery("select /*+ inl_join (x,y) */ * from t1 x cross join t1 y on x.a=y.b").Check(testkit.Rows())
tk.MustQuery("select * from t1 x cross join t1 y on x.a>y.b order by x.a, x.b, y.a, y.b").Check(testkit.Rows("2002-10-03 04:28:53 2000 <nil> 2002", "2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2000", "2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2002", "2002-10-03 04:28:53 2002 <nil> 2002", "2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2000", "2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2002"))
tk.MustQuery("select * from t1 where a = b").Check(testkit.Rows())
tk.MustQuery("select * from t1 where a < b").Check(testkit.Rows())
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(0))
}
func (s *testSuite1) TestIssue24091(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
defer tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int) partition by hash (a div 0) partitions 10;")
tk.MustExec("insert into t values (NULL);")
tk.MustQuery("select null div 0;").Check(testkit.Rows("<nil>"))
tk.MustQuery("select * from t;").Check(testkit.Rows("<nil>"))
}
func (s *testSerialSuite) TestIssue24210(c *C) {
tk := testkit.NewTestKit(c, s.store)
// for ProjectionExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockProjectionExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err := tk.Exec("select a from (select 1 as a, 2 as b) t")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock ProjectionExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockProjectionExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for HashAggExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockHashAggExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select sum(a) from (select 1 as a, 2 as b) t group by b")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock HashAggExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockHashAggExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for StreamAggExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStreamAggExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select sum(a) from (select 1 as a, 2 as b) t")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock StreamAggExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockStreamAggExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for SelectionExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockSelectionExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select * from (select rand() as a) t where a > 0")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock SelectionExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockSelectionExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
}
func (s *testSerialSuite) TestDeadlocksTable(c *C) {
deadlockhistory.GlobalDeadlockHistory.Clear()
deadlockhistory.GlobalDeadlockHistory.Resize(10)
occurTime := time.Date(2021, 5, 10, 1, 2, 3, 456789000, time.Local)
rec := &deadlockhistory.DeadlockRecord{
OccurTime: occurTime,
IsRetryable: false,
WaitChain: []deadlockhistory.WaitChainItem{
{
TryLockTxn: 101,
SQLDigest: "aabbccdd",
Key: []byte("k1"),
AllSQLDigests: nil,
TxnHoldingLock: 102,
},
{
TryLockTxn: 102,
SQLDigest: "ddccbbaa",
Key: []byte("k2"),
AllSQLDigests: []string{"sql1"},
TxnHoldingLock: 101,
},
},
}
deadlockhistory.GlobalDeadlockHistory.Push(rec)
occurTime2 := time.Date(2022, 6, 11, 2, 3, 4, 987654000, time.Local)
rec2 := &deadlockhistory.DeadlockRecord{
OccurTime: occurTime2,
IsRetryable: true,
WaitChain: []deadlockhistory.WaitChainItem{
{
TryLockTxn: 201,
AllSQLDigests: []string{},
TxnHoldingLock: 202,
},
{
TryLockTxn: 202,
AllSQLDigests: []string{"sql1", "sql2, sql3"},
TxnHoldingLock: 203,
},
{
TryLockTxn: 203,
TxnHoldingLock: 201,
},
},
}
deadlockhistory.GlobalDeadlockHistory.Push(rec2)
// `Push` sets the record's ID, and IDs within a single DeadlockHistory are monotonically increasing. We must read
// the IDs here to know their values.
id1 := strconv.FormatUint(rec.ID, 10)
id2 := strconv.FormatUint(rec2.ID, 10)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/expression/sqlDigestRetrieverSkipRetrieveGlobal", "return"), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/expression/sqlDigestRetrieverSkipRetrieveGlobal"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select * from information_schema.deadlocks").Check(
testutil.RowsWithSep("/",
id1+"/2021-05-10 01:02:03.456789/0/101/aabbccdd/<nil>/6B31/<nil>/102",
id1+"/2021-05-10 01:02:03.456789/0/102/ddccbbaa/<nil>/6B32/<nil>/101",
id2+"/2022-06-11 02:03:04.987654/1/201/<nil>/<nil>/<nil>/<nil>/202",
id2+"/2022-06-11 02:03:04.987654/1/202/<nil>/<nil>/<nil>/<nil>/203",
id2+"/2022-06-11 02:03:04.987654/1/203/<nil>/<nil>/<nil>/<nil>/201",
))
}
func (s testSerialSuite) TestExprBlackListForEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a enum('a','b','c'), b enum('a','b','c'), c int, index idx(b,a));")
tk.MustExec("insert into t values(1,1,1),(2,2,2),(3,3,3);")
checkFuncPushDown := func(rows [][]interface{}, keyWord string) bool {
for _, line := range rows {
// Agg/Expr push down
if line[2].(string) == "cop[tikv]" && strings.Contains(line[4].(string), keyWord) {
return true
}
// access index
if line[2].(string) == "cop[tikv]" && strings.Contains(line[3].(string), keyWord) {
return true
}
}
return false
}
// Test agg(enum) push down
tk.MustExec("insert into mysql.expr_pushdown_blacklist(name) values('enum');")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows := tk.MustQuery("desc format='brief' select /*+ HASH_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsFalse)
rows = tk.MustQuery("desc format='brief' select /*+ STREAM_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsFalse)
tk.MustExec("delete from mysql.expr_pushdown_blacklist;")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select /*+ HASH_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsTrue)
rows = tk.MustQuery("desc format='brief' select /*+ STREAM_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsTrue)
// Test expr(enum) push down
tk.MustExec("insert into mysql.expr_pushdown_blacklist(name) values('enum');")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsFalse)
tk.MustExec("delete from mysql.expr_pushdown_blacklist;")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsTrue)
// Test enum index
tk.MustExec("insert into mysql.expr_pushdown_blacklist(name) values('enum');")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where b = 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where b = 'a';").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where b > 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where b > 'a';").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
tk.MustExec("delete from mysql.expr_pushdown_blacklist;")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where b = 1 and a = 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where b = 'a' and a = 'a';").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where b = 1 and a > 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where b = 1 and a > 'a'").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
}
func (s testSerialSuite) TestTemporaryTableNoNetwork(c *C) {
s.assertTemporaryTableNoNetwork(c, model.TempTableGlobal)
s.assertTemporaryTableNoNetwork(c, model.TempTableLocal)
}
func (s testSerialSuite) assertTemporaryTableNoNetwork(c *C, temporaryTableType model.TempTableType) {
// Test that table reader/index reader/index lookup on the temporary table do not need to visit TiKV.
tk := testkit.NewTestKit(c, s.store)
tk1 := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1.MustExec("use test")
tk.MustExec("drop table if exists normal, tmp_t")
tk.MustExec("create table normal (id int, a int, index(a))")
if temporaryTableType == model.TempTableGlobal {
tk.MustExec("set tidb_enable_global_temporary_table=true")
tk.MustExec("create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows")
} else if temporaryTableType == model.TempTableLocal {
tk.MustExec("set tidb_enable_noop_functions=true")
tk.MustExec("create temporary table tmp_t (id int primary key, a int, b int, index(a))")
} else {
c.Fail()
}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy", "return(true)"), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy"), IsNil)
}()
tk.MustExec("begin")
tk.MustExec("insert into tmp_t values (1, 1, 1)")
tk.MustExec("insert into tmp_t values (2, 2, 2)")
// Make sure the fail point works.
// With that failpoint, all requests to TiKV are discarded.
rs, err := tk1.Exec("select * from normal")
c.Assert(err, IsNil)
blocked := make(chan struct{})
ctx, cancelFunc := context.WithCancel(context.Background())
go func() {
_, err := session.ResultSetToStringSlice(ctx, tk1.Se, rs)
blocked <- struct{}{}
c.Assert(err, NotNil)
}()
select {
case <-blocked:
c.Error("The query should block when the failpoint is enabled")
case <-time.After(200 * time.Millisecond):
}
cancelFunc()
// Check that queries on the temporary table do not send requests to TiKV.
// PointGet
c.Assert(tk.HasPlan("select * from tmp_t where id=1", "Point_Get"), IsTrue)
tk.MustQuery("select * from tmp_t where id=1").Check(testkit.Rows("1 1 1"))
// BatchPointGet
c.Assert(tk.HasPlan("select * from tmp_t where id in (1, 2)", "Batch_Point_Get"), IsTrue)
tk.MustQuery("select * from tmp_t where id in (1, 2)").Check(testkit.Rows("1 1 1", "2 2 2"))
// Table reader
c.Assert(tk.HasPlan("select * from tmp_t", "TableReader"), IsTrue)
tk.MustQuery("select * from tmp_t").Check(testkit.Rows("1 1 1", "2 2 2"))
// Index reader
c.Assert(tk.HasPlan("select /*+ USE_INDEX(tmp_t, a) */ a from tmp_t", "IndexReader"), IsTrue)
tk.MustQuery("select /*+ USE_INDEX(tmp_t, a) */ a from tmp_t").Check(testkit.Rows("1", "2"))
// Index lookup
c.Assert(tk.HasPlan("select /*+ USE_INDEX(tmp_t, a) */ b from tmp_t where a = 1", "IndexLookUp"), IsTrue)
tk.MustQuery("select /*+ USE_INDEX(tmp_t, a) */ b from tmp_t where a = 1").Check(testkit.Rows("1"))
tk.MustExec("rollback")
// Prepare some data for the local temporary table; for the global temporary table, the operations below have no effect.
tk.MustExec("insert into tmp_t value(10, 10, 10)")
tk.MustExec("insert into tmp_t value(11, 11, 11)")
// Pessimistic lock
tk.MustExec("begin pessimistic")
tk.MustExec("insert into tmp_t values (3, 3, 3)")
tk.MustExec("insert ignore into tmp_t values (4, 4, 4)")
tk.MustExec("insert into tmp_t values (5, 5, 5) on duplicate key update a=100")
tk.MustExec("insert into tmp_t values (10, 10, 10) on duplicate key update a=100")
tk.MustExec("insert ignore into tmp_t values (10, 10, 10) on duplicate key update id=11")
tk.MustExec("replace into tmp_t values(6, 6, 6)")
tk.MustExec("replace into tmp_t values(11, 100, 100)")
tk.MustExec("update tmp_t set id = id + 1 where a = 1")
tk.MustExec("delete from tmp_t where a > 1")
tk.MustQuery("select count(*) from tmp_t where a >= 1 for update")
tk.MustExec("rollback")
// Check that 'for update' does not write any lock either when the table is unmodified.
tk.MustExec("begin pessimistic")
tk.MustExec("select * from tmp_t where id=1 for update")
tk.MustExec("select * from tmp_t where id in (1, 2, 3) for update")
tk.MustExec("select * from tmp_t where id > 1 for update")
tk.MustExec("rollback")
}
func (s *testResourceTagSuite) TestResourceGroupTag(c *C) {
if israce.RaceEnabled {
c.Skip("unstable, skip it and fix it before 20210622")
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int, b int, unique index idx(a));")
tbInfo := testGetTableByName(c, tk.Se, "test", "t")
// Enable Top SQL
variable.TopSQLVariable.Enable.Store(true)
variable.TopSQLVariable.AgentAddress.Store("mock-agent")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/unistoreRPCClientSendHook", `return(true)`), IsNil)
defer failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/unistoreRPCClientSendHook")
var sqlDigest, planDigest *parser.Digest
checkFn := func() {}
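// Intercept every TiKV request and record the SQL/plan digests carried in its resource group tag for later verification.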
unistore.UnistoreRPCClientSendHook = func(req *tikvrpc.Request) {
var startKey []byte
var ctx *kvrpcpb.Context
switch req.Type {
case tikvrpc.CmdGet:
request := req.Get()
startKey = request.Key
ctx = request.Context
case tikvrpc.CmdBatchGet:
request := req.BatchGet()
startKey = request.Keys[0]
ctx = request.Context
case tikvrpc.CmdPrewrite:
request := req.Prewrite()
startKey = request.Mutations[0].Key
ctx = request.Context
case tikvrpc.CmdCommit:
request := req.Commit()
startKey = request.Keys[0]
ctx = request.Context
case tikvrpc.CmdCop:
request := req.Cop()
startKey = request.Ranges[0].Start
ctx = request.Context
case tikvrpc.CmdPessimisticLock:
request := req.PessimisticLock()
startKey = request.PrimaryLock
ctx = request.Context
}
tid := tablecodec.DecodeTableID(startKey)
if tid != tbInfo.Meta().ID {
return
}
if ctx == nil {
return
}
tag := &tipb.ResourceGroupTag{}
err := tag.Unmarshal(ctx.ResourceGroupTag)
c.Assert(err, IsNil)
sqlDigest = parser.NewDigest(tag.SqlDigest)
planDigest = parser.NewDigest(tag.PlanDigest)
checkFn()
}
resetVars := func() {
sqlDigest = parser.NewDigest(nil)
planDigest = parser.NewDigest(nil)
}
cases := []struct {
sql string
ignore bool
}{
{sql: "insert into t values(1,1),(2,2),(3,3)"},
{sql: "select * from t use index (idx) where a=1"},
{sql: "select * from t use index (idx) where a in (1,2,3)"},
{sql: "select * from t use index (idx) where a>1"},
{sql: "select * from t where b>1"},
{sql: "begin pessimistic", ignore: true},
{sql: "insert into t values(4,4)"},
{sql: "commit", ignore: true},
{sql: "update t set a=5,b=5 where a=5"},
{sql: "replace into t values(6,6)"},
}
for _, ca := range cases {
resetVars()
commentf := Commentf("%v", ca.sql)
_, expectSQLDigest := parser.NormalizeDigest(ca.sql)
var expectPlanDigest *parser.Digest
checkCnt := 0
checkFn = func() {
if ca.ignore {
return
}
if expectPlanDigest == nil {
info := tk.Se.ShowProcess()
c.Assert(info, NotNil)
p, ok := info.Plan.(plannercore.Plan)
c.Assert(ok, IsTrue)
_, expectPlanDigest = plannercore.NormalizePlan(p)
}
c.Assert(sqlDigest.String(), Equals, expectSQLDigest.String(), commentf)
c.Assert(planDigest.String(), Equals, expectPlanDigest.String())
checkCnt++
}
if strings.HasPrefix(ca.sql, "select") {
tk.MustQuery(ca.sql)
} else {
tk.MustExec(ca.sql)
}
if ca.ignore {
continue
}
c.Assert(checkCnt > 0, IsTrue, commentf)
}
}
func (s *testSuite) TestIssue24933(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("drop view if exists v;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values(1), (2), (3);")
tk.MustExec("create definer='root'@'localhost' view v as select count(*) as c1 from t;")
rows := tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("3"))
// Test subqueries whose outer field list is a wildcard.
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(*) from t) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select avg(a) from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1.0000", "2.0000", "3.0000"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select sum(a) from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select group_concat(a) from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
// Test alias names.
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(0) as c1 from t) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(*) as c1 from t) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select group_concat(a) as `concat(a)` from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
// Test firstrow.
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select a from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
// Test direct select.
err := tk.ExecToErr("SELECT `s`.`count(a)` FROM (SELECT COUNT(`a`) FROM `test`.`t`) AS `s`")
c.Assert(err.Error(), Equals, "[planner:1054]Unknown column 's.count(a)' in 'field list'")
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(a) from t) s;")
rows = tk.MustQuery("select * from v")
rows.Check(testkit.Rows("3"))
// Test window function.
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(c1 int);")
tk.MustExec("insert into t values(111), (222), (333);")
tk.MustExec("drop view if exists v;")
tk.MustExec("create definer='root'@'localhost' view v as (select * from (select row_number() over (order by c1) from t) s);")
rows = tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop view if exists v;")
tk.MustExec("create definer='root'@'localhost' view v as (select * from (select c1, row_number() over (order by c1) from t) s);")
rows = tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("111 1", "222 2", "333 3"))
// Test simple expr.
tk.MustExec("drop view if exists v;")
tk.MustExec("create definer='root'@'localhost' view v as (select * from (select c1 or 0 from t) s)")
rows = tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("1", "1", "1"))
rows = tk.MustQuery("select `c1 or 0` from v;")
rows.Check(testkit.Rows("1", "1", "1"))
tk.MustExec("drop view v;")
}
func (s *testStaleTxnSuite) TestInvalidReadTemporaryTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// For mocktikv, the safe point is not initialized, so we manually insert it for the snapshot to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20160102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
tk.MustExec("set @@tidb_enable_global_temporary_table=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists tmp1")
tk.MustExec("create global temporary table tmp1 " +
"(id int not null primary key, code int not null, value int default null, unique key code(code))" +
"on commit delete rows")
// sleep 1us to make test stale
time.Sleep(time.Microsecond)
queries := []struct {
sql string
}{
{
sql: "select * from tmp1 where id=1",
},
{
sql: "select * from tmp1 where code=1",
},
{
sql: "select * from tmp1 where id in (1, 2, 3)",
},
{
sql: "select * from tmp1 where code in (1, 2, 3)",
},
{
sql: "select * from tmp1 where id > 1",
},
{
sql: "select /*+use_index(tmp1, code)*/ * from tmp1 where code > 1",
},
{
sql: "select /*+use_index(tmp1, code)*/ code from tmp1 where code > 1",
},
{
sql: "select /*+ use_index_merge(tmp1, primary, code) */ * from tmp1 where id > 1 or code > 2",
},
}
addStaleReadToSQL := func(sql string) string {
idx := strings.Index(sql, " where ")
if idx < 0 {
return ""
}
return sql[0:idx] + " as of timestamp NOW(6)" + sql[idx:]
}
for _, query := range queries {
sql := addStaleReadToSQL(query.sql)
if sql != "" {
tk.MustGetErrMsg(sql, "can not stale read temporary table")
}
}
tk.MustExec("start transaction read only as of timestamp NOW(6)")
for _, query := range queries {
tk.MustGetErrMsg(query.sql, "can not stale read temporary table")
}
tk.MustExec("commit")
for _, query := range queries {
tk.MustExec(query.sql)
}
tk.MustExec("set transaction read only as of timestamp NOW(6)")
tk.MustExec("start transaction")
for _, query := range queries {
tk.MustGetErrMsg(query.sql, "can not stale read temporary table")
}
tk.MustExec("commit")
for _, query := range queries {
tk.MustExec(query.sql)
}
tk.MustExec("set @@tidb_snapshot=NOW(6)")
for _, query := range queries {
// Will succeed here for compatibility with tools like dumpling.
rs := tk.MustQuery(query.sql)
rs.Check(testkit.Rows())
}
}
func (s *testSuite) TestEmptyTableSampleTemporaryTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// For mocktikv, the safe point is not initialized, so we manually insert it for the snapshot to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20160102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
tk.MustExec("set @@tidb_enable_global_temporary_table=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists tmp1")
tk.MustExec("create global temporary table tmp1 " +
"(id int not null primary key, code int not null, value int default null, unique key code(code))" +
"on commit delete rows")
// sleep 1us to make test stale
time.Sleep(time.Microsecond)
// Test that tablesample returns an empty result.
rs := tk.MustQuery("select * from tmp1 tablesample regions()")
rs.Check(testkit.Rows())
tk.MustExec("begin")
tk.MustExec("insert into tmp1 values (1, 1, 1)")
rs = tk.MustQuery("select * from tmp1 tablesample regions()")
rs.Check(testkit.Rows())
tk.MustExec("commit")
// tablesample should not return an error, for compatibility with tools like dumpling.
tk.MustExec("set @@tidb_snapshot=NOW(6)")
rs = tk.MustQuery("select * from tmp1 tablesample regions()")
rs.Check(testkit.Rows())
tk.MustExec("begin")
rs = tk.MustQuery("select * from tmp1 tablesample regions()")
rs.Check(testkit.Rows())
tk.MustExec("commit")
}
func (s *testSuite) TestIssue25506(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists tbl_3, tbl_23")
tk.MustExec("create table tbl_3 (col_15 bit(20))")
tk.MustExec("insert into tbl_3 values (0xFFFF)")
tk.MustExec("insert into tbl_3 values (0xFF)")
tk.MustExec("create table tbl_23 (col_15 bit(15))")
tk.MustExec("insert into tbl_23 values (0xF)")
tk.MustQuery("(select col_15 from tbl_23) union all (select col_15 from tbl_3 for update) order by col_15").Check(testkit.Rows("\x00\x00\x0F", "\x00\x00\xFF", "\x00\xFF\xFF"))
}
func (s *testSuite) TestIssue26532(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustQuery("select greatest(cast(\"2020-01-01 01:01:01\" as datetime), cast(\"2019-01-01 01:01:01\" as datetime) )union select null;").Sort().Check(testkit.Rows("2020-01-01 01:01:01", "<nil>"))
tk.MustQuery("select least(cast(\"2020-01-01 01:01:01\" as datetime), cast(\"2019-01-01 01:01:01\" as datetime) )union select null;").Sort().Check(testkit.Rows("2019-01-01 01:01:01", "<nil>"))
tk.MustQuery("select greatest(\"2020-01-01 01:01:01\" ,\"2019-01-01 01:01:01\" )union select null;").Sort().Check(testkit.Rows("2020-01-01 01:01:01", "<nil>"))
tk.MustQuery("select least(\"2020-01-01 01:01:01\" , \"2019-01-01 01:01:01\" )union select null;").Sort().Check(testkit.Rows("2019-01-01 01:01:01", "<nil>"))
}
func (s *testSuite) TestIssue25447(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b varchar(8))")
tk.MustExec("insert into t1 values(1,'1')")
tk.MustExec("create table t2(a int , b varchar(8) GENERATED ALWAYS AS (c) VIRTUAL, c varchar(8), PRIMARY KEY (a))")
tk.MustExec("insert into t2(a) values(1)")
tk.MustQuery("select /*+ tidb_inlj(t2) */ t2.b, t1.b from t1 join t2 ON t2.a=t1.a").Check(testkit.Rows("<nil> 1"))
}
| ["\"log_level\""] | [] | ["log_level"] | [] | ["log_level"] | go | 1 | 0 | |
project3/path.py
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
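# Fall back to reading a local sk.txt file when SECRET_KEY is not provided in the environment.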
if not os.environ.get('SECRET_KEY'):
try:
with open(os.path.join(BASE_DIR, 'sk.txt')) as f:
local_key = f.read().strip()
SECRET_KEY = local_key
except FileNotFoundError:
print("No secret key found - please set one for this environment.")
else:
SECRET_KEY = os.environ['SECRET_KEY']
| [] | [] | ["SECRET_KEY"] | [] | ["SECRET_KEY"] | python | 1 | 0 | |
bluemix/configuration/core_config/repository.go
|
// Package core_config provides functions to load core configuration.
// The package is for internal only.
package core_config
import (
"fmt"
"os"
"time"
"github.com/IBM-Cloud/ibm-cloud-cli-sdk/bluemix"
"github.com/IBM-Cloud/ibm-cloud-cli-sdk/bluemix/authentication/iam"
"github.com/IBM-Cloud/ibm-cloud-cli-sdk/bluemix/authentication/vpc"
"github.com/IBM-Cloud/ibm-cloud-cli-sdk/bluemix/configuration"
"github.com/IBM-Cloud/ibm-cloud-cli-sdk/bluemix/configuration/config_helpers"
"github.com/IBM-Cloud/ibm-cloud-cli-sdk/bluemix/models"
"github.com/IBM-Cloud/ibm-cloud-cli-sdk/common/rest"
)
type Repository interface {
APIEndpoint() string
HasAPIEndpoint() bool
IsPrivateEndpointEnabled() bool
IsAccessFromVPC() bool
ConsoleEndpoints() models.Endpoints
IAMEndpoint() string
IAMEndpoints() models.Endpoints
CloudName() string
CloudType() string
CurrentRegion() models.Region
HasTargetedRegion() bool
IAMToken() string
IAMRefreshToken() string
IsLoggedIn() bool
IsLoggedInWithServiceID() bool
IsLoggedInAsProfile() bool
IsLoggedInAsCRI() bool
UserEmail() string
// UserDisplayText is the human-readable ID of the logged-in user, including non-human identities
UserDisplayText() string
IAMID() string
CurrentAccount() models.Account
HasTargetedAccount() bool
HasTargetedProfile() bool
HasTargetedComputeResource() bool
IMSAccountID() string
CurrentProfile() models.Profile
CurrentResourceGroup() models.ResourceGroup
// CRIType returns the type of compute resource the user logged in as, if applicable. Valid values are `IKS`, `VPC`, or `OTHER`
CRIType() string
HasTargetedResourceGroup() bool
PluginRepos() []models.PluginRepo
PluginRepo(string) (models.PluginRepo, bool)
IsSSLDisabled() bool
HTTPTimeout() int
CLIInfoEndpoint() string
CheckCLIVersionDisabled() bool
UpdateCheckInterval() time.Duration
UpdateRetryCheckInterval() time.Duration
UpdateNotificationInterval() time.Duration
// VPCCRITokenURL() returns the value specified by the environment variable 'IBMCLOUD_CR_VPC_URL', if set.
// Otherwise, the default VPC auth url specified by the constant `DefaultServerEndpoint` is returned
VPCCRITokenURL() string
// UsageStatsDisabled returns whether the usage statistics data collection is disabled or not
// Deprecated: use UsageStatsEnabled instead. We changed to disable usage statistics by default,
// so this property will not be used anymore
UsageStatsDisabled() bool
// UsageStatsEnabled returns whether the usage statistics data collection is enabled or not
UsageStatsEnabled() bool
// UsageStatsEnabledLastUpdate returns last time when `UsageStatsEnabled` was updated
UsageStatsEnabledLastUpdate() time.Time
Locale() string
LoginAt() time.Time
Trace() string
ColorEnabled() string
SDKVersion() string
UnsetAPI()
RefreshIAMToken() (string, error)
SetAPIEndpoint(string)
SetPrivateEndpointEnabled(bool)
SetAccessFromVPC(bool)
SetConsoleEndpoints(models.Endpoints)
SetIAMEndpoints(models.Endpoints)
SetCloudType(string)
SetCloudName(string)
SetCRIType(string)
SetIsLoggedInAsCRI(bool)
SetRegion(models.Region)
SetIAMToken(string)
SetIAMRefreshToken(string)
ClearSession()
SetAccount(models.Account)
SetProfile(models.Profile)
SetResourceGroup(models.ResourceGroup)
SetLoginAt(loginAt time.Time)
SetCheckCLIVersionDisabled(bool)
SetCLIInfoEndpoint(string)
SetPluginRepo(models.PluginRepo)
UnsetPluginRepo(string)
SetSSLDisabled(bool)
SetHTTPTimeout(int)
// SetUsageStatsDisabled disables or enables usage statistics data collection
// Deprecated: use SetUsageStatsEnabled instead
SetUsageStatsDisabled(bool)
// SetUsageStatsEnabled enables or disables usage statistics data collection
SetUsageStatsEnabled(bool)
SetUpdateCheckInterval(time.Duration)
SetUpdateRetryCheckInterval(time.Duration)
SetUpdateNotificationInterval(time.Duration)
SetLocale(string)
SetTrace(string)
SetColorEnabled(string)
CFConfig() CFConfig
HasTargetedCF() bool
HasTargetedCFEE() bool
HasTargetedPublicCF() bool
SetCFEETargeted(bool)
CFEEEnvID() string
SetCFEEEnvID(string)
CheckMessageOfTheDay() bool
SetMessageOfTheDayTime()
}
// Deprecated
type ReadWriter interface {
Repository
}
type CFConfig interface {
APIVersion() string
APIEndpoint() string
AsyncTimeout() uint
ColorEnabled() string
HasAPIEndpoint() bool
AuthenticationEndpoint() string
UAAEndpoint() string
DopplerEndpoint() string
RoutingAPIEndpoint() string
SSHOAuthClient() string
MinCFCLIVersion() string
MinRecommendedCFCLIVersion() string
Username() string
UserGUID() string
UserEmail() string
Locale() string
LoginAt() time.Time
IsLoggedIn() bool
SetLoginAt(loginAt time.Time)
Trace() string
UAAToken() string
UAARefreshToken() string
CurrentOrganization() models.OrganizationFields
HasTargetedOrganization() bool
CurrentSpace() models.SpaceFields
HasTargetedSpace() bool
UnsetAPI()
SetAPIVersion(string)
SetAPIEndpoint(string)
SetAuthenticationEndpoint(string)
SetDopplerEndpoint(string)
SetUAAEndpoint(string)
SetRoutingAPIEndpoint(string)
SetSSHOAuthClient(string)
SetMinCFCLIVersion(string)
SetMinRecommendedCFCLIVersion(string)
SetUAAToken(string)
SetUAARefreshToken(string)
SetOrganization(models.OrganizationFields)
SetSpace(models.SpaceFields)
ClearSession()
}
type repository struct {
*bxConfig
cfConfig cfConfigWrapper
}
type cfConfigWrapper struct {
*cfConfig
bx *bxConfig
}
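// UnsetAPI clears the CF API endpoint and also drops any CFEE targeting stored in the bluemix config.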
func (wrapper cfConfigWrapper) UnsetAPI() {
wrapper.cfConfig.UnsetAPI()
wrapper.bx.SetCFEEEnvID("")
wrapper.bx.SetCFEETargeted(false)
}
func newRepository(bx *bxConfig, cf *cfConfig) repository {
return repository{
bxConfig: bx,
cfConfig: cfConfigWrapper{cfConfig: cf, bx: bx},
}
}
func (c repository) IsLoggedIn() bool {
return c.bxConfig.IsLoggedIn() || c.cfConfig.IsLoggedIn()
}
func (c repository) IsLoggedInWithServiceID() bool {
return c.bxConfig.IsLoggedIn() && NewIAMTokenInfo(c.IAMToken()).SubjectType == SubjectTypeServiceID
}
func (c repository) IsLoggedInAsProfile() bool {
return c.bxConfig.IsLoggedIn() && NewIAMTokenInfo(c.IAMToken()).SubjectType == SubjectTypeTrustedProfile
}
func (c repository) VPCCRITokenURL() string {
if env := bluemix.EnvCRVpcUrl.Get(); env != "" {
return env
}
// default server endpoint is a constant value in vpc authenticator
return vpc.DefaultServerEndpoint
}
func (c repository) IAMEndpoint() string {
if c.IsPrivateEndpointEnabled() {
if c.IsAccessFromVPC() {
// return VPC endpoint
return c.IAMEndpoints().PrivateVPCEndpoint
} else {
// return CSE endpoint
return c.IAMEndpoints().PrivateEndpoint
}
}
return c.IAMEndpoints().PublicEndpoint
}
func (c repository) RefreshIAMToken() (string, error) {
var ret string
// confirm user is logged in as a VPC compute resource identity
isLoggedInAsCRI := c.IsLoggedInAsCRI()
criType := c.CRIType()
if isLoggedInAsCRI && criType == "VPC" {
token, err := c.fetchNewIAMTokenUsingVPCAuth()
if err != nil {
return "", err
}
ret = fmt.Sprintf("%s %s", token.TokenType, token.AccessToken)
c.SetIAMToken(ret)
// this should be empty for vpc vsi tokens
c.SetIAMRefreshToken(token.RefreshToken)
} else {
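// Refresh via the standard IAM refresh-token flow; the endpoint can be overridden with the IAM_ENDPOINT environment variable.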
iamEndpoint := os.Getenv("IAM_ENDPOINT")
if iamEndpoint == "" {
iamEndpoint = c.IAMEndpoint()
}
if iamEndpoint == "" {
return "", fmt.Errorf("IAM endpoint is not set")
}
auth := iam.NewClient(iam.DefaultConfig(iamEndpoint), rest.NewClient())
token, err := auth.GetToken(iam.RefreshTokenRequest(c.IAMRefreshToken()))
if err != nil {
return "", err
}
ret = fmt.Sprintf("%s %s", token.TokenType, token.AccessToken)
c.SetIAMToken(ret)
c.SetIAMRefreshToken(token.RefreshToken)
}
return ret, nil
}
func (c repository) fetchNewIAMTokenUsingVPCAuth() (*iam.Token, error) {
// create a vpc client using default configuration
client := vpc.NewClient(vpc.DefaultConfig(c.VPCCRITokenURL(), vpc.DefaultMetadataServiceVersion), rest.NewClient())
// fetch an instance identity token from the metadata server
identityToken, err := client.GetInstanceIdentityToken()
if err != nil {
return nil, err
}
// get the existing targeted IAM trusted profile ID of the CLI session
targetProfile := c.CurrentProfile()
profileID := targetProfile.ID
if profileID == "" {
return nil, fmt.Errorf("Trusted profile not set in configuration")
}
// prepare IAM token request using the existing targeted profile.
req, err := vpc.NewIAMAccessTokenRequest(profileID, "", identityToken.AccessToken)
if err != nil {
return nil, err
}
// get the new access token
iamToken, err := client.GetIAMAccessToken(req)
if err != nil {
return nil, err
}
return iamToken, nil
}
func (c repository) UserEmail() string {
email := c.bxConfig.UserEmail()
if email == "" {
email = c.cfConfig.UserEmail()
}
return email
}
func (c repository) CFConfig() CFConfig {
return c.cfConfig
}
func (c repository) HasTargetedCF() bool {
return c.cfConfig.HasAPIEndpoint()
}
func (c repository) HasTargetedCFEE() bool {
return c.HasTargetedCF() && c.bxConfig.HasTargetedCFEE()
}
func (c repository) HasTargetedPublicCF() bool {
return c.HasTargetedCF() && !c.bxConfig.HasTargetedCFEE()
}
func (c repository) SetSSLDisabled(disabled bool) {
c.bxConfig.SetSSLDisabled(disabled)
c.cfConfig.SetSSLDisabled(disabled)
}
func (c repository) SetColorEnabled(enabled string) {
c.bxConfig.SetColorEnabled(enabled)
c.cfConfig.SetColorEnabled(enabled)
}
func (c repository) SetTrace(trace string) {
c.bxConfig.SetTrace(trace)
c.cfConfig.SetTrace(trace)
}
func (c repository) SetLocale(locale string) {
c.bxConfig.SetLocale(locale)
c.cfConfig.SetLocale(locale)
}
func (c repository) UnsetAPI() {
c.bxConfig.UnsetAPI()
c.bxConfig.SetCFEETargeted(false)
c.bxConfig.SetCFEEEnvID("")
c.cfConfig.UnsetAPI()
}
func (c repository) ClearSession() {
c.bxConfig.ClearSession()
c.cfConfig.ClearSession()
}
func NewCoreConfig(errHandler func(error)) ReadWriter {
// config_helpers.MigrateFromOldConfig() // error ignored
return NewCoreConfigFromPath(config_helpers.CFConfigFilePath(), config_helpers.ConfigFilePath(), errHandler)
}
func NewCoreConfigFromPath(cfConfigPath string, bxConfigPath string, errHandler func(error)) ReadWriter {
return NewCoreConfigFromPersistor(configuration.NewDiskPersistor(cfConfigPath), configuration.NewDiskPersistor(bxConfigPath), errHandler)
}
func NewCoreConfigFromPersistor(cfPersistor configuration.Persistor, bxPersistor configuration.Persistor, errHandler func(error)) ReadWriter {
return newRepository(createBluemixConfigFromPersistor(bxPersistor, errHandler), createCFConfigFromPersistor(cfPersistor, errHandler))
}
| ["\"IAM_ENDPOINT\""] | [] | ["IAM_ENDPOINT"] | [] | ["IAM_ENDPOINT"] | go | 1 | 0 | |
services/instance.go
|
package services
import (
"context"
"crypto/tls"
"io"
"log"
"os"
"strings"
"sync"
"golang.org/x/text/encoding"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
var rw sync.Mutex
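// UInt16Slice implements sort.Interface for a slice of uint16 port numbers.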
type UInt16Slice []uint16
func (p UInt16Slice) Len() int { return len(p) }
func (p UInt16Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p UInt16Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type Instance struct {
session *Session `json:"-"`
Name string `json:"name"`
Hostname string `json:"hostname"`
IP string `json:"ip"`
conn *types.HijackedResponse `json:"-"`
ctx context.Context `json:"-"`
dockerClient *client.Client `json:"-"`
IsManager *bool `json:"is_manager"`
Mem string `json:"mem"`
Cpu string `json:"cpu"`
Ports UInt16Slice
tempPorts []uint16 `json:"-"`
ServerCert []byte `json:"server_cert"`
ServerKey []byte `json:"server_key"`
cert *tls.Certificate `json:"-"`
}
func (i *Instance) setUsedPort(port uint16) {
rw.Lock()
defer rw.Unlock()
for _, p := range i.tempPorts {
if p == port {
return
}
}
i.tempPorts = append(i.tempPorts, port)
}
func (i *Instance) SetCertificate(cert, key []byte) (*tls.Certificate, error) {
i.ServerCert = cert
i.ServerKey = key
c, e := tls.X509KeyPair(i.ServerCert, i.ServerKey)
if e != nil {
return nil, e
}
i.cert = &c
// We store sessions as soon as we set instance keys
if err := saveSessionsToDisk(); err != nil {
return nil, err
}
return i.cert, nil
}
func (i *Instance) GetCertificate() *tls.Certificate {
return i.cert
}
func (i *Instance) IsConnected() bool {
return i.conn != nil
}
func (i *Instance) SetSession(s *Session) {
i.session = s
}
var dindImage string
func init() {
dindImage = getDindImageName()
}
func getDindImageName() string {
dindImage := os.Getenv("DIND_IMAGE")
defaultDindImageName := "franela/dind"
if len(dindImage) == 0 {
dindImage = defaultDindImageName
}
return dindImage
}
func NewInstance(session *Session, imageName string) (*Instance, error) {
if imageName == "" {
imageName = dindImage
}
log.Printf("NewInstance - using image: [%s]\n", imageName)
instance, err := CreateInstance(session, imageName)
if err != nil {
return nil, err
}
instance.session = session
if session.Instances == nil {
session.Instances = make(map[string]*Instance)
}
session.Instances[instance.Name] = instance
go instance.Attach()
err = saveSessionsToDisk()
if err != nil {
return nil, err
}
wsServer.BroadcastTo(session.Id, "new instance", instance.Name, instance.IP, instance.Hostname)
setGauges()
return instance, nil
}
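// sessionWriter broadcasts a container's terminal output to every client attached to the session.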
type sessionWriter struct {
instance *Instance
}
func (s *sessionWriter) Write(p []byte) (n int, err error) {
wsServer.BroadcastTo(s.instance.session.Id, "terminal out", s.instance.Name, string(p))
return len(p), nil
}
func (i *Instance) ResizeTerminal(cols, rows uint) error {
return ResizeConnection(i.Name, cols, rows)
}
func (i *Instance) Attach() {
i.ctx = context.Background()
conn, err := CreateAttachConnection(i.Name, i.ctx)
if err != nil {
return
}
i.conn = conn
go func() {
encoder := encoding.Replacement.NewEncoder()
sw := &sessionWriter{instance: i}
io.Copy(encoder.Writer(sw), conn.Reader)
}()
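// Block until the attach context is done.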
select {
case <-i.ctx.Done():
}
}
func GetInstance(session *Session, name string) *Instance {
return session.Instances[name]
}
func FindInstanceByIP(ip string) *Instance {
for _, s := range sessions {
for _, i := range s.Instances {
if i.IP == ip {
return i
}
}
}
return nil
}
func DeleteInstance(session *Session, instance *Instance) error {
if instance.conn != nil {
instance.conn.Close()
}
err := DeleteContainer(instance.Name)
if err != nil && !strings.Contains(err.Error(), "No such container") {
log.Println(err)
return err
}
wsServer.BroadcastTo(session.Id, "delete instance", instance.Name)
delete(session.Instances, instance.Name)
if err := saveSessionsToDisk(); err != nil {
return err
}
setGauges()
return nil
}
| ["\"DIND_IMAGE\""] | [] | ["DIND_IMAGE"] | [] | ["DIND_IMAGE"] | go | 1 | 0 | |
_unittests/ut_special/test_tsp_kohonen.py
|
"""
@brief test log(time=10s)
"""
import os
import sys
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.special.tsp_kohonen import pygame_simulation
from src.ensae_teaching_cs.helpers.video_helper import make_video
class TestTspKohonen(unittest.TestCase):
def test_image_video_kohonen(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_image_video_tsp_kohonen")
if is_travis_or_appveyor() in ("travis",):
# pygame.error: No available video device
return
import pygame
if is_travis_or_appveyor() == "circleci":
# os.environ["SDL_VIDEODRIVER"] = "x11"
flags = pygame.NOFRAME
else:
flags = 0
pygame_simulation(pygame, fLOG=fLOG, folder=temp,
nb=200 if __name__ == "__main__" else 20,
size=(400, 250), flags=flags)
files = os.listdir(temp)
assert len(files) > 9
png = [os.path.join(temp, _)
for _ in files if os.path.splitext(_)[-1] == ".png"]
assert len(png) > 0
out = os.path.join(temp, "tsp_kohonen.avi")
v = make_video(png, out, size=(200, 125), format="XVID", fps=20)
assert v is not None
if __name__ == "__main__":
unittest.main()
| [] | [] | ["SDL_VIDEODRIVER"] | [] | ["SDL_VIDEODRIVER"] | python | 1 | 0 | |
volttron/platform/vip/rmq_router.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from __future__ import absolute_import
import errno
import logging
import os
from queue import Queue
from typing import Optional
from volttron.platform import is_rabbitmq_available
from volttron.platform import jsonapi
from volttron.utils.rmq_mgmt import RabbitMQMgmt
from .rmq_connection import RMQRouterConnection
from .router import BaseRouter
from .servicepeer import ServicePeerNotifier
from .socket import Message, Address
from ..keystore import KeyStore
from ..main import __version__
if is_rabbitmq_available():
import pika
__all__ = ['RMQRouter']
_log = logging.getLogger(__name__)
class RMQRouter(object):
"""
Concrete VIP Router for the RabbitMQ message bus. It handles router-specific
messages and unroutable messages.
"""
def __init__(self, address, local_address, instance_name,
addresses=(), identity='router', default_user_id=None,
volttron_central_address=None,
volttron_central_serverkey=None,
bind_web_address=None,
service_notifier: Optional[ServicePeerNotifier] = None
):
"""
Initialize the object instance.
:param instance_name: Name of VOLTTRON instance
:param identity: Identity for router
:param default_user_id: Default user id
"""
self.default_user_id = default_user_id
self._peers = set()
self._peers_with_messagebus = dict()
self.addresses = [Address(addr) for addr in set(addresses)]
self.local_address = Address(local_address)
self._address = address
self._volttron_central_address = volttron_central_address
self._volttron_central_serverkey = volttron_central_serverkey
self._bind_web_address = bind_web_address
self._instance_name = instance_name
self._identity = identity
self.rmq_mgmt = RabbitMQMgmt()
self.event_queue = Queue()
self._service_notifier = service_notifier
param = self._build_connection_parameters()
self.connection = RMQRouterConnection(param,
identity,
instance_name,
reconnect_delay=self.rmq_mgmt.rmq_config.reconnect_delay()
)
def _build_connection_parameters(self):
if self._identity is None:
raise ValueError("Agent's VIP identity is not set")
else:
param = self.rmq_mgmt.build_router_connection(self._identity,
self._instance_name)
return param
def start(self):
"""
Register VIP message handler with connection object and create
connection to RabbitMQ broker.
:return:
"""
self.connection.register(self.handle_system)
self.setup()
def stop(self, linger=1):
"""
Close the connection to RabbitMQ broker.
:param linger:
:return:
"""
self.connection.disconnect()
def setup(self):
"""
Called from start() method to set connection properties.
:return:
"""
# set properties for VIP queue
flags = dict(durable=False, exclusive=True, auto_delete=True)
self.connection.set_properties(flags)
def run(self):
"""
RabbitMQ router loop to keep the connection running.
:return:
"""
self.start()
try:
self.connection.loop()
except KeyboardInterrupt:
pass
except (pika.exceptions.AMQPConnectionError,
pika.exceptions.AMQPChannelError) as exc:
_log.error("RabbitMQ Connection Error. {}".format(exc))
finally:
self.stop()
def connection_open_callback(self):
_log.debug("Received connection callback")
def connection_close_callback(self):
_log.debug("Received connection callback")
def issue(self, topic, frames, extra=None):
pass
def _add_peer(self, peer, message_bus='rmq'):
if peer == self._identity:
return
if peer in self._peers:
return
self._distribute('peerlist', 'add', peer, message_bus)
self._peers.add(peer)
self._peers_with_messagebus[peer] = message_bus
self._service_notifier.peer_added(peer)
def _drop_peer(self, peer, message_bus='rmq'):
try:
self._peers.remove(peer)
self._service_notifier.peer_dropped(peer)
del self._peers_with_messagebus[peer]
except KeyError:
return
self._distribute('peerlist', 'drop', peer, message_bus)
def route(self, message):
'''Route one message and return.
One message is read from the socket and processed. If the
recipient is the router (empty recipient), the standard hello
and ping subsystems are handled. Other subsystems are sent to
handle_subsystem() for processing. Messages destined for other
entities are routed appropriately.
'''
self.handle_system(message)
def handle_system(self, message):
"""
Handles messages intended for router. Standard hello, ping, peerlist subsystems
are handled.
:param message: the incoming VIP message
:return:
"""
# [SENDER, RECIPIENT, PROTOCOL, USER_ID, MSG_ID, SUBSYSTEM, ...]
sender = message.peer # source
subsystem = message.subsystem
self._add_peer(sender)
if subsystem == 'hello':
self.authenticate(sender)
# send welcome message back
message.args = ['welcome', '1.0', self._identity, sender]
elif subsystem == 'ping':
message.args[0] = 'pong'
elif subsystem == 'peerlist':
try:
op = message.args[0]
except IndexError:
op = None
except ValueError:
op = None
if op == 'list':
del message.args[:]
message.args = ['listing']
message.args.extend(self._peers)
elif op == 'list_with_messagebus':
_log.debug("Router peerlist request op: list_with_messagebus, {}, {}".format(sender, self._peers))
del message.args[:]
message.args = ['listing_with_messagebus']
message.args.append(jsonapi.dumps(self._peers_with_messagebus))
_log.debug("Router peerlist request op: list_with_messagebus, {}, {}".format(sender, self._peers))
elif op == 'add':
peer = message.args[1]
try:
message_bus = message.args[2]
except IndexError:
message_bus = 'rmq'
self._add_peer(peer=peer, message_bus=message_bus)
elif op == 'drop':
peer = message.args[1]
try:
message_bus = message.args[2]
except IndexError:
message_bus = 'rmq'
self._drop_peer(peer=peer, message_bus=message_bus)
else:
error = ('unknown' if op else 'missing') + ' operation'
message.args.extend(['error', error])
elif subsystem == 'quit':
if sender == 'control':
self.stop()
raise KeyboardInterrupt()
elif subsystem == 'agentstop':
_log.debug("ROUTER received agent stop {}".format(sender))
try:
drop = message.args[0]
self._drop_peer(drop)
except IndexError:
pass
except ValueError:
pass
return False
elif subsystem == 'query':
try:
name = message.args[0]
except IndexError:
value = None
except ValueError:
value = None
else:
if name == 'addresses':
if self.addresses:
value = [addr.base for addr in self.addresses]
else:
value = [self.local_address.base]
elif name == 'local_address':
value = self.local_address.base
# Allow the agents to know the serverkey.
elif name == 'serverkey':
keystore = KeyStore()
value = keystore.public
elif name == 'volttron-central-address':
value = self._volttron_central_address
elif name == 'volttron-central-serverkey':
value = self._volttron_central_serverkey
elif name == 'instance-name':
value = self._instance_name
elif name == 'bind-web-address':
value = self._bind_web_address
elif name == 'platform-version':
value = __version__
elif name == 'message-bus':
value = os.environ.get('MESSAGEBUS', 'zmq')
else:
value = None
message.args = ['', value]
message.args.append('')
elif subsystem == 'error':
try:
errnum = message.args[0]
if errnum == errno.EHOSTUNREACH:
recipient = message.args[2]
self._drop_peer(recipient)
return
except IndexError:
_log.error("ROUTER unable to parse error message {}".format(message.args))
else:
# Router does not know of the subsystem
message.type = 'error'
errnum = errno.EPROTONOSUPPORT
errmsg = os.strerror(errnum).encode('ascii') # str(errnum).encode('ascii')
_log.debug("ROUTER proto unsupported {}, sender {}".format(subsystem, sender))
message.args = [errnum, errmsg, '', subsystem]
# Send the message back to the sender
self.connection.send_vip_object(message)
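# Broadcast a subsystem message (e.g. peerlist add/drop) to all connected RMQ peers.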
def _distribute(self, *parts):
message = Message(peer=None, subsystem=parts[0], args=parts[1:])
for peer in self._peers:
message.peer = peer
if self._peers_with_messagebus[peer] == 'rmq':
self.connection.send_vip_object(message)
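# Build the RabbitMQ access tokens (configure/read/write) an agent identity is expected to have.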
def _make_user_access_tokens(self, identity):
tokens = dict()
tokens["configure"] = tokens["read"] = tokens["write"] = [identity,
identity + ".pubsub.*",
identity + ".zmq.*"]
tokens["read"].append("volttron")
tokens["write"].append("volttron")
return tokens
def _check_user_access_token(self, actual, allowed):
pending = actual[:]
for tk in actual:
if tk in allowed:
pending.remove(tk)
return pending
def _make_topic_permission_tokens(self, identity):
"""
Make tokens for read and write permission on topic (routing key) for an agent
:param identity:
:return:
"""
tokens = dict()
# Exclusive read access (topic consumption) to its VIP routing key and any pubsub routing key
tokens["read"] = ["{0}.{1}".format(self._instance_name, identity),
"__pubsub__.*"]
# Write access to any VIP routing key and application specific topics within this instance
tokens["write"] = ["{0}.*".format(self._instance_name),
"__pubsub__.{0}.*".format(self._instance_name)]
if identity == "proxy_router":
tokens["read"] = ".*"
tokens["write"] = ".*"
return tokens
def _check_token(self, actual, allowed):
"""
Check if actual permission string matches the allowed permission
:param actual: actual permission
:param allowed: allowed permission
:return: returns missing permissions
"""
pending = actual[:]
for tk in actual:
if tk in allowed:
pending.remove(tk)
return pending
def authenticate(self, identity):
"""
Check the permissions set for the agent
1. Check the permissions for user
- to access the "volttron" exchange
- to access it's VIP queue and pubsub queues
2. Check/Set the topic permissions for the user
:param identity: Agent identity
:return:
"""
user_error_msg = self._check_user_permissions(self._instance_name +
"." + identity)
return user_error_msg
def _check_user_permissions(self, identity):
msg = None
user_permission = self.rmq_mgmt.get_user_permissions(identity)
# Check user access permissions for the agent
allowed_tokens = self._make_user_access_tokens(identity)
# _log.debug("Identity: {0}, User permissions: {1}".format(identity, user_permission))
if user_permission:
config_perms = user_permission.get("configure", "").split("|")
read_perms = user_permission.get("read", "").split("|")
write_perms = user_permission.get("write", "").split("|")
config_chk = self._check_token(config_perms, allowed_tokens["configure"])
read_chk = self._check_token(read_perms, allowed_tokens["read"])
write_chk = self._check_token(write_perms, allowed_tokens["write"])
if config_chk or read_chk or write_chk:
msg = "Agent has invalid user permissions to "
if config_chk: msg += "CONFIGURE: {} , ".format(config_chk)
if read_chk: msg += "READ: {} ".format(read_chk)
if write_chk: msg += "WRITE: {}".format(write_chk)
else:
# Setting default user access control
common_access = "{identity}|{identity}.pubsub.*|{identity}.zmq.*".format(identity=identity)
# Rabbit user for the agent should have access to limited resources (exchange, queues)
config_access = common_access
read_access = "volttron|{}".format(common_access)
write_access = "volttron|{}".format(common_access)
permissions = dict(configure=config_access, read=read_access, write=write_access)
self.rmq_mgmt.set_user_permissions(permissions, identity)
return msg
|
[] |
[] |
[
"MESSAGEBUS"
] |
[]
|
["MESSAGEBUS"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"io"
"log"
"net/http"
"os"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ssmincidents"
"github.com/fujiwara/ridge"
"github.com/pkg/errors"
)
var responsePlanArn string
func main() {
responsePlanArn = os.Getenv("RESPONSE_PLAN_ARN")
log.Println("[info] checking self IP address")
resp, err := http.Get("http://checkip.amazonaws.com/")
if err != nil {
log.Println("[warn]", err)
} else {
io.Copy(os.Stderr, resp.Body)
resp.Body.Close()
}
var mux = http.NewServeMux()
mux.HandleFunc("/webhook", handleWebhook)
ridge.Run(":8000", "/", mux)
}
func handleWebhook(w http.ResponseWriter, r *http.Request) {
if t := r.Header.Get("content-type"); !strings.HasPrefix(t, "application/json") {
errorResponse(w, http.StatusBadRequest, errors.Errorf("invalid content-type %s", t))
return
}
var hook MackerelWebhook
err := json.NewDecoder(r.Body).Decode(&hook)
if err != nil {
errorResponse(w, http.StatusInternalServerError, err)
return
}
if s := hook.Alert.Status; s != "critical" {
log.Printf("[info] alert status is %s. not a critical. ignored.", s)
return
}
log.Println("[info] start incident:", hook.IncidentTitle(), hook.Alert.URL)
sess := session.Must(session.NewSession())
svc := ssmincidents.New(sess)
out, err := svc.StartIncident(&ssmincidents.StartIncidentInput{
Title: aws.String(hook.IncidentTitle()),
ResponsePlanArn: aws.String(responsePlanArn),
RelatedItems: []*ssmincidents.RelatedItem{
{
Title: aws.String("Mackerel"),
Identifier: &ssmincidents.ItemIdentifier{
Type: aws.String("OTHER"),
Value: &ssmincidents.ItemValue{
Url: &hook.Alert.URL,
},
},
},
},
})
if err != nil {
errorResponse(w, http.StatusInternalServerError, err)
return
}
log.Printf("[info] incident record arn: %s", *out.IncidentRecordArn)
w.Header().Set("content-type", "application/json")
json.NewEncoder(w).Encode(out)
}
func errorResponse(w http.ResponseWriter, code int, err error) {
log.Printf("[error] %d %s", code, err)
w.WriteHeader(code)
}
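// Illustrative sketch (not part of the original file): handleWebhook assumes a
// MackerelWebhook type defined elsewhere in this repository. Based only on the
// fields used above, its shape is roughly the following; the names here are an
// assumption for clarity, not the actual definition.
//
//	type MackerelWebhook struct {
//		Alert struct {
//			Status string `json:"status"`
//			URL    string `json:"url"`
//		} `json:"alert"`
//	}
//
//	// IncidentTitle builds a human-readable title for the incident record.
//	func (h MackerelWebhook) IncidentTitle() string { ... }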
|
[
"\"RESPONSE_PLAN_ARN\""
] |
[] |
[
"RESPONSE_PLAN_ARN"
] |
[]
|
["RESPONSE_PLAN_ARN"]
|
go
| 1 | 0 | |
cmd/common-methods.go
|
/*
* MinIO Client (C) 2015-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/base64"
"errors"
"io"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"golang.org/x/net/http/httpguts"
"gopkg.in/h2non/filetype.v1"
"github.com/minio/cli"
"github.com/minio/mc/pkg/probe"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
)
// getDecodedKey decodes the keys if they are base64 encoded and returns the plain-text key string
func getDecodedKey(sseKeys string) (key string, err *probe.Error) {
keyString := ""
for i, sse := range strings.Split(sseKeys, ",") {
if i > 0 {
keyString = keyString + ","
}
sseString, err := parseKey(sse)
if err != nil {
return "", err
}
keyString = keyString + sseString
}
return keyString, nil
}
// Validate the key
func parseKey(sseKeys string) (sse string, err *probe.Error) {
encryptString := strings.SplitN(sseKeys, "=", 2)
if len(encryptString) < 2 {
return "", probe.NewError(errors.New("SSE-C prefix should be of the form prefix1=key1,... "))
}
secretValue := encryptString[1]
if len(secretValue) == 32 {
return sseKeys, nil
}
decodedString, e := base64.StdEncoding.DecodeString(secretValue)
if e != nil || len(decodedString) != 32 {
return "", probe.NewError(errors.New("Encryption key should be 32 bytes plain text key or 44 bytes base64 encoded key"))
}
return encryptString[0] + "=" + string(decodedString), nil
}
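// Illustrative example (not part of the original source): parseKey accepts entries
// of the form "alias/bucket/prefix=KEY" where KEY is either a 32-byte plain-text
// key, e.g. "myminio/photos=32byteslongsecretkeymustbegiven1", or a 44-byte
// base64-encoded key; anything that decodes to a length other than 32 bytes is
// rejected with an error.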
// parse and return encryption key pairs per alias.
func getEncKeys(ctx *cli.Context) (map[string][]prefixSSEPair, *probe.Error) {
sseServer := os.Getenv("MC_ENCRYPT")
if prefix := ctx.String("encrypt"); prefix != "" {
sseServer = prefix
}
sseKeys := os.Getenv("MC_ENCRYPT_KEY")
if keyPrefix := ctx.String("encrypt-key"); keyPrefix != "" {
if sseServer != "" && strings.Contains(keyPrefix, sseServer) {
return nil, errConflictSSE(sseServer, keyPrefix).Trace(ctx.Args()...)
}
sseKeys = keyPrefix
}
var err *probe.Error
if sseKeys != "" {
sseKeys, err = getDecodedKey(sseKeys)
if err != nil {
return nil, err.Trace(sseKeys)
}
}
encKeyDB, err := parseAndValidateEncryptionKeys(sseKeys, sseServer)
if err != nil {
return nil, err.Trace(sseKeys)
}
return encKeyDB, nil
}
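// Illustrative usage (not part of the original source): the values read above may
// come from the environment, e.g.
//   MC_ENCRYPT_KEY="myminio/photos=32byteslongsecretkeymustbegiven1"
//   MC_ENCRYPT="myminio/photos"
// or from the corresponding --encrypt-key / --encrypt flags, which take precedence.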
// Check if the passed URL represents a folder. It may or may not exist yet.
// If it exists, we can easily check if it is a folder, if it doesn't exist,
// we can guess if the url is a folder from how it looks.
func isAliasURLDir(ctx context.Context, aliasURL string, keys map[string][]prefixSSEPair, timeRef time.Time) bool {
// If the target url exists, check if it is a directory
// and return immediately.
_, targetContent, err := url2Stat(ctx, aliasURL, "", false, keys, timeRef)
if err == nil {
return targetContent.Type.IsDir()
}
_, expandedURL, _ := mustExpandAlias(aliasURL)
// Check if targetURL is an FS or S3 aliased url
if expandedURL == aliasURL {
// This is an FS url, check if the url has a separator at the end
return strings.HasSuffix(aliasURL, string(filepath.Separator))
}
// This is an S3 url, then:
// *) If alias format is specified, return false
// *) If alias/bucket is specified, return true
// *) If alias/bucket/prefix, check if prefix
//    has a trailing slash.
pathURL := filepath.ToSlash(aliasURL)
fields := strings.Split(pathURL, "/")
switch len(fields) {
// Nothing or alias format
case 0, 1:
return false
// alias/bucket format
case 2:
return true
} // default case..
// alias/bucket/prefix format
return strings.HasSuffix(pathURL, "/")
}
// getSourceStreamMetadataFromURL gets a reader from URL.
func getSourceStreamMetadataFromURL(ctx context.Context, aliasedURL, versionID string, timeRef time.Time, encKeyDB map[string][]prefixSSEPair) (reader io.ReadCloser,
metadata map[string]string, err *probe.Error) {
alias, urlStrFull, _, err := expandAlias(aliasedURL)
if err != nil {
return nil, nil, err.Trace(aliasedURL)
}
if !timeRef.IsZero() {
_, content, err := url2Stat(ctx, aliasedURL, "", false, nil, timeRef)
if err != nil {
return nil, nil, err
}
versionID = content.VersionID
}
sseKey := getSSE(aliasedURL, encKeyDB[alias])
return getSourceStream(ctx, alias, urlStrFull, versionID, true, sseKey, false)
}
// getSourceStreamFromURL gets a reader from URL.
func getSourceStreamFromURL(ctx context.Context, urlStr, versionID string, encKeyDB map[string][]prefixSSEPair) (reader io.ReadCloser, err *probe.Error) {
alias, urlStrFull, _, err := expandAlias(urlStr)
if err != nil {
return nil, err.Trace(urlStr)
}
sse := getSSE(urlStr, encKeyDB[alias])
reader, _, err = getSourceStream(ctx, alias, urlStrFull, versionID, false, sse, false)
return reader, err
}
func probeContentType(reader io.Reader) (ctype string, err *probe.Error) {
ctype = "application/octet-stream"
// Read a chunk to decide between utf-8 text and binary
if s, ok := reader.(io.Seeker); ok {
var buf [512]byte
n, _ := io.ReadFull(reader, buf[:])
if n <= 0 {
return ctype, nil
}
kind, e := filetype.Match(buf[:n])
if e != nil {
return ctype, probe.NewError(e)
}
// rewind to output whole file
if _, e = s.Seek(0, io.SeekStart); e != nil {
return ctype, probe.NewError(e)
}
if kind.MIME.Value != "" {
ctype = kind.MIME.Value
}
}
return ctype, nil
}
// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
var v *os.File
v, ok = reader.(*os.File)
if ok {
// Stdin, Stdout and Stderr all have *os.File type
// which happen to also be io.ReaderAt compatible
// we need to add special conditions for them to
// be ignored by this function.
for _, f := range []string{
"/dev/stdin",
"/dev/stdout",
"/dev/stderr",
} {
if f == v.Name() {
ok = false
break
}
}
}
return
}
// getSourceStream gets a reader from URL.
func getSourceStream(ctx context.Context, alias, urlStr, versionID string, fetchStat bool, sse encrypt.ServerSide, preserve bool) (reader io.ReadCloser, metadata map[string]string, err *probe.Error) {
sourceClnt, err := newClientFromAlias(alias, urlStr)
if err != nil {
return nil, nil, err.Trace(alias, urlStr)
}
reader, err = sourceClnt.Get(ctx, GetOptions{SSE: sse, VersionID: versionID})
if err != nil {
return nil, nil, err.Trace(alias, urlStr)
}
metadata = make(map[string]string)
if fetchStat {
var st *ClientContent
mo, mok := reader.(*minio.Object)
if mok {
oinfo, e := mo.Stat()
if e != nil {
return nil, nil, probe.NewError(e).Trace(alias, urlStr)
}
st = &ClientContent{}
st.Time = oinfo.LastModified
st.Size = oinfo.Size
st.ETag = oinfo.ETag
st.Expires = oinfo.Expires
st.Type = os.FileMode(0664)
st.Metadata = map[string]string{}
for k := range oinfo.Metadata {
st.Metadata[k] = oinfo.Metadata.Get(k)
}
st.ETag = oinfo.ETag
} else {
st, err = sourceClnt.Stat(ctx, StatOptions{preserve: preserve, sse: sse})
if err != nil {
return nil, nil, err.Trace(alias, urlStr)
}
}
for k, v := range st.Metadata {
if httpguts.ValidHeaderFieldName(k) &&
httpguts.ValidHeaderFieldValue(v) {
metadata[k] = v
}
}
// All unrecognized files have `application/octet-stream`
// So we continue our detection process.
if ctype := metadata["Content-Type"]; ctype == "application/octet-stream" {
// Continue probing content-type if its filesystem stream.
if !mok {
metadata["Content-Type"], err = probeContentType(reader)
if err != nil {
return nil, nil, err.Trace(alias, urlStr)
}
}
}
}
return reader, metadata, nil
}
// putTargetRetention sets retention headers if any
func putTargetRetention(ctx context.Context, alias string, urlStr string, metadata map[string]string) *probe.Error {
targetClnt, err := newClientFromAlias(alias, urlStr)
if err != nil {
return err.Trace(alias, urlStr)
}
lockModeStr, ok := metadata[AmzObjectLockMode]
lockMode := minio.RetentionMode("")
if ok {
lockMode = minio.RetentionMode(lockModeStr)
delete(metadata, AmzObjectLockMode)
}
retainUntilDateStr, ok := metadata[AmzObjectLockRetainUntilDate]
retainUntilDate := timeSentinel
if ok {
delete(metadata, AmzObjectLockRetainUntilDate)
if t, e := time.Parse(time.RFC3339, retainUntilDateStr); e == nil {
retainUntilDate = t.UTC()
}
}
if err := targetClnt.PutObjectRetention(ctx, "", lockMode, retainUntilDate, false); err != nil {
return err.Trace(alias, urlStr)
}
return nil
}
// putTargetStream writes to URL from Reader.
func putTargetStream(ctx context.Context, alias, urlStr, mode, until, legalHold string, reader io.Reader, size int64, progress io.Reader, opts PutOptions) (int64, *probe.Error) {
targetClnt, err := newClientFromAlias(alias, urlStr)
if err != nil {
return 0, err.Trace(alias, urlStr)
}
if mode != "" {
opts.metadata[AmzObjectLockMode] = mode
}
if until != "" {
opts.metadata[AmzObjectLockRetainUntilDate] = until
}
if legalHold != "" {
opts.metadata[AmzObjectLockLegalHold] = legalHold
}
n, err := targetClnt.Put(ctx, reader, size, progress, opts)
if err != nil {
return n, err.Trace(alias, urlStr)
}
return n, nil
}
// putTargetStreamWithURL writes to URL from reader. If length=-1, read until EOF.
func putTargetStreamWithURL(urlStr string, reader io.Reader, size int64, opts PutOptions) (int64, *probe.Error) {
alias, urlStrFull, _, err := expandAlias(urlStr)
if err != nil {
return 0, err.Trace(alias, urlStr)
}
contentType := guessURLContentType(urlStr)
if opts.metadata == nil {
opts.metadata = map[string]string{}
}
opts.metadata["Content-Type"] = contentType
return putTargetStream(context.Background(), alias, urlStrFull, "", "", "", reader, size, nil, opts)
}
// copySourceToTargetURL copies to targetURL from source.
func copySourceToTargetURL(ctx context.Context, alias, urlStr, source, sourceVersionID, mode, until, legalHold string, size int64, progress io.Reader, opts CopyOptions) *probe.Error {
targetClnt, err := newClientFromAlias(alias, urlStr)
if err != nil {
return err.Trace(alias, urlStr)
}
opts.versionID = sourceVersionID
opts.size = size
opts.metadata[AmzObjectLockMode] = mode
opts.metadata[AmzObjectLockRetainUntilDate] = until
opts.metadata[AmzObjectLockLegalHold] = legalHold
err = targetClnt.Copy(ctx, source, opts, progress)
if err != nil {
return err.Trace(alias, urlStr)
}
return nil
}
func filterMetadata(metadata map[string]string) map[string]string {
newMetadata := map[string]string{}
for k, v := range metadata {
if httpguts.ValidHeaderFieldName(k) && httpguts.ValidHeaderFieldValue(v) {
newMetadata[k] = v
}
}
for k := range metadata {
if strings.HasPrefix(http.CanonicalHeaderKey(k), http.CanonicalHeaderKey(serverEncryptionKeyPrefix)) {
delete(newMetadata, k)
}
}
return newMetadata
}
// getAllMetadata - returns a map of user defined metadata
// by combining the user metadata of the object and the values passed by the attr keyword
func getAllMetadata(ctx context.Context, sourceAlias, sourceURLStr string, srcSSE encrypt.ServerSide, urls URLs) (map[string]string, *probe.Error) {
metadata := make(map[string]string)
sourceClnt, err := newClientFromAlias(sourceAlias, sourceURLStr)
if err != nil {
return nil, err.Trace(sourceAlias, sourceURLStr)
}
st, err := sourceClnt.Stat(ctx, StatOptions{preserve: true, sse: srcSSE})
if err != nil {
return nil, err.Trace(sourceAlias, sourceURLStr)
}
for k, v := range st.Metadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
for k, v := range urls.TargetContent.UserMetadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
return filterMetadata(metadata), nil
}
// uploadSourceToTargetURL - uploads to targetURL from source.
// optionally optimizes copy for object sizes <= 5GiB by using
// server side copy operation.
func uploadSourceToTargetURL(ctx context.Context, urls URLs, progress io.Reader, encKeyDB map[string][]prefixSSEPair, preserve bool) URLs {
sourceAlias := urls.SourceAlias
sourceURL := urls.SourceContent.URL
sourceVersion := urls.SourceContent.VersionID
targetAlias := urls.TargetAlias
targetURL := urls.TargetContent.URL
length := urls.SourceContent.Size
sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, urls.SourceContent.URL.Path))
targetPath := filepath.ToSlash(filepath.Join(targetAlias, urls.TargetContent.URL.Path))
srcSSE := getSSE(sourcePath, encKeyDB[sourceAlias])
tgtSSE := getSSE(targetPath, encKeyDB[targetAlias])
var err *probe.Error
var metadata = map[string]string{}
var mode, until, legalHold string
// add object retention fields in metadata for target, if target wants
// to override defaults from source, usually happens in `cp` command.
// for the most part source metadata is copied over.
if urls.TargetContent.RetentionEnabled {
m := minio.RetentionMode(strings.ToUpper(urls.TargetContent.RetentionMode))
if !m.IsValid() {
return urls.WithError(probe.NewError(errors.New("invalid retention mode")).Trace(targetURL.String()))
}
var dur uint64
var unit minio.ValidityUnit
dur, unit, err = parseRetentionValidity(urls.TargetContent.RetentionDuration)
if err != nil {
return urls.WithError(err.Trace(targetURL.String()))
}
mode = urls.TargetContent.RetentionMode
until, err = getRetainUntilDate(dur, unit)
if err != nil {
return urls.WithError(err.Trace(sourceURL.String()))
}
}
// add object legal hold fields in metadata for target, if target wants
// to override defaults from source, usually happens in `cp` command.
// for the most part source metadata is copied over.
if urls.TargetContent.LegalHoldEnabled {
switch minio.LegalHoldStatus(urls.TargetContent.LegalHold) {
case minio.LegalHoldDisabled:
case minio.LegalHoldEnabled:
default:
return urls.WithError(errInvalidArgument().Trace(urls.TargetContent.LegalHold))
}
legalHold = urls.TargetContent.LegalHold
}
for k, v := range urls.SourceContent.UserMetadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
for k, v := range urls.SourceContent.Metadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
// Optimize for server side copy if the host is same.
if sourceAlias == targetAlias {
// preserve new metadata and save existing ones.
if preserve {
currentMetadata, err := getAllMetadata(ctx, sourceAlias, sourceURL.String(), srcSSE, urls)
if err != nil {
return urls.WithError(err.Trace(sourceURL.String()))
}
for k, v := range currentMetadata {
metadata[k] = v
}
}
// Get metadata from target content as well
for k, v := range urls.TargetContent.Metadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
// Get userMetadata from target content as well
for k, v := range urls.TargetContent.UserMetadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
sourcePath := filepath.ToSlash(sourceURL.Path)
if urls.SourceContent.RetentionEnabled {
err = putTargetRetention(ctx, targetAlias, targetURL.String(), metadata)
return urls.WithError(err.Trace(sourceURL.String()))
}
opts := CopyOptions{
srcSSE: srcSSE,
tgtSSE: tgtSSE,
metadata: filterMetadata(metadata),
disableMultipart: urls.DisableMultipart,
isPreserve: preserve,
storageClass: urls.TargetContent.StorageClass,
}
err = copySourceToTargetURL(ctx, targetAlias, targetURL.String(), sourcePath, sourceVersion, mode, until,
legalHold, length, progress, opts)
} else {
if urls.SourceContent.RetentionEnabled {
// preserve new metadata and save existing ones.
if preserve {
currentMetadata, err := getAllMetadata(ctx, sourceAlias, sourceURL.String(), srcSSE, urls)
if err != nil {
return urls.WithError(err.Trace(sourceURL.String()))
}
for k, v := range currentMetadata {
metadata[k] = v
}
}
// Get metadata from target content as well
for k, v := range urls.TargetContent.Metadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
// Get userMetadata from target content as well
for k, v := range urls.TargetContent.UserMetadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
err = putTargetRetention(ctx, targetAlias, targetURL.String(), metadata)
return urls.WithError(err.Trace(sourceURL.String()))
}
var reader io.ReadCloser
// Proceed with regular stream copy.
reader, metadata, err = getSourceStream(ctx, sourceAlias, sourceURL.String(), sourceVersion, true, srcSSE, preserve)
if err != nil {
return urls.WithError(err.Trace(sourceURL.String()))
}
defer reader.Close()
// Get metadata from target content as well
for k, v := range urls.TargetContent.Metadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
// Get userMetadata from target content as well
for k, v := range urls.TargetContent.UserMetadata {
metadata[http.CanonicalHeaderKey(k)] = v
}
putOpts := PutOptions{
metadata: filterMetadata(metadata),
sse: tgtSSE,
storageClass: urls.TargetContent.StorageClass,
md5: urls.MD5,
disableMultipart: urls.DisableMultipart,
isPreserve: preserve,
}
if isReadAt(reader) {
_, err = putTargetStream(ctx, targetAlias, targetURL.String(), mode, until,
legalHold, reader, length, progress, putOpts)
} else {
_, err = putTargetStream(ctx, targetAlias, targetURL.String(), mode, until,
legalHold, io.LimitReader(reader, length), length, progress, putOpts)
}
}
if err != nil {
return urls.WithError(err.Trace(sourceURL.String()))
}
return urls.WithError(nil)
}
// newClientFromAlias gives a new client interface for matching
// alias entry in the mc config file. If no matching host config entry
// is found, fs client is returned.
func newClientFromAlias(alias, urlStr string) (Client, *probe.Error) {
alias, _, hostCfg, err := expandAlias(alias)
if err != nil {
return nil, err.Trace(alias, urlStr)
}
if hostCfg == nil {
// No matching host config. So we treat it like a
// filesystem.
fsClient, fsErr := fsNew(urlStr)
if fsErr != nil {
return nil, fsErr.Trace(alias, urlStr)
}
return fsClient, nil
}
s3Config := NewS3Config(urlStr, hostCfg)
s3Client, err := S3New(s3Config)
if err != nil {
return nil, err.Trace(alias, urlStr)
}
return s3Client, nil
}
// urlRgx - verify if aliased url is real URL.
var urlRgx = regexp.MustCompile("^https?://")
// newClient gives a new client interface
func newClient(aliasedURL string) (Client, *probe.Error) {
alias, urlStrFull, hostCfg, err := expandAlias(aliasedURL)
if err != nil {
return nil, err.Trace(aliasedURL)
}
// Verify if the aliasedURL is a real URL, fail in those cases
// indicating the user to add alias.
if hostCfg == nil && urlRgx.MatchString(aliasedURL) {
return nil, errInvalidAliasedURL(aliasedURL).Trace(aliasedURL)
}
return newClientFromAlias(alias, urlStrFull)
}
|
[
"\"MC_ENCRYPT\"",
"\"MC_ENCRYPT_KEY\""
] |
[] |
[
"MC_ENCRYPT",
"MC_ENCRYPT_KEY"
] |
[]
|
["MC_ENCRYPT", "MC_ENCRYPT_KEY"]
|
go
| 2 | 0 | |
docs/source/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tiledb-plot-widget documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"nbsphinx",
"jupyter_sphinx.embed_widgets",
"nbsphinx_link",
]
# Ensure our extension is available:
import sys
from os.path import dirname, join as pjoin
docs = dirname(dirname(__file__))
root = dirname(docs)
sys.path.insert(0, root)
sys.path.insert(0, pjoin(docs, "sphinxext"))
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "tiledb-plot-widget"
copyright = "2020, TileDB, Inc."
author = "TileDB"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# get version from python package:
import os
here = os.path.dirname(__file__)
repo = os.path.join(here, "..", "..")
_version_py = os.path.join(repo, "tiledb-plot-widget", "_version.py")
version_ns = {}
with open(_version_py) as f:
exec(f.read(), version_ns)
# The short X.Y version.
version = "%i.%i" % version_ns["version_info"][:2]
# The full version, including alpha/beta/rc tags.
release = version_ns["__version__"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["**.ipynb_checkpoints"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "tiledb-plot-widgetdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"tiledb-plot-widget.tex",
"tiledb-plot-widget Documentation",
"TileDB",
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "tiledb-plot-widget", "tiledb-plot-widget Documentation", [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"tiledb-plot-widget",
"tiledb-plot-widget Documentation",
author,
"tiledb-plot-widget",
"Custom Jupyterlab widget for TileDB",
"Miscellaneous",
),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
# Read The Docs
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Uncomment this line if you have known exceptions in your included notebooks
# that nbsphinx complains about:
#
nbsphinx_allow_errors = True # exception ipstruct.py ipython_genutils
def setup(app):
app.setup_extension("jupyter_sphinx.embed_widgets")
def add_scripts(app):
for fname in ["helper.js", "embed-bundle.js"]:
if not os.path.exists(os.path.join(here, "_static", fname)):
app.warn("missing javascript file: %s" % fname)
app.add_javascript(fname)
app.connect("builder-inited", add_scripts)
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
cmd/abapEnvironmentAssembleConfirm_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type abapEnvironmentAssembleConfirmOptions struct {
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
CfServiceInstance string `json:"cfServiceInstance,omitempty"`
CfServiceKeyName string `json:"cfServiceKeyName,omitempty"`
Host string `json:"host,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
AddonDescriptor string `json:"addonDescriptor,omitempty"`
MaxRuntimeInMinutes int `json:"maxRuntimeInMinutes,omitempty"`
}
type abapEnvironmentAssembleConfirmCommonPipelineEnvironment struct {
abap struct {
addonDescriptor string
}
}
func (p *abapEnvironmentAssembleConfirmCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "abap", name: "addonDescriptor", value: p.abap.addonDescriptor},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// AbapEnvironmentAssembleConfirmCommand Confirm the Delivery of Assembly for installation, support package or patch in SAP Cloud Platform ABAP Environment system
func AbapEnvironmentAssembleConfirmCommand() *cobra.Command {
const STEP_NAME = "abapEnvironmentAssembleConfirm"
metadata := abapEnvironmentAssembleConfirmMetadata()
var stepConfig abapEnvironmentAssembleConfirmOptions
var startTime time.Time
var commonPipelineEnvironment abapEnvironmentAssembleConfirmCommonPipelineEnvironment
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createAbapEnvironmentAssembleConfirmCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Confirm the Delivery of Assembly for installation, support package or patch in SAP Cloud Platform ABAP Environment system",
Long: `This step confirms the assemblies of provided [installations, support packages or patches] in SAP Cloud Platform ABAP Environment system`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
abapEnvironmentAssembleConfirm(stepConfig, &stepTelemetryData, &commonPipelineEnvironment)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapEnvironmentAssembleConfirmFlags(createAbapEnvironmentAssembleConfirmCmd, &stepConfig)
return createAbapEnvironmentAssembleConfirmCmd
}
func addAbapEnvironmentAssembleConfirmFlags(cmd *cobra.Command, stepConfig *abapEnvironmentAssembleConfirmOptions) {
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", os.Getenv("PIPER_cfApiEndpoint"), "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry target organization")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "Cloud Foundry target space")
cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Cloud Foundry Service Instance")
cmd.Flags().StringVar(&stepConfig.CfServiceKeyName, "cfServiceKeyName", os.Getenv("PIPER_cfServiceKeyName"), "Cloud Foundry Service Key")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the host address of the SAP Cloud Platform ABAP Environment system")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0582")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0582")
cmd.Flags().StringVar(&stepConfig.AddonDescriptor, "addonDescriptor", os.Getenv("PIPER_addonDescriptor"), "Structure in the commonPipelineEnvironment containing information about the Product Version and corresponding Software Component Versions")
cmd.Flags().IntVar(&stepConfig.MaxRuntimeInMinutes, "maxRuntimeInMinutes", 360, "maximal runtime of the step")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("addonDescriptor")
cmd.MarkFlagRequired("maxRuntimeInMinutes")
}
// retrieve step metadata
func abapEnvironmentAssembleConfirmMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapEnvironmentAssembleConfirm",
Aliases: []config.Alias{},
Description: "Confirm the Delivery of Assembly for installation, support package or patch in SAP Cloud Platform ABAP Environment system",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "abapCredentialsId", Description: "Jenkins credentials ID containing user and password to authenticate to the Cloud Platform ABAP Environment system or the Cloud Foundry API", Type: "jenkins", Aliases: []config.Alias{{Name: "cfCredentialsId", Deprecated: false}, {Name: "credentialsId", Deprecated: false}}},
},
Parameters: []config.StepParameters{
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
Default: os.Getenv("PIPER_cfApiEndpoint"),
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
Default: os.Getenv("PIPER_cfOrg"),
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
Default: os.Getenv("PIPER_cfSpace"),
},
{
Name: "cfServiceInstance",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}},
Default: os.Getenv("PIPER_cfServiceInstance"),
},
{
Name: "cfServiceKeyName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceKey"}, {Name: "cloudFoundry/serviceKeyName"}, {Name: "cfServiceKey"}},
Default: os.Getenv("PIPER_cfServiceKeyName"),
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_host"),
},
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "addonDescriptor",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "abap/addonDescriptor",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_addonDescriptor"),
},
{
Name: "maxRuntimeInMinutes",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: true,
Aliases: []config.Alias{},
Default: 360,
},
},
},
Containers: []config.Container{
{Name: "cf", Image: "ppiper/cf-cli:7"},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "abap/addonDescriptor"},
},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_cfApiEndpoint\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_cfServiceKeyName\"",
"\"PIPER_host\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_addonDescriptor\"",
"\"PIPER_cfApiEndpoint\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_cfServiceKeyName\"",
"\"PIPER_host\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_addonDescriptor\""
] |
[] |
[
"PIPER_addonDescriptor",
"PIPER_cfSpace",
"PIPER_host",
"PIPER_cfApiEndpoint",
"PIPER_password",
"PIPER_username",
"PIPER_cfServiceInstance",
"PIPER_cfServiceKeyName",
"PIPER_cfOrg"
] |
[]
|
["PIPER_addonDescriptor", "PIPER_cfSpace", "PIPER_host", "PIPER_cfApiEndpoint", "PIPER_password", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_cfServiceKeyName", "PIPER_cfOrg"]
|
go
| 9 | 0 | |
pkg/test-infra/util/util.go
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"bytes"
"context"
"fmt"
"html/template"
"os"
"strings"
"github.com/ngaut/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/pingcap/tipocket/pkg/cluster"
"github.com/pingcap/tipocket/pkg/test-infra/fixture"
)
// FindPort get possible correct port when there are multiple ports
func FindPort(podName, component string, containers []corev1.Container) int32 {
var container *corev1.Container
for idx, c := range containers {
if c.Name == component {
container = &containers[idx]
break
}
}
// if we cannot find the target container according to component name, fallback to the first container
if container == nil {
log.Errorf("failed to find the main container %s in %s", component, podName)
container = &containers[0]
}
ports := container.Ports
var priorityPort int32 = 0
if component == string(cluster.PD) {
priorityPort = 2379
} else if component == string(cluster.TiKV) {
priorityPort = 20160
} else if component == string(cluster.TiDB) {
priorityPort = 4000
} else if component == string(cluster.DM) {
priorityPort = 8261
} else if component == string(cluster.MySQL) {
priorityPort = 3306
} else if component == string(cluster.TiCDC) {
priorityPort = 8301
}
for _, port := range ports {
if port.ContainerPort == priorityPort {
return priorityPort
}
}
return ports[0].ContainerPort
}
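// Illustrative example (not part of the original source): for a "pd" container that
// exposes ports 2379 and 2380, FindPort returns 2379; if no container port matches
// the component's priority port, the first declared container port is returned.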
// RenderTemplateFunc ...
func RenderTemplateFunc(tpl *template.Template, model interface{}) (string, error) {
buff := new(bytes.Buffer)
err := tpl.Execute(buff, model)
if err != nil {
return "", err
}
return buff.String(), nil
}
// ApplyObject applies k8s object
func ApplyObject(client client.Client, object runtime.Object) error {
if err := client.Create(context.TODO(), object); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
// BuildImage builds an image URL: ${fixture.Context.HubAddress}/${fixture.Context.DockerRepository}/$name:$tag
// or returns the fullImageIfNotEmpty if it's not empty
func BuildImage(name, tag, fullImageIfNotEmpty string) string {
if len(fullImageIfNotEmpty) > 0 {
return fullImageIfNotEmpty
}
var b strings.Builder
hub := chooseHub(name)
if hub != "" {
fmt.Fprintf(&b, "%s/", hub)
}
b.WriteString(fixture.Context.DockerRepository)
b.WriteString("/")
b.WriteString(name)
b.WriteString(":")
b.WriteString(tag)
return b.String()
}
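// Illustrative example (not part of the original source): with HubAddress
// "docker.io", DockerRepository "pingcap", name "tidb" and tag "v5.0.0"
// (hypothetical values) and no component-specific hub override configured,
// BuildImage returns "docker.io/pingcap/tidb:v5.0.0".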
func chooseHub(image string) string {
switch image {
case "tidb":
if fixture.Context.TiDBClusterConfig.TiDBHubAddress != "" {
return fixture.Context.TiDBClusterConfig.TiDBHubAddress
}
case "tikv":
if fixture.Context.TiDBClusterConfig.TiKVHubAddress != "" {
return fixture.Context.TiDBClusterConfig.TiKVHubAddress
}
case "pd":
if fixture.Context.TiDBClusterConfig.PDHubAddress != "" {
return fixture.Context.TiDBClusterConfig.PDHubAddress
}
case "tiflash":
if fixture.Context.TiDBClusterConfig.TiFlashHubAddress != "" {
return fixture.Context.TiDBClusterConfig.TiFlashHubAddress
}
}
return fixture.Context.HubAddress
}
// GetNodeIPsFromPod gets the IPs (or addresses) for nodes.
func GetNodeIPsFromPod(cli client.Client, namespace string, podLabels map[string]string) ([]string, error) {
var ips []string
pods := &corev1.PodList{}
if err := cli.List(context.Background(), pods, client.InNamespace(namespace), client.MatchingLabels(podLabels)); err != nil {
return ips, err
}
for _, item := range pods.Items {
ips = append(ips, item.Status.HostIP)
}
return ips, nil
}
// GetServiceByMeta gets the service by its meta.
func GetServiceByMeta(cli client.Client, svc *corev1.Service) (*corev1.Service, error) {
clone := svc.DeepCopy()
key, err := client.ObjectKeyFromObject(clone)
if err != nil {
return nil, err
}
if err := cli.Get(context.Background(), key, clone); err != nil {
return nil, err
}
return clone, nil
}
// IsInK8sPodEnvironment checks whether in the k8s pod environment
// refer: https://stackoverflow.com/questions/36639062/how-do-i-tell-if-my-container-is-running-inside-a-kubernetes-cluster/54130803#54130803
func IsInK8sPodEnvironment() bool {
return os.Getenv("KUBERNETES_SERVICE_HOST") != ""
}
// GetFQDNFromStsPod generates the full qualified domain name from a pod of sts which has a headless(or other service kinds) service with it
func GetFQDNFromStsPod(pod *corev1.Pod) (string, error) {
if pod.Spec.Hostname == "" {
return "", fmt.Errorf("expect non-empty .spec.hostname of %s/%s", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
}
if pod.Spec.Subdomain == "" {
return "", fmt.Errorf("expect non-empty .spec.subdomain of %s/%s", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
}
return fmt.Sprintf("%s.%s.%s.svc", pod.Spec.Hostname, pod.Spec.Subdomain, pod.Namespace), nil
}
|
[
"\"KUBERNETES_SERVICE_HOST\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST"
] |
[]
|
["KUBERNETES_SERVICE_HOST"]
|
go
| 1 | 0 |