filename stringlengths 4–198 | content stringlengths 25–939k | environment sequence | variablearg sequence | constarg sequence | variableargjson stringclasses 1 value | constargjson stringlengths 2–3.9k | lang stringclasses 3 values | constargcount float64 0–129 ⌀ | variableargcount float64 0–0 ⌀ | sentence stringclasses 1 value |
---|---|---|---|---|---|---|---|---|---|---|
pkg/nuctl/command/run.go | /*
Copyright 2017 The Nuclio Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/nuclio/nuclio/pkg/errors"
"github.com/nuclio/nuclio/pkg/functioncr"
"github.com/nuclio/nuclio/pkg/nuctl"
"github.com/nuclio/nuclio/pkg/nuctl/runner"
"github.com/spf13/cobra"
)
type runCommandeer struct {
cmd *cobra.Command
rootCommandeer *RootCommandeer
runOptions runner.Options
encodedDataBindings string
}
func newRunCommandeer(rootCommandeer *RootCommandeer) *runCommandeer {
commandeer := &runCommandeer{
rootCommandeer: rootCommandeer,
}
cmd := &cobra.Command{
Use: "run function-name",
Short: "Build, deploy and run a function",
RunE: func(cmd *cobra.Command, args []string) error {
// decode the JSON data bindings
if err := json.Unmarshal([]byte(commandeer.encodedDataBindings),
&commandeer.runOptions.DataBindings); err != nil {
return errors.Wrap(err, "Failed to decode data bindings")
}
err := prepareRunnerOptions(args, &rootCommandeer.commonOptions, &commandeer.runOptions)
if err != nil {
return err
}
// create logger
logger, err := rootCommandeer.createLogger()
if err != nil {
return errors.Wrap(err, "Failed to create logger")
}
// create function runner and execute
functionRunner, err := runner.NewFunctionRunner(logger)
if err != nil {
return errors.Wrap(err, "Failed to create function runner")
}
// create a kube consumer - a bunch of kubernetes clients
kubeConsumer, err := nuctl.NewKubeConsumer(logger, commandeer.runOptions.Common.KubeconfigPath)
if err != nil {
return errors.Wrap(err, "Failed to create kubeconsumer")
}
_, err = functionRunner.Run(kubeConsumer, &commandeer.runOptions)
return err
},
}
addRunFlags(cmd, &commandeer.runOptions, &commandeer.encodedDataBindings)
commandeer.cmd = cmd
return commandeer
}
func prepareRunnerOptions(args []string,
commonOptions *nuctl.CommonOptions,
runOptions *runner.Options) error {
functionName := ""
var specRegistryURL, specImageName, specImageVersion string
var err error
// if the spec path was set, load the spec
if runOptions.SpecPath != "" {
err := functioncr.FromSpecFile(runOptions.SpecPath, &runOptions.Spec)
if err != nil {
return errors.Wrap(err, "Failed to read spec file")
}
}
// name can either be a positional argument or passed in the spec
if len(args) != 1 {
if runOptions.Spec.ObjectMeta.Name == "" {
return errors.New("Function run requires name")
}
// use name from spec
functionName = runOptions.Spec.ObjectMeta.Name
} else {
functionName = args[0]
}
// function can either be in the path or received inline
if runOptions.Build.Path == "" && runOptions.Spec.Spec.Code.Inline == "" {
return errors.New("Function code must be provided either in path or inline in a spec file")
}
// the image in the specfile can hold both the image name and the push/run registry. check that if it's
// empty, we have what we need from command line arguments
if runOptions.Spec.Spec.Image == "" {
if runOptions.Build.Registry == "" {
return errors.New("Registry is required (can also be specified in spec.image or a NUCTL_REGISTRY env var")
}
if runOptions.Build.ImageName == "" {
// use the function name if image name not provided in specfile
runOptions.Build.ImageName = functionName
}
} else {
// parse the image passed in the spec - we might need it
specRegistryURL, specImageName, specImageVersion, err = parseImageURL(runOptions.Spec.Spec.Image)
if err != nil {
return fmt.Errorf("Failed to parse image URL: %s", err.Error())
}
}
// if the image name was not provided in command line / env, take it from the spec image
if runOptions.Build.ImageName == "" {
runOptions.Build.ImageName = specImageName
}
// same for version
if runOptions.Build.ImageVersion == "latest" && specImageVersion != "" {
runOptions.Build.ImageVersion = specImageVersion
}
// same for push registry
if runOptions.Build.Registry == "" {
runOptions.Build.Registry = specRegistryURL
}
// if the run registry wasn't specified, take the build registry
if runOptions.RunRegistry == "" {
runOptions.RunRegistry = runOptions.Build.Registry
}
// set common
runOptions.Build.Common = commonOptions
runOptions.Common = commonOptions
runOptions.Common.Identifier = functionName
return nil
}
func parseImageURL(imageURL string) (url string, imageName string, imageVersion string, err error) {
urlAndImageName := strings.SplitN(imageURL, "/", 2)
if len(urlAndImageName) != 2 {
err = errors.New("Failed looking for image splitter: /")
return
}
url = urlAndImageName[0]
imageNameAndVersion := strings.Split(urlAndImageName[1], ":")
imageName = imageNameAndVersion[0]
if len(imageNameAndVersion) == 1 {
imageVersion = "latest"
} else if len(imageNameAndVersion) == 2 {
imageVersion = imageNameAndVersion[1]
}
return
}
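// Example (hypothetical values): "some.registry:5000/helloworld:0.1" parses to
// url "some.registry:5000", imageName "helloworld" and imageVersion "0.1";
// without a tag ("some.registry:5000/helloworld") imageVersion defaults to "latest",
// and an input without "/" returns an error.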
func addRunFlags(cmd *cobra.Command, options *runner.Options, encodedDataBindings *string) {
addBuildFlags(cmd, &options.Build)
cmd.Flags().StringVarP(&options.SpecPath, "file", "f", "", "Function Spec File")
cmd.Flags().StringVar(&options.Description, "desc", "", "Function description")
cmd.Flags().StringVarP(&options.Scale, "scale", "s", "1", "Function scaling (auto|number)")
cmd.Flags().StringVarP(&options.Labels, "labels", "l", "", "Additional function labels (lbl1=val1,lbl2=val2..)")
cmd.Flags().StringVarP(&options.Env, "env", "e", "", "Environment variables (name1=val1,name2=val2..)")
cmd.Flags().StringVar(&options.Events, "events", "", "Comma separated list of event sources (in json)")
cmd.Flags().StringVar(&options.Data, "data", "", "Comma separated list of data bindings (in json)")
cmd.Flags().BoolVarP(&options.Disabled, "disabled", "d", false, "Start function disabled (don't run yet)")
cmd.Flags().Int32Var(&options.HTTPPort, "port", 0, "Public HTTP port (node port)")
cmd.Flags().Int32Var(&options.MinReplicas, "min-replica", 0, "Minimum number of function replicas")
cmd.Flags().Int32Var(&options.MaxReplicas, "max-replica", 0, "Maximum number of function replicas")
cmd.Flags().BoolVar(&options.Publish, "publish", false, "Publish the function")
cmd.Flags().StringVar(encodedDataBindings, "data-bindings", "{}", "JSON encoded data bindings for the function")
cmd.Flags().StringVar(&options.RunRegistry, "run-registry", os.Getenv("NUCTL_RUN_REGISTRY"), "The registry URL to pull the image from, if differs from -r (env: NUCTL_RUN_REGISTRY)")
}
| ["\"NUCTL_RUN_REGISTRY\""] | [] | ["NUCTL_RUN_REGISTRY"] | [] | ["NUCTL_RUN_REGISTRY"] | go | 1 | 0 | |
networking/main.go | package main
import (
"bufio"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"io"
"log"
"net"
"os"
"strconv"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/joho/godotenv"
)
// Block represents each 'item' in the blockchain
type Block struct {
Index int
Timestamp string
BPM int
Hash string
PrevHash string
}
// Blockchain is a series of validated Blocks
var Blockchain []Block
// bcServer handles incoming concurrent Blocks
var bcServer chan []Block
var mutex = &sync.Mutex{}
func main() {
err := godotenv.Load()
if err != nil {
log.Fatal(err)
}
bcServer = make(chan []Block)
// create genesis block
t := time.Now()
genesisBlock := Block{0, t.String(), 0, "", ""}
spew.Dump(genesisBlock)
Blockchain = append(Blockchain, genesisBlock)
tcpPort := os.Getenv("PORT")
// start TCP and serve TCP server
server, err := net.Listen("tcp", ":"+tcpPort)
if err != nil {
log.Fatal(err)
}
log.Println("TCP Server Listening on port :", tcpPort)
defer server.Close()
for {
conn, err := server.Accept()
if err != nil {
log.Fatal(err)
}
go handleConn(conn)
}
}
func handleConn(conn net.Conn) {
defer conn.Close()
io.WriteString(conn, "Enter a new BPM:")
scanner := bufio.NewScanner(conn)
// take in BPM from stdin and add it to blockchain after conducting necessary validation
go func() {
for scanner.Scan() {
bpm, err := strconv.Atoi(scanner.Text())
if err != nil {
log.Printf("%v not a number: %v", scanner.Text(), err)
continue
}
mutex.Lock()
prevBlock := Blockchain[len(Blockchain)-1]
newBlock, err := generateBlock(prevBlock, bpm)
if err != nil {
log.Println(err)
mutex.Unlock()
continue
}
if isBlockValid(newBlock, prevBlock) {
newBlockchain := append(Blockchain, newBlock)
replaceChain(newBlockchain)
}
mutex.Unlock()
bcServer <- Blockchain
io.WriteString(conn, "\nEnter a new BPM:")
}
}()
// simulate receiving broadcast
go func() {
for {
time.Sleep(30 * time.Second)
mutex.Lock()
output, err := json.Marshal(Blockchain)
if err != nil {
log.Fatal(err)
}
mutex.Unlock()
io.WriteString(conn, string(output))
}
}()
for range bcServer {
spew.Dump(Blockchain)
}
}
// make sure the block is valid by checking its index, its link to the previous block's hash, and its own hash
func isBlockValid(newBlock, oldBlock Block) bool {
if oldBlock.Index+1 != newBlock.Index {
return false
}
if oldBlock.Hash != newBlock.PrevHash {
return false
}
if calculateHash(newBlock) != newBlock.Hash {
return false
}
return true
}
// make sure the chain we're checking is longer than the current blockchain;
// callers already hold the mutex, so no extra locking here (sync.Mutex is not reentrant)
func replaceChain(newBlocks []Block) {
if len(newBlocks) > len(Blockchain) {
Blockchain = newBlocks
}
}
// SHA256 hashing
func calculateHash(block Block) string {
record := strconv.Itoa(block.Index) + block.Timestamp + strconv.Itoa(block.BPM) + block.PrevHash
h := sha256.New()
h.Write([]byte(record))
hashed := h.Sum(nil)
return hex.EncodeToString(hashed)
}
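// Example (illustrative values): for Index=1, Timestamp="2017-08-01 12:00:00", BPM=72 and
// PrevHash="abc123", the hashed record is the plain concatenation
// "12017-08-01 12:00:0072abc123", so changing any field changes the resulting SHA256 digest.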
// create a new block using previous block's hash
func generateBlock(oldBlock Block, BPM int) (Block, error) {
var newBlock Block
t := time.Now()
newBlock.Index = oldBlock.Index + 1
newBlock.Timestamp = t.String()
newBlock.BPM = BPM
newBlock.PrevHash = oldBlock.Hash
newBlock.Hash = calculateHash(newBlock)
return newBlock, nil
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
ChernMachine/kernel/VImage.py | import json
import os
import sys
import subprocess
from Chern.utils import csys
from Chern.utils import metadata
from ChernMachine.kernel.VJob import VJob
"""
This should have someting
A image can be determined uniquely by the ?
"""
class VImage(VJob):
def __init__(self, file_name):
super(VImage, self).__init__(file_name)
def inspect(self):
ps = subprocess.Popen("docker inspect {0}".format(self.image_id().decode()), shell=True, stdout=subprocess.PIPE)
info = ps.communicate()
json_info = json.loads(info[0])
return json_info[0]
def is_locked(self):
status_file = metadata.ConfigFile(os.path.join(self.path, "status.json"))
status = status_file.read_variable("status")
return status == "locked"
def status(self):
dirs = csys.list_dir(self.path)
for run in dirs:
if run.startswith("run."):
config_file = metadata.ConfigFile(os.path.join(self.path, run, "status.json"))
status = config_file.read_variable("status", "submitted")
print("status is ", status, file=sys.stderr)
if status != "submitted":
return status
if self.is_locked():
return "locked"
return "submitted"
status = self.config_file.read_variable("status")
if status is None:
return "submitted"
else:
return status
def image_id(self):
dirs = csys.list_dir(self.path)
for run in dirs:
if run.startswith("run."):
config_file = metadata.ConfigFile(os.path.join(self.path, run, "status.json"))
status = config_file.read_variable("status", "submitted")
if status == "built":
return config_file.read_variable("image_id")
return ""
def machine_storage(self):
config_file = metadata.ConfigFile(os.path.join(os.environ["HOME"], ".ChernMachine/config.json"))
machine_id = config_file.read_variable("machine_id")
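# e.g. a machine_id of "abc123" (hypothetical value) maps this machine's builds of the image
# to the per-machine subdirectory "run.abc123" under the image path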
return "run." + machine_id
def execute(self):
run_path = os.path.join(self.path, self.machine_storage())
csys.copy_tree(os.path.join(self.path, "contents"), run_path)
status_file = metadata.ConfigFile(os.path.join(run_path, "status.json"))
status_file.write_variable("status", "building")
entrypoint = open(os.path.join(run_path, "entrypoint.sh"), "w")
entrypoint.write("""#!/bin/bash\n$@\n""")
entrypoint.close()
try:
self.build()
except Exception as e:
self.append_error("Fail to build the image!\n"+str(e))
status_file.write_variable("status", "failed")
raise e
status_file.write_variable("status", "built")
def satisfied(self):
return True
def build(self):
"""
Build the image to change the status of the Algorithm to builded.
It will create a unique VImage object and the md5 of the VImage will be saved.
"""
"""
What to do:
first: copy all the files to a temporary file directory and next
write a docker file
then, you should build the docker file
"""
run_path = os.path.join(self.path, self.machine_storage())
os.chdir(run_path)
ps = subprocess.Popen("docker build .", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps.wait()
if ps.poll() != 0:
raise Exception(ps.stderr.read().decode())
info = ps.communicate()[0]
image_id = info.split()[-1]
status_file = metadata.ConfigFile(os.path.join(run_path, "status.json"))
status_file.write_variable("image_id", image_id.decode())
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
src/amuse/rfi/tools/create_python_worker.py | from amuse.support.core import late, print_out
from amuse.support.options import option
from amuse.support.options import OptionalAttributes
import os
import inspect
import sys
class CreateAPythonWorker(OptionalAttributes):
@option(sections=['data'])
def amuse_root_dir(self):
if 'AMUSE_DIR' in os.environ:
return os.environ['AMUSE_DIR']
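# otherwise walk upward from this file's location until a directory containing build.py
# is found; if the walk reaches the filesystem root without finding it, fall back to
# this file's grandparent directory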
previous = None
result = os.path.abspath(__file__)
while not os.path.exists(os.path.join(result,'build.py')):
result = os.path.dirname(result)
if result == previous:
return os.path.dirname(os.path.dirname(__file__))
previous = result
return result
@late
def channel_type(self):
return 'mpi'
@late
def template_dir(self):
return os.path.dirname(__file__)
@late
def template_string(self):
path = self.template_dir
path = os.path.join(path, 'python_code_script.template')
with open(path, "r") as f:
template_string = f.read()
return template_string
@late
def worker_name(self):
filename = os.path.basename(inspect.getfile(self.implementation_factory))
filename = filename.split('.')[0]
filename = filename.replace(os.sep, '_')
path = os.path.abspath(os.path.curdir)
path = os.path.join(path, filename)
return path
@late
def output_name(self):
executable_path = self.worker_name
return executable_path
@late
def interface_class(self):
return self.specification_class
def new_executable_script_string(self):
return self.template_string.format(
executable = sys.executable,
syspath = ','.join(map(repr, sys.path)),
factory_module = inspect.getmodule(self.implementation_factory).__name__,
factory = self.implementation_factory.__name__,
interface_module = inspect.getmodule(self.interface_class).__name__,
interface = self.interface_class.__name__,
)
@property
def result(self):
return self.new_executable_script_string()
def start(self):
string = self.new_executable_script_string()
with open(self.output_name, 'w') as f:
f.write(string)
os.chmod(self.output_name, 0o777)
| [] | [] | ["AMUSE_DIR"] | [] | ["AMUSE_DIR"] | python | 1 | 0 | |
pkg/image/export/main.go | package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"github.com/coreos/go-semver/semver"
kd "github.com/rancher/rancher/pkg/controllers/management/kontainerdrivermetadata"
img "github.com/rancher/rancher/pkg/image"
ext "github.com/rancher/rancher/pkg/image/external"
"github.com/rancher/rke/types/image"
"github.com/rancher/rke/types/kdm"
)
var (
scriptMap = map[string]string{
"linux-save": linuxSaveScript,
"linux-load": linuxLoadScript,
"linux-mirror": linuxMirrorScript,
"windows-save": windowsSaveScript,
"windows-load": windowsLoadScript,
"windows-mirror": windowsMirrorScript,
}
scriptNameMap = map[string]string{
"linux-save": "rancher-save-images.sh",
"linux-load": "rancher-load-images.sh",
"linux-mirror": "rancher-mirror-to-rancher-org.sh",
"windows-save": "rancher-save-images.ps1",
"windows-load": "rancher-load-images.ps1",
"windows-mirror": "rancher-mirror-to-rancher-org.ps1",
}
filenameMap = map[string]string{
"linux": "rancher-images.txt",
"windows": "rancher-windows-images.txt",
}
sourcesFilenameMap = map[string]string{
"linux": "rancher-images-sources.txt",
"windows": "rancher-windows-images-sources.txt",
}
)
func main() {
if len(os.Args) < 3 {
log.Fatal("\"main.go\" requires 2 arguments. Usage: go run main.go [SYSTEM_CHART_PATH] [CHART_PATH] [OPTIONAL]...")
}
if err := run(os.Args[1], os.Args[2], os.Args[3:]); err != nil {
log.Fatal(err)
}
}
func run(systemChartPath, chartPath string, imagesFromArgs []string) error {
tag, ok := os.LookupEnv("TAG")
if !ok {
return fmt.Errorf("no tag %s", tag)
}
rancherVersion := tag
if strings.HasPrefix(rancherVersion, "dev") || strings.HasPrefix(rancherVersion, "master") {
rancherVersion = kd.RancherVersionDev
}
if strings.HasPrefix(rancherVersion, "v") {
rancherVersion = rancherVersion[1:]
}
// already downloaded in dapper
b, err := ioutil.ReadFile(filepath.Join("data.json"))
if os.IsNotExist(err) {
b, err = ioutil.ReadFile(filepath.Join(os.Getenv("HOME"), "bin", "data.json"))
}
if err != nil {
return err
}
data, err := kdm.FromData(b)
if err != nil {
return err
}
linuxInfo, windowsInfo := kd.GetK8sVersionInfo(
rancherVersion,
data.K8sVersionRKESystemImages,
data.K8sVersionServiceOptions,
data.K8sVersionWindowsServiceOptions,
data.K8sVersionInfo,
)
var k8sVersions []string
for k := range linuxInfo.RKESystemImages {
k8sVersions = append(k8sVersions, k)
}
sort.Strings(k8sVersions)
writeSliceToFile(filepath.Join(os.Getenv("HOME"), "bin", "rancher-rke-k8s-versions.txt"), k8sVersions)
externalImages := make(map[string][]string)
k3sUpgradeImages, err := ext.GetExternalImages(rancherVersion, data.K3S, ext.K3S, nil)
if err != nil {
return err
}
if k3sUpgradeImages != nil {
externalImages["k3sUpgrade"] = k3sUpgradeImages
}
// RKE2 Provisioning will only be supported on Kubernetes v1.21+. In addition, only RKE2
// releases corresponding to Kubernetes v1.21+ include the "rke2-images-all" file that we need.
rke2AllImages, err := ext.GetExternalImages(rancherVersion, data.RKE2, ext.RKE2, &semver.Version{
Major: 1,
Minor: 21,
Patch: 0,
})
if err != nil {
return err
}
if rke2AllImages != nil {
externalImages["rke2All"] = rke2AllImages
}
targetImages, targetImagesAndSources, err := img.GetImages(systemChartPath, chartPath, externalImages, imagesFromArgs, linuxInfo.RKESystemImages, img.Linux)
if err != nil {
return err
}
targetWindowsImages, targetWindowsImagesAndSources, err := img.GetImages(systemChartPath, chartPath, nil, []string{getWindowsAgentImage()}, windowsInfo.RKESystemImages, img.Windows)
if err != nil {
return err
}
type imageTextLists struct {
images []string
imagesAndSources []string
}
for arch, imageLists := range map[string]imageTextLists{
"linux": {images: targetImages, imagesAndSources: targetImagesAndSources},
"windows": {images: targetWindowsImages, imagesAndSources: targetWindowsImagesAndSources},
} {
err = imagesText(arch, imageLists.images)
if err != nil {
return err
}
if err := imagesAndSourcesText(arch, imageLists.imagesAndSources); err != nil {
return err
}
err = mirrorScript(arch, imageLists.images)
if err != nil {
return err
}
err = saveScript(arch, imageLists.images)
if err != nil {
return err
}
err = loadScript(arch, imageLists.images)
if err != nil {
return err
}
}
return nil
}
func loadScript(arch string, targetImages []string) error {
loadScriptName := getScriptFilename(arch, "load")
log.Printf("Creating %s\n", loadScriptName)
load, err := os.Create(loadScriptName)
if err != nil {
return err
}
defer load.Close()
load.Chmod(0755)
fmt.Fprintf(load, getScript(arch, "load"))
return nil
}
func saveImages(targetImages []string) []string {
var saveImages []string
for _, targetImage := range targetImages {
_, ok := image.Mirrors[targetImage]
if !ok {
continue
}
saveImages = append(saveImages, targetImage)
}
return saveImages
}
func saveImagesAndSources(imagesAndSources []string) []string {
var saveImagesAndSources []string
for _, imageAndSources := range imagesAndSources {
targetImage := strings.Split(imageAndSources, " ")[0]
_, ok := image.Mirrors[targetImage]
if !ok {
continue
}
saveImagesAndSources = append(saveImagesAndSources, imageAndSources)
}
return saveImagesAndSources
}
func checkImage(image string) error {
// ignore non prefixed images, also in types (image/mirror.go)
if strings.HasPrefix(image, "weaveworks") || strings.HasPrefix(image, "noiro") || strings.HasPrefix(image, "registry:") || strings.EqualFold(image, "busybox") {
return nil
}
imageNameTag := strings.Split(image, ":")
if len(imageNameTag) != 2 {
return fmt.Errorf("Can't extract tag from image [%s]", image)
}
if !strings.HasPrefix(imageNameTag[0], "rancher/") {
return fmt.Errorf("Image [%s] does not start with rancher/", image)
}
if strings.HasSuffix(imageNameTag[0], "-") {
return fmt.Errorf("Image [%s] has trailing '-', probably an error in image substitution", image)
}
return nil
}
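// Example (illustrative values): "rancher/rancher-agent:v2.6.0" passes all checks,
// "library/nginx:1.21" fails the rancher/ prefix check, "rancher/fleet-:v0.1" fails the
// trailing '-' check, and "busybox" or "weaveworks/..." images are skipped entirely.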
func saveScript(arch string, targetImages []string) error {
filename := getScriptFilename(arch, "save")
log.Printf("Creating %s\n", filename)
save, err := os.Create(filename)
if err != nil {
return err
}
defer save.Close()
save.Chmod(0755)
fmt.Fprintf(save, getScript(arch, "save"))
return nil
}
func imagesText(arch string, targetImages []string) error {
filename := filenameMap[arch]
log.Printf("Creating %s\n", filename)
save, err := os.Create(filename)
if err != nil {
return err
}
defer save.Close()
save.Chmod(0755)
for _, image := range saveImages(targetImages) {
err := checkImage(image)
if err != nil {
return err
}
log.Println("Image:", image)
fmt.Fprintln(save, image)
}
return nil
}
func writeSliceToFile(filename string, versions []string) error {
log.Printf("Creating %s\n", filename)
save, err := os.Create(filename)
if err != nil {
return err
}
defer save.Close()
save.Chmod(0755)
for _, version := range versions {
fmt.Fprintln(save, version)
}
return nil
}
// imagesAndSourcesText writes data of the format "image source1,..." to the filename
// designated for the given arch
func imagesAndSourcesText(arch string, targetImagesAndSources []string) error {
filename := sourcesFilenameMap[arch]
log.Printf("Creating %s\n", filename)
save, err := os.Create(filename)
if err != nil {
return err
}
defer save.Close()
save.Chmod(0755)
for _, imageAndSources := range saveImagesAndSources(targetImagesAndSources) {
if err := checkImage(strings.Split(imageAndSources, " ")[0]); err != nil {
return err
}
fmt.Fprintln(save, imageAndSources)
}
return nil
}
func mirrorScript(arch string, targetImages []string) error {
filename := getScriptFilename(arch, "mirror")
log.Printf("Creating %s\n", filename)
mirror, err := os.Create(filename)
if err != nil {
return err
}
defer mirror.Close()
mirror.Chmod(0755)
scriptStarter := getScript(arch, "mirror")
fmt.Fprintf(mirror, scriptStarter)
var saveImages []string
for _, targetImage := range targetImages {
srcImage, ok := image.Mirrors[targetImage]
if !ok {
continue
}
saveImages = append(saveImages, targetImage)
fmt.Fprintf(mirror, "docker pull %s\n", srcImage)
if targetImage != srcImage {
fmt.Fprintf(mirror, "docker tag %s %s\n", srcImage, targetImage)
fmt.Fprintf(mirror, "docker push %s\n", targetImage)
}
}
return nil
}
func getWindowsAgentImage() string {
tag, ok := os.LookupEnv("TAG")
if !ok {
return ""
}
repo, ok := os.LookupEnv("REPO")
if !ok {
return ""
}
return fmt.Sprintf("%s/rancher-agent:%s", repo, tag)
}
func getScript(arch, fileType string) string {
return scriptMap[fmt.Sprintf("%s-%s", arch, fileType)]
}
func getScriptFilename(arch, fileType string) string {
return scriptNameMap[fmt.Sprintf("%s-%s", arch, fileType)]
}
const (
linuxLoadScript = `#!/bin/bash
images="rancher-images.tar.gz"
list="rancher-images.txt"
windows_image_list=""
windows_versions="1809"
usage () {
echo "USAGE: $0 [--images rancher-images.tar.gz] --registry my.registry.com:5000"
echo " [-l|--image-list path] text file with list of images; one image per line."
echo " [-i|--images path] tar.gz generated by docker save."
echo " [-r|--registry registry:port] target private registry:port."
echo " [--windows-image-list path] text file with list of images used in Windows. Windows image mirroring is skipped when this is empty"
echo " [--windows-versions version] Comma separated Windows versions. e.g., \"1809,2004,20H2\". (Default \"1809\")"
echo " [-h|--help] Usage message"
}
push_manifest () {
export DOCKER_CLI_EXPERIMENTAL=enabled
manifest_list=()
for i in "${arch_list[@]}"
do
manifest_list+=("$1-${i}")
done
echo "Preparing manifest $1, list[${arch_list[@]}]"
docker manifest create "$1" "${manifest_list[@]}" --amend
docker manifest push "$1" --purge
}
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
-r|--registry)
reg="$2"
shift # past argument
shift # past value
;;
-l|--image-list)
list="$2"
shift # past argument
shift # past value
;;
-i|--images)
images="$2"
shift # past argument
shift # past value
;;
--windows-image-list)
windows_image_list="$2"
shift # past argument
shift # past value
;;
--windows-versions)
windows_versions="$2"
shift # past argument
shift # past value
;;
-h|--help)
help="true"
shift
;;
*)
usage
exit 1
;;
esac
done
if [[ -z $reg ]]; then
usage
exit 1
fi
if [[ $help ]]; then
usage
exit 0
fi
docker load --input ${images}
linux_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
linux_images+=("${i}");
done < "${list}"
arch_list=()
if [[ -n "${windows_image_list}" ]]; then
IFS=',' read -r -a versions <<< "$windows_versions"
for version in "${versions[@]}"
do
arch_list+=("windows-${version}")
done
windows_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
windows_images+=("${i}")
done < "${windows_image_list}"
# use manifest to publish images only used in Windows
for i in "${windows_images[@]}"; do
if [[ ! " ${linux_images[@]}" =~ " ${i}" ]]; then
case $i in
*/*)
image_name="${reg}/${i}"
;;
*)
image_name="${reg}/rancher/${i}"
;;
esac
push_manifest "${image_name}"
fi
done
fi
arch_list+=("linux-amd64")
for i in "${linux_images[@]}"; do
[ -z "${i}" ] && continue
arch_suffix=""
use_manifest=false
if [[ (-n "${windows_image_list}") && " ${windows_images[@]}" =~ " ${i}" ]]; then
# use manifest to publish images when it is used both in Linux and Windows
use_manifest=true
arch_suffix="-linux-amd64"
fi
case $i in
*/*)
image_name="${reg}/${i}"
;;
*)
image_name="${reg}/rancher/${i}"
;;
esac
docker tag "${i}" "${image_name}${arch_suffix}"
docker push "${image_name}${arch_suffix}"
if $use_manifest; then
push_manifest "${image_name}"
fi
done
`
linuxSaveScript = `#!/bin/bash
list="rancher-images.txt"
images="rancher-images.tar.gz"
usage () {
echo "USAGE: $0 [--image-list rancher-images.txt] [--images rancher-images.tar.gz]"
echo " [-l|--image-list path] text file with list of images; one image per line."
echo " [-i|--images path] tar.gz generated by docker save."
echo " [-h|--help] Usage message"
}
POSITIONAL=()
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
-i|--images)
images="$2"
shift # past argument
shift # past value
;;
-l|--image-list)
list="$2"
shift # past argument
shift # past value
;;
-h|--help)
help="true"
shift
;;
*)
usage
exit 1
;;
esac
done
if [[ $help ]]; then
usage
exit 0
fi
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
if docker pull "${i}" > /dev/null 2>&1; then
echo "Image pull success: ${i}"
pulled="${pulled} ${i}"
else
if docker inspect "${i}" > /dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
done < "${list}"
echo "Creating ${images} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
docker save $(echo ${pulled}) | gzip --stdout > ${images}
`
linuxMirrorScript = "#!/bin/sh\nset -e -x\n\n"
windowsLoadScript = `$ErrorActionPreference = 'Stop'
$script_name = $MyInvocation.InvocationName
$image_list = "rancher-windows-images.txt"
$images = "rancher-windows-images.tar.gz"
$os_release_id = $(Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\' | Select-Object -ExpandProperty ReleaseId)
if ($os_release_id -eq "2009") {
$os_release_id = "20H2"
}
$registry = $null
$help = $false
function usage {
echo "USAGE: $script_name [--images rancher-windows-images.tar.gz] --registry my.registry.com:5000"
echo " [-l|--image-list path] text file with list of images; one image per line."
echo " [-i|--images path] tar.gz generated by docker save."
echo " [-r|--registry registry:port] target private registry:port."
echo " [-o|--os-release-id (1809|2004|20H2|...)] release id of OS, gets detected automatically if not passed."
echo " [-h|--help] Usage message."
}
# parse arguments
$vals = $null
for ($i = $args.Length; $i -ge 0; $i--)
{
$arg = $args[$i]
switch -regex ($arg)
{
'^(-i|--images)$' {
$images = ($vals | Select-Object -First 1)
$vals = $null
}
'^(-l|--image-list)$' {
$image_list = ($vals | Select-Object -First 1)
$vals = $null
}
'^(-r|--registry)$' {
$registry = ($vals | Select-Object -First 1)
$vals = $null
}
'^(-o|--os-release-id)$' {
$os_release_id = ($vals | Select-Object -First 1)
$vals = $null
}
'^(-h|--help)$' {
$help = $true
$vals = $null
}
default {
if ($vals) {
$vals = ,$arg + $vals
} else {
$vals = @($arg)
}
}
}
}
if ($help)
{
usage
exit 0
}
if (-not $registry)
{
echo "Registry address is required"
usage
exit 1
}
if (-not (Test-Path $images))
{
echo "Could not find '$images'"
usage
exit 1
}
docker load --input $images
if (-not $?)
{
echo "Could not load '$images'"
exit 1
}
if (-not (Test-Path $image_list))
{
exit 0
}
Get-Content -Force -Path $image_list | ForEach-Object {
if ($_) {
$fullname_image = ('{0}-windows-{1}' -f $_, $os_release_id)
echo "Tagging $registry/$fullname_image"
switch -regex ($fullname_image)
{
'.+/.+' {
docker tag $fullname_image $registry/$fullname_image
if ($?) {
docker push $registry/$fullname_image
}
}
default {
docker tag $fullname_image $registry/rancher/$fullname_image
if ($?) {
docker push $registry/rancher/$fullname_image
}
}
}
}
}
`
windowsSaveScript = `$ErrorActionPreference = 'Stop'
$script_name = $MyInvocation.InvocationName
$image_list = "rancher-windows-images.txt"
$images = "rancher-windows-images.tar.gz"
$os_release_id = $(Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\' | Select-Object -ExpandProperty ReleaseId)
if ($os_release_id -eq "2009") {
$os_release_id = "20H2"
}
$help = $false
function usage {
echo "USAGE: $script_name [--image-list rancher-windows-images.txt] [--images rancher-windows-images.tar.gz]"
echo " [-l|--image-list path] text file with list of images; one image per line."
echo " [-i|--images path] tar.gz generated by docker save."
echo " [-o|--os-release-id (1809|2004|20H2|...)] release id of OS, gets detected automatically if not passed."
echo " [-h|--help] Usage message."
}
# parse arguments
$vals = $null
for ($i = $args.Length; $i -ge 0; $i--)
{
$arg = $args[$i]
switch -regex ($arg)
{
'^(-l|--image-list)$' {
$image_list = ($vals | Select-Object -First 1)
$vals = $null
}
'^(-i|--images)$' {
$images = ($vals | Select-Object -First 1)
$vals = $null
}
'^(-o|--os-release-id)$' {
$os_release_id = ($vals | Select-Object -First 1)
$vals = $null
}
'^(-h|--help)$' {
$help = $true
$vals = $null
}
default {
if ($vals) {
$vals = ,$arg + $vals
} else {
$vals = @($arg)
}
}
}
}
if ($help)
{
usage
exit 0
}
if (-not (Test-Path $image_list))
{
echo "Could not find '$image_list' file"
usage
exit 1
}
$fullname_images = @()
Get-Content -Force -Path $image_list | ForEach-Object {
if ($_) {
$fullname_image = ('{0}-windows-{1}' -f $_, $os_release_id)
echo "Pulling $fullname_image"
docker pull $fullname_image
if ($?) {
$fullname_images += @($fullname_image)
}
}
}
if (-not $fullname_images)
{
echo "Could not save empty images to host"
echo "Please verify the images of '$image_list' existing or not"
exit 1
}
docker save $($fullname_images) -o $images
`
windowsMirrorScript = ``
)
| ["\"HOME\"", "\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
service/network/tunnel/mucp/mucp_test.go | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original source: github.com/micro/go-micro/v3/network/tunnel/mucp/mucp_test.go
package mucp
import (
"os"
"sync"
"testing"
"time"
"github.com/micro-community/micro/v3/service/network/transport"
"github.com/micro-community/micro/v3/service/network/tunnel"
)
func testBrokenTunAccept(t *testing.T, tun tunnel.Tunnel, wait chan bool, wg *sync.WaitGroup) {
defer wg.Done()
// listen on some virtual address
tl, err := tun.Listen("test-tunnel")
if err != nil {
t.Fatal(err)
}
// receiver ready; notify sender
wait <- true
// accept a connection
c, err := tl.Accept()
if err != nil {
t.Fatal(err)
}
// accept the message and close the tunnel
// we do this to simulate loss of network connection
m := new(transport.Message)
if err := c.Recv(m); err != nil {
t.Fatal(err)
}
// close all the links
for _, link := range tun.Links() {
link.Close()
}
// receiver ready; notify sender
wait <- true
// accept the message
m = new(transport.Message)
if err := c.Recv(m); err != nil {
t.Fatal(err)
}
// notify the sender we have received
wait <- true
}
func testBrokenTunSend(t *testing.T, tun tunnel.Tunnel, wait chan bool, wg *sync.WaitGroup, reconnect time.Duration) {
defer wg.Done()
// wait for the listener to get ready
<-wait
// dial a new session
c, err := tun.Dial("test-tunnel")
if err != nil {
t.Fatal(err)
}
defer c.Close()
m := transport.Message{
Header: map[string]string{
"test": "send",
},
}
// send the message
if err := c.Send(&m); err != nil {
t.Fatal(err)
}
// wait for the listener to get ready
<-wait
// give it time to reconnect
time.Sleep(reconnect)
// send the message
if err := c.Send(&m); err != nil {
t.Fatal(err)
}
// wait for the listener to receive the message
// c.Send merely enqueues the message to the link send queue and returns
// in order to verify it was received we wait for the listener to tell us
<-wait
}
// testAccept will accept connections on the transport, create a new link and tunnel on top
func testAccept(t *testing.T, tun tunnel.Tunnel, wait chan bool, wg *sync.WaitGroup) {
defer wg.Done()
// listen on some virtual address
tl, err := tun.Listen("test-tunnel")
if err != nil {
t.Fatal(err)
}
// receiver ready; notify sender
wait <- true
// accept a connection
c, err := tl.Accept()
if err != nil {
t.Fatal(err)
}
// get a message
// accept the message
m := new(transport.Message)
if err := c.Recv(m); err != nil {
t.Fatal(err)
}
if v := m.Header["test"]; v != "send" {
t.Fatalf("Accept side expected test:send header. Received: %s", v)
}
// now respond
m.Header["test"] = "accept"
if err := c.Send(m); err != nil {
t.Fatal(err)
}
wait <- true
return
}
// testSend will create a new link to an address and then a tunnel on top
func testSend(t *testing.T, tun tunnel.Tunnel, wait chan bool, wg *sync.WaitGroup) {
defer wg.Done()
// wait for the listener to get ready
<-wait
// dial a new session
c, err := tun.Dial("test-tunnel")
if err != nil {
t.Fatal(err)
}
defer c.Close()
m := transport.Message{
Header: map[string]string{
"test": "send",
},
}
// send the message
if err := c.Send(&m); err != nil {
t.Fatal(err)
}
// now wait for the response
mr := new(transport.Message)
if err := c.Recv(mr); err != nil {
t.Fatal(err)
}
<-wait
if v := mr.Header["test"]; v != "accept" {
t.Fatalf("Message not received from accepted side. Received: %s", v)
}
}
func TestTunnel(t *testing.T) {
// create a new tunnel client
tunA := NewTunnel(
tunnel.Address("127.0.0.1:9096"),
tunnel.Nodes("127.0.0.1:9097"),
)
// create a new tunnel server
tunB := NewTunnel(
tunnel.Address("127.0.0.1:9097"),
)
// start tunB
err := tunB.Connect()
if err != nil {
t.Fatal(err)
}
defer tunB.Close()
// start tunA
err = tunA.Connect()
if err != nil {
t.Fatal(err)
}
defer tunA.Close()
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start the listener
go testAccept(t, tunB, wait, &wg)
wg.Add(1)
// start the client
go testSend(t, tunA, wait, &wg)
// wait until done
wg.Wait()
}
func TestLoopbackTunnel(t *testing.T) {
// create a new tunnel
tun := NewTunnel(
tunnel.Address("127.0.0.1:9096"),
tunnel.Nodes("127.0.0.1:9096"),
)
// start tunnel
err := tun.Connect()
if err != nil {
t.Fatal(err)
}
defer tun.Close()
time.Sleep(500 * time.Millisecond)
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start the listener
go testAccept(t, tun, wait, &wg)
wg.Add(1)
// start the client
go testSend(t, tun, wait, &wg)
// wait until done
wg.Wait()
}
func TestTunnelRTTRate(t *testing.T) {
// create a new tunnel client
tunA := NewTunnel(
tunnel.Address("127.0.0.1:9096"),
tunnel.Nodes("127.0.0.1:9097"),
)
// create a new tunnel server
tunB := NewTunnel(
tunnel.Address("127.0.0.1:9097"),
)
// start tunB
err := tunB.Connect()
if err != nil {
t.Fatal(err)
}
defer tunB.Close()
// start tunA
err = tunA.Connect()
if err != nil {
t.Fatal(err)
}
defer tunA.Close()
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start the listener
go testAccept(t, tunB, wait, &wg)
wg.Add(1)
// start the client
go testSend(t, tunA, wait, &wg)
// wait until done
wg.Wait()
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
// only needed for debug
for _, link := range tunA.Links() {
t.Logf("Link %s length %v rate %v", link.Id(), link.Length(), link.Rate())
}
for _, link := range tunB.Links() {
t.Logf("Link %s length %v rate %v", link.Id(), link.Length(), link.Rate())
}
}
}
func TestReconnectTunnel(t *testing.T) {
// we manually override the tunnel.ReconnectTime value here
// this is so that we make the reconnects faster than the default 5s
ReconnectTime = 200 * time.Millisecond
// create a new tunnel client
tunA := NewTunnel(
tunnel.Address("127.0.0.1:9098"),
tunnel.Nodes("127.0.0.1:9099"),
)
// create a new tunnel server
tunB := NewTunnel(
tunnel.Address("127.0.0.1:9099"),
)
// start tunnel
err := tunB.Connect()
if err != nil {
t.Fatal(err)
}
defer tunB.Close()
// start tunnel
err = tunA.Connect()
if err != nil {
t.Fatal(err)
}
defer tunA.Close()
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start tunnel listener
go testBrokenTunAccept(t, tunB, wait, &wg)
wg.Add(1)
// start tunnel sender
go testBrokenTunSend(t, tunA, wait, &wg, ReconnectTime*5)
// wait until done
wg.Wait()
}
| ["\"IN_TRAVIS_CI\""] | [] | ["IN_TRAVIS_CI"] | [] | ["IN_TRAVIS_CI"] | go | 1 | 0 | |
pandaharvester/harvestersubmitter/htcondor_submitter.py | import os
import errno
import datetime
import tempfile
import threading
import random
import json
from concurrent.futures import ThreadPoolExecutor
import re
from math import log1p
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestermisc.info_utils import PandaQueuesDict
from pandaharvester.harvestermisc.token_utils import endpoint_to_filename
from pandaharvester.harvestermisc.htcondor_utils import get_job_id_tuple_from_batchid
from pandaharvester.harvestermisc.htcondor_utils import CondorJobSubmit
from pandaharvester.harvestersubmitter import submitter_common
# logger
baseLogger = core_utils.setup_logger('htcondor_submitter')
# Integer division round up
def _div_round_up(a, b):
return a // b + int(a % b > 0)
# Compute the weighting of each CE from worker statistics; return (total_score, ce_weight_dict, ce_thruput_dict, target_Q)
def _get_ce_weighting(ce_endpoint_list=[], worker_ce_all_tuple=None, is_slave_queue=False):
multiplier = 1000.
n_ce = len(ce_endpoint_list)
worker_limits_dict, worker_ce_stats_dict, worker_ce_backend_throughput_dict, time_window, n_new_workers = worker_ce_all_tuple
N = float(n_ce)
Q = float(worker_limits_dict['nQueueLimitWorker'])
W = float(worker_limits_dict['maxWorkers'])
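# a CE's historical "throughput" below is measured on a log scale:
# log1p(submitted+running+finished) - log1p(still submitted), i.e. it grows when more of the
# CE's recent workers progressed beyond the submitted state within the time window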
Q_good_init = float(sum(worker_ce_backend_throughput_dict[_ce][_st]
for _st in ('submitted', 'running', 'finished')
for _ce in worker_ce_backend_throughput_dict))
Q_good_fin = float(sum(worker_ce_backend_throughput_dict[_ce][_st]
for _st in ('submitted',)
for _ce in worker_ce_backend_throughput_dict))
thruput_avg = (log1p(Q_good_init) - log1p(Q_good_fin))
n_new_workers = float(n_new_workers)
# target number of queuing
target_Q = Q + n_new_workers
if is_slave_queue:
# take total number of current queuing if slave queue
total_Q = sum(( float(worker_ce_stats_dict[_k]['submitted']) for _k in worker_ce_stats_dict ))
target_Q = min(total_Q, Q) + n_new_workers
def _get_thruput(_ce_endpoint): # inner function
if _ce_endpoint not in worker_ce_backend_throughput_dict:
q_good_init = 0.
q_good_fin = 0.
else:
q_good_init = float(sum(worker_ce_backend_throughput_dict[_ce_endpoint][_st]
for _st in ('submitted', 'running', 'finished')))
q_good_fin = float(sum(worker_ce_backend_throughput_dict[_ce_endpoint][_st]
for _st in ('submitted',)))
thruput = (log1p(q_good_init) - log1p(q_good_fin))
return thruput
def _get_thruput_adj_ratio(thruput): # inner function
try:
thruput_adj_ratio = thruput/thruput_avg + 1/N
except ZeroDivisionError:
if thruput == 0.:
thruput_adj_ratio = 1/N
else:
raise
return thruput_adj_ratio
ce_base_weight_sum = sum((_get_thruput_adj_ratio(_get_thruput(_ce))
for _ce in ce_endpoint_list))
def _get_init_weight(_ce_endpoint): # inner function
if _ce_endpoint not in worker_ce_stats_dict:
q = 0.
r = 0.
else:
q = float(worker_ce_stats_dict[_ce_endpoint]['submitted'])
r = float(worker_ce_stats_dict[_ce_endpoint]['running'])
# q_avg = sum(( float(worker_ce_stats_dict[_k]['submitted']) for _k in worker_ce_stats_dict )) / N
# r_avg = sum(( float(worker_ce_stats_dict[_k]['running']) for _k in worker_ce_stats_dict )) / N
if ( _ce_endpoint in worker_ce_stats_dict and q > Q ):
return float(0)
ce_base_weight_normalized = _get_thruput_adj_ratio(_get_thruput(_ce_endpoint))/ce_base_weight_sum
# target number of queuing of the CE
q_expected = target_Q * ce_base_weight_normalized
# weight by difference
ret = max((q_expected - q), 2**-10)
# # Weight by running ratio
# _weight_r = 1 + N*r/R
if r == 0:
# Penalty for dead CE (no running worker)
ret = ret / (1 + log1p(q)**2)
return ret
init_weight_iterator = map(_get_init_weight, ce_endpoint_list)
sum_of_weights = sum(init_weight_iterator)
total_score = multiplier * N
try:
regulator = total_score / sum_of_weights
except ZeroDivisionError:
regulator = 1.
ce_weight_dict = {_ce: _get_init_weight(_ce) * regulator for _ce in ce_endpoint_list}
ce_thruput_dict = {_ce: _get_thruput(_ce) * 86400. / time_window for _ce in ce_endpoint_list}
return total_score, ce_weight_dict, ce_thruput_dict, target_Q
# Choose a CE according to weighting
def _choose_ce(weighting):
total_score, ce_weight_dict, ce_thruput_dict, target_Q = weighting
lucky_number = random.random() * total_score
cur = 0.
ce_now = None
for _ce, _w in ce_weight_dict.items():
if _w == 0.:
continue
ce_now = _ce
cur += _w
if cur >= lucky_number:
return _ce
if ce_weight_dict.get(ce_now, -1) > 0.:
return ce_now
else:
return None
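# Illustrative sketch (hypothetical numbers): with total_score 2000. and
# ce_weight_dict {'ce-a.example.org': 1500., 'ce-b.example.org': 500.}, _choose_ce returns
# 'ce-a.example.org' about 75% of the time and 'ce-b.example.org' about 25% of the time;
# CEs whose weight collapsed to 0 (queue already above its expected level) are never picked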
# Build a readable string showing the statistics and weighting of CEs
def _get_ce_stats_weighting_display(ce_list, worker_ce_all_tuple, ce_weighting):
worker_limits_dict, worker_ce_stats_dict, worker_ce_backend_throughput_dict, time_window, n_new_workers = worker_ce_all_tuple
total_score, ce_weight_dict, ce_thruput_dict, target_Q = ce_weighting
worker_ce_stats_dict_sub_default = {'submitted': 0, 'running': 0}
worker_ce_backend_throughput_dict_sub_default = {'submitted': 0, 'running': 0, 'finished': 0}
general_dict = {
'maxWorkers': int(worker_limits_dict.get('maxWorkers')),
'nQueueLimitWorker': int(worker_limits_dict.get('nQueueLimitWorker')),
'nNewWorkers': int(n_new_workers),
'target_Q': int(target_Q),
'history_time_window': int(time_window),
}
general_str = (
'maxWorkers={maxWorkers} '
'nQueueLimitWorker={nQueueLimitWorker} '
'nNewWorkers={nNewWorkers} '
'target_Q={target_Q} '
'hist_timeWindow={history_time_window} '
).format(**general_dict)
ce_str_list = []
for _ce in ce_list:
schema_sub_dict = {
'submitted_now': int(worker_ce_stats_dict.get(_ce, worker_ce_stats_dict_sub_default).get('submitted')),
'running_now': int(worker_ce_stats_dict.get(_ce, worker_ce_stats_dict_sub_default).get('running')),
'submitted_history': int(worker_ce_backend_throughput_dict.get(_ce, worker_ce_backend_throughput_dict_sub_default).get('submitted')),
'running_history': int(worker_ce_backend_throughput_dict.get(_ce, worker_ce_backend_throughput_dict_sub_default).get('running')),
'finished_history': int(worker_ce_backend_throughput_dict.get(_ce, worker_ce_backend_throughput_dict_sub_default).get('finished')),
'thruput_score': ce_thruput_dict.get(_ce),
'weight_score': ce_weight_dict.get(_ce),
}
ce_str = (
'"{_ce}": '
'now_S={submitted_now} '
'now_R={running_now} '
'hist_S={submitted_history} '
'hist_R={running_history} '
'hist_F={finished_history} '
'T={thruput_score:.02f} '
'W={weight_score:.03f} '
).format(_ce=_ce, **schema_sub_dict)
ce_str_list.append(ce_str)
stats_weighting_display_str = general_str + ' ; ' + ' , '.join(ce_str_list)
return stats_weighting_display_str
# Replace condor macros (e.g. $(Cluster), $(Process)) in a string from the SDF file; return the new string
def _condor_macro_replace(string, **kwarg):
new_string = string
macro_map = {
'\$\(Cluster\)': str(kwarg['ClusterId']),
'\$\(Process\)': str(kwarg['ProcId']),
}
for k, v in macro_map.items():
new_string = re.sub(k, v, new_string)
return new_string
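# Example (illustrative values): _condor_macro_replace('log/grid.$(Cluster).$(Process).log',
# ClusterId=1234, ProcId=0) returns 'log/grid.1234.0.log'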
# Parse resource type from string for Unified PanDA Queue
def _get_resource_type(string, is_unified_queue, is_pilot_option=False):
string = str(string)
if not is_unified_queue:
ret = ''
elif string in set(['SCORE', 'MCORE', 'SCORE_HIMEM', 'MCORE_HIMEM']):
if is_pilot_option:
ret = '--resource-type {0}'.format(string)
else:
ret = string
else:
ret = ''
return ret
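# Example (illustrative values): for a unified queue, _get_resource_type('MCORE', True) returns
# 'MCORE' and _get_resource_type('MCORE', True, True) returns '--resource-type MCORE';
# unknown strings or non-unified queues yield ''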
# submit a bag of workers
def submit_bag_of_workers(data_list):
# make logger
tmpLog = core_utils.make_logger(baseLogger, method_name='submit_bag_of_workers')
# keep order of workers in data_list
workerIDs_list = [ data['workspec'].workerID for data in data_list ]
# initialization
worker_retval_map = {}
worker_data_map = {}
host_jdl_list_workerid_map = {}
# go
for data in data_list:
workspec = data['workspec']
workerID = workspec.workerID
worker_data_map[workerID] = data
to_submit = data['to_submit']
# no need to submit bad worker
if not to_submit:
errStr = '{0} not submitted due to incomplete data of the worker'.format(workerID)
tmpLog.warning(errStr)
tmpRetVal = (None, errStr)
# return tmpRetVal, workspec.get_changed_attributes()
worker_retval_map[workerID] = (tmpRetVal, workspec.get_changed_attributes())
# attributes
try:
use_spool = data['use_spool']
except KeyError:
errStr = '{0} not submitted due to incomplete data of the worker'.format(workerID)
tmpLog.warning(errStr)
tmpRetVal = (None, errStr)
# return tmpRetVal, workspec.get_changed_attributes()
worker_retval_map[workerID] = (tmpRetVal, workspec.get_changed_attributes())
else:
workspec.reset_changed_list()
# fill in host_jdl_list_workerid_map
a_jdl, placeholder_map = make_a_jdl(**data)
val = (workspec, a_jdl, placeholder_map)
try:
host_jdl_list_workerid_map[workspec.submissionHost].append(val)
except KeyError:
host_jdl_list_workerid_map[workspec.submissionHost] = [val]
# loop over submissionHost
for host, val_list in host_jdl_list_workerid_map.items():
# make jdl string of workers
jdl_list = [ val[1] for val in val_list ]
# condor job submit object
tmpLog.debug('submitting to submissionHost={0}'.format(host))
# submit
try:
condor_job_submit = CondorJobSubmit(id=host)
batchIDs_list, ret_err_str = condor_job_submit.submit(jdl_list, use_spool=use_spool)
except Exception as e:
batchIDs_list = None
ret_err_str = 'Exception {0}: {1}'.format(e.__class__.__name__, e)
# result
if batchIDs_list:
# submitted
n_workers = len(val_list)
tmpLog.debug('submitted {0} workers to submissionHost={1}'.format(n_workers, host))
for val_i in range(n_workers):
val = val_list[val_i]
workspec = val[0]
placeholder_map = val[2]
# got batchID
workspec.batchID = batchIDs_list[val_i]
tmpLog.debug('workerID={0} submissionHost={1} batchID={2}'.format(
workspec.workerID, workspec.submissionHost, workspec.batchID))
# get worker data
data = worker_data_map[workspec.workerID]
# set computingElement
ce_info_dict = data['ce_info_dict']
workspec.computingElement = ce_info_dict.get('ce_endpoint', '')
# set log
batch_log_dict = data['batch_log_dict']
(clusterid, procid) = get_job_id_tuple_from_batchid(workspec.batchID)
batch_log = _condor_macro_replace(batch_log_dict['batch_log'], ClusterId=clusterid, ProcId=procid).format(**placeholder_map)
batch_stdout = _condor_macro_replace(batch_log_dict['batch_stdout'], ClusterId=clusterid, ProcId=procid).format(**placeholder_map)
batch_stderr = _condor_macro_replace(batch_log_dict['batch_stderr'], ClusterId=clusterid, ProcId=procid).format(**placeholder_map)
try:
batch_jdl = '{0}.jdl'.format(batch_stderr[:-4])
except Exception:
batch_jdl = None
workspec.set_log_file('batch_log', batch_log)
workspec.set_log_file('stdout', batch_stdout)
workspec.set_log_file('stderr', batch_stderr)
workspec.set_log_file('jdl', batch_jdl)
if not workspec.get_jobspec_list():
tmpLog.debug('No jobspec associated in the worker of workerID={0}'.format(workspec.workerID))
else:
for jobSpec in workspec.get_jobspec_list():
# using batchLog and stdOut URL as pilotID and pilotLog
jobSpec.set_one_attribute('pilotID', workspec.workAttributes['stdOut'])
jobSpec.set_one_attribute('pilotLog', workspec.workAttributes['batchLog'])
tmpLog.debug('Done set_log_file after submission of workerID={0}'.format(workspec.workerID))
tmpRetVal = (True, '')
worker_retval_map[workspec.workerID] = (tmpRetVal, workspec.get_changed_attributes())
else:
# failed
tmpLog.debug('failed to submit workers to submissionHost={0} ; {1}'.format(host, ret_err_str))
for val in val_list:
workspec = val[0]
errStr = 'submission failed: {0}'.format(ret_err_str)
tmpLog.error(errStr)
tmpRetVal = (None, errStr)
worker_retval_map[workspec.workerID] = (tmpRetVal, workspec.get_changed_attributes())
# make return list
retValList = [ worker_retval_map[w_id] for w_id in workerIDs_list ]
return retValList
# make a condor jdl for a worker
def make_a_jdl(workspec, template, n_core_per_node, log_dir, panda_queue_name, executable_file,
x509_user_proxy, log_subdir=None, ce_info_dict=dict(), batch_log_dict=dict(),
pilot_url=None, pilot_args='',
special_par='', harvester_queue_config=None, is_unified_queue=False,
pilot_version='unknown', python_version='unknown', token_dir=None, **kwarg):
# make logger
tmpLog = core_utils.make_logger(baseLogger, 'workerID={0}'.format(workspec.workerID),
method_name='make_a_jdl')
# Note: In workspec, unit of minRamCount and of maxDiskCount are both MB.
# In HTCondor SDF, unit of request_memory is MB, and request_disk is KB.
n_core_total = workspec.nCore if workspec.nCore else n_core_per_node
request_ram = max(workspec.minRamCount, 1 * n_core_total) if workspec.minRamCount else 1 * n_core_total
request_disk = workspec.maxDiskCount * 1024 if workspec.maxDiskCount else 1
request_walltime = workspec.maxWalltime if workspec.maxWalltime else 0
io_intensity = workspec.ioIntensity if workspec.ioIntensity else 0
ce_info_dict = ce_info_dict.copy()
batch_log_dict = batch_log_dict.copy()
# possible override by AGIS special_par
if special_par:
special_par_attr_list = ['queue', 'maxWallTime', 'xcount', ]
_match_special_par_dict = { attr: re.search('\({attr}=([^)]+)\)'.format(attr=attr), special_par) \
for attr in special_par_attr_list }
for attr, _match in _match_special_par_dict.items():
if not _match:
continue
elif attr == 'queue':
ce_info_dict['ce_queue_name'] = str(_match.group(1))
elif attr == 'maxWallTime':
request_walltime = int(_match.group(1))
elif attr == 'xcount':
n_core_total = int(_match.group(1))
tmpLog.debug('job attributes override by AGIS special_par: {0}={1}'.format(attr, str(_match.group(1))))
# derived job attributes
n_node = _div_round_up(n_core_total, n_core_per_node)
request_ram_bytes = request_ram * 2**20
request_ram_per_core = _div_round_up(request_ram * n_node, n_core_total)
request_ram_bytes_per_core = _div_round_up(request_ram_bytes * n_node, n_core_total)
request_cputime = request_walltime * n_core_total
request_walltime_minute = _div_round_up(request_walltime, 60)
request_cputime_minute = _div_round_up(request_cputime, 60)
# decide prodSourceLabel
pilot_opt_dict = submitter_common.get_complicated_pilot_options(workspec.pilotType, pilot_url, pilot_version)
if pilot_opt_dict is None:
prod_source_label = harvester_queue_config.get_source_label(workspec.jobType)
pilot_type_opt = workspec.pilotType
pilot_url_str = '--piloturl {0}'.format(pilot_url) if pilot_url else ''
pilot_debug_str = ''
else:
prod_source_label = pilot_opt_dict['prod_source_label']
pilot_type_opt = pilot_opt_dict['pilot_type_opt']
pilot_url_str = pilot_opt_dict['pilot_url_str']
pilot_debug_str = pilot_opt_dict['pilot_debug_str']
# get token filename according to CE
token_filename = None
if token_dir is not None and ce_info_dict.get('ce_endpoint'):
token_filename = endpoint_to_filename(ce_info_dict['ce_endpoint'])
token_path = None
if token_dir is not None and token_filename is not None:
token_path = os.path.join(token_dir, token_filename)
else:
tmpLog.warning('token_path is None: site={0}, token_dir={1} , token_filename={2}'.format(panda_queue_name, token_dir, token_filename))
# open tmpfile as submit description file
tmpFile = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='_submit.sdf', dir=workspec.get_access_point())
# placeholder map
placeholder_map = {
'sdfPath': tmpFile.name,
'executableFile': executable_file,
'nCorePerNode': n_core_per_node,
'nCoreTotal': n_core_total,
'nNode': n_node,
'requestRam': request_ram,
'requestRamBytes': request_ram_bytes,
'requestRamPerCore': request_ram_per_core,
'requestRamBytesPerCore': request_ram_bytes_per_core,
'requestDisk': request_disk,
'requestWalltime': request_walltime,
'requestWalltimeMinute': request_walltime_minute,
'requestCputime': request_cputime,
'requestCputimeMinute': request_cputime_minute,
'accessPoint': workspec.accessPoint,
'harvesterID': harvester_config.master.harvester_id,
'workerID': workspec.workerID,
'computingSite': workspec.computingSite,
'pandaQueueName': panda_queue_name,
'x509UserProxy': x509_user_proxy,
'ceEndpoint': ce_info_dict.get('ce_endpoint', ''),
'ceHostname': ce_info_dict.get('ce_hostname', ''),
'ceFlavour': ce_info_dict.get('ce_flavour', ''),
'ceJobmanager': ce_info_dict.get('ce_jobmanager', ''),
'ceQueueName': ce_info_dict.get('ce_queue_name', ''),
'ceVersion': ce_info_dict.get('ce_version', ''),
'logDir': log_dir,
'logSubdir': log_subdir,
'gtag': batch_log_dict.get('gtag', 'fake_GTAG_string'),
'prodSourceLabel': prod_source_label,
'jobType': workspec.jobType,
'resourceType': _get_resource_type(workspec.resourceType, is_unified_queue),
'pilotResourceTypeOption': _get_resource_type(workspec.resourceType, is_unified_queue, True),
'ioIntensity': io_intensity,
'pilotType': pilot_type_opt,
'pilotUrlOption': pilot_url_str,
'pilotVersion': pilot_version,
'pilotPythonOption': submitter_common.get_python_version_option(python_version, prod_source_label),
'pilotDebugOption': pilot_debug_str,
'pilotArgs': pilot_args,
'submissionHost': workspec.submissionHost,
'submissionHostShort': workspec.submissionHost.split('.')[0],
'ceARCGridType': ce_info_dict.get('ce_arc_grid_type', 'nordugrid'),
'tokenDir': token_dir,
'tokenFilename': token_filename,
'tokenPath': token_path,
}
# fill in template string
jdl_str = template.format(**placeholder_map)
# save jdl to submit description file
tmpFile.write(jdl_str)
tmpFile.close()
tmpLog.debug('saved sdf at {0}'.format(tmpFile.name))
tmpLog.debug('done')
return jdl_str, placeholder_map
# parse log, stdout, stderr filename
def parse_batch_job_filename(value_str, file_dir, batchID, guess=False):
_filename = os.path.basename(value_str)
if guess:
# guess file name before files really created; possibly containing condor macros
return _filename
else:
        _sanitized_list = re.sub(r'\{(\w+)\}|\[(\w+)\]|\((\w+)\)|#(\w+)#|\$', '', _filename).split('.')
_prefix = _sanitized_list[0]
_suffix = _sanitized_list[-1] if len(_sanitized_list) > 1 else ''
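        # e.g. a hypothetical value 'grid.$(Cluster).$(Process).out' becomes
        # ['grid', '', '', 'out'] once the condor macros are stripped, giving
        # prefix 'grid' and suffix 'out'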
for _f in os.listdir(file_dir):
            if re.match(r'{prefix}(.*)\.{batchID}\.(.*)\.{suffix}'.format(prefix=_prefix, suffix=_suffix, batchID=batchID), _f):
return _f
return None
# submitter for HTCONDOR batch system
class HTCondorSubmitter(PluginBase):
# constructor
def __init__(self, **kwarg):
tmpLog = core_utils.make_logger(baseLogger, method_name='__init__')
self.logBaseURL = None
self.templateFile = None
PluginBase.__init__(self, **kwarg)
# number of processes
try:
self.nProcesses
except AttributeError:
self.nProcesses = 1
else:
if (not self.nProcesses) or (self.nProcesses < 1):
self.nProcesses = 1
# executable file
try:
self.executableFile
except AttributeError:
self.executableFile = None
# condor log directory
try:
self.logDir
except AttributeError:
self.logDir = os.getenv('TMPDIR') or '/tmp'
# Default x509 proxy for a queue
try:
self.x509UserProxy
except AttributeError:
self.x509UserProxy = os.getenv('X509_USER_PROXY')
# x509 proxy for analysis jobs in grandly unified queues
try:
self.x509UserProxyAnalysis
except AttributeError:
self.x509UserProxyAnalysis = os.getenv('X509_USER_PROXY_ANAL')
# Default token directory for a queue
try:
self.tokenDir
except AttributeError:
self.tokenDir = None
# token directory for analysis jobs in grandly unified queues
try:
self.tokenDirAnalysis
except AttributeError:
self.tokenDirAnalysis = None
# ATLAS AGIS
try:
self.useAtlasAGIS = bool(self.useAtlasAGIS)
except AttributeError:
self.useAtlasAGIS = False
# ATLAS Grid CE, requiring AGIS
try:
self.useAtlasGridCE = bool(self.useAtlasGridCE)
except AttributeError:
self.useAtlasGridCE = False
finally:
self.useAtlasAGIS = self.useAtlasAGIS or self.useAtlasGridCE
# sdf template directories of CEs; ignored if templateFile is set
try:
self.CEtemplateDir
except AttributeError:
self.CEtemplateDir = ''
# remote condor schedd and pool name (collector)
try:
self.condorSchedd
except AttributeError:
self.condorSchedd = None
try:
self.condorPool
except AttributeError:
self.condorPool = None
# json config file of remote condor host: schedd/pool and weighting. If set, condorSchedd and condorPool are overwritten
try:
self.condorHostConfig
except AttributeError:
self.condorHostConfig = False
if self.condorHostConfig:
try:
self.condorSchedd = []
self.condorPool = []
self.condorHostWeight = []
with open(self.condorHostConfig, 'r') as f:
condor_host_config_map = json.load(f)
for _schedd, _cm in condor_host_config_map.items():
_pool = _cm['pool']
_weight = int(_cm['weight'])
self.condorSchedd.append(_schedd)
self.condorPool.append(_pool)
self.condorHostWeight.append(_weight)
except Exception as e:
tmpLog.error('error when parsing condorHostConfig json file; {0}: {1}'.format(e.__class__.__name__, e))
raise
else:
if isinstance(self.condorSchedd, list):
self.condorHostWeight = [1] * len(self.condorSchedd)
else:
self.condorHostWeight = [1]
# condor spool mechanism. If False, need shared FS across remote schedd
try:
self.useSpool
except AttributeError:
self.useSpool = False
        # workers fewer than this number will be submitted in bulk to only one schedd
try:
self.minBulkToRamdomizedSchedd
except AttributeError:
self.minBulkToRamdomizedSchedd = 20
# try to use analysis credentials first
try:
self.useAnalysisCredentials
except AttributeError:
self.useAnalysisCredentials = False
# record of information of CE statistics
self.ceStatsLock = threading.Lock()
self.ceStats = dict()
# allowed associated parameters from AGIS
self._allowed_agis_attrs = (
'pilot_url',
'pilot_args',
)
# get CE statistics of a site
def get_ce_statistics(self, site_name, n_new_workers, time_window=21600):
if site_name in self.ceStats:
return self.ceStats[site_name]
with self.ceStatsLock:
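            # re-check the cache under the lock before querying the DB for fresh CE statistics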
if site_name in self.ceStats:
return self.ceStats[site_name]
else:
worker_limits_dict = self.dbInterface.get_worker_limits(self.queueName)
worker_ce_stats_dict = self.dbInterface.get_worker_ce_stats(self.queueName)
worker_ce_backend_throughput_dict = self.dbInterface.get_worker_ce_backend_throughput(self.queueName, time_window=time_window)
return (worker_limits_dict, worker_ce_stats_dict, worker_ce_backend_throughput_dict, time_window, n_new_workers)
# submit workers
def submit_workers(self, workspec_list):
tmpLog = self.make_logger(baseLogger, 'site={0}'.format(self.queueName), method_name='submit_workers')
nWorkers = len(workspec_list)
tmpLog.debug('start nWorkers={0}'.format(nWorkers))
# whether to submit any worker
to_submit_any = True
# get log subdirectory name from timestamp
timeNow = datetime.datetime.utcnow()
log_subdir = timeNow.strftime('%y-%m-%d_%H')
log_subdir_path = os.path.join(self.logDir, log_subdir)
if self.condorSchedd is None or not self.useSpool:
try:
os.mkdir(log_subdir_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
pass
# get info from harvester queue config
_queueConfigMapper = QueueConfigMapper()
harvester_queue_config = _queueConfigMapper.get_queue(self.queueName)
# associated parameters dict
associated_params_dict = {}
is_grandly_unified_queue = False
# get queue info from AGIS by cacher in db
if self.useAtlasAGIS:
panda_queues_dict = PandaQueuesDict()
panda_queue_name = panda_queues_dict.get_panda_queue_name(self.queueName)
this_panda_queue_dict = panda_queues_dict.get(self.queueName, dict())
is_grandly_unified_queue = panda_queues_dict.is_grandly_unified_queue(self.queueName)
# tmpLog.debug('panda_queues_name and queue_info: {0}, {1}'.format(self.queueName, panda_queues_dict[self.queueName]))
# associated params on AGIS
for key, val in panda_queues_dict.get_harvester_params(self.queueName).items():
if key in self._allowed_agis_attrs:
associated_params_dict[key] = val
else:
panda_queues_dict = dict()
panda_queue_name = self.queueName
this_panda_queue_dict = dict()
# get default information from queue info
n_core_per_node_from_queue = this_panda_queue_dict.get('corecount', 1) if this_panda_queue_dict.get('corecount', 1) else 1
is_unified_queue = this_panda_queue_dict.get('capability', '') == 'ucore'
pilot_url = associated_params_dict.get('pilot_url')
pilot_args = associated_params_dict.get('pilot_args', '')
pilot_version = str(this_panda_queue_dict.get('pilot_version', 'current'))
python_version = str(this_panda_queue_dict.get('python_version', '2'))
sdf_suffix_str = '_pilot2'
# get override requirements from queue configured
try:
n_core_per_node = self.nCorePerNode if self.nCorePerNode else n_core_per_node_from_queue
except AttributeError:
n_core_per_node = n_core_per_node_from_queue
        # deal with Condor schedd and central managers; make a weighted list to randomly choose from
n_bulks = _div_round_up(nWorkers, self.minBulkToRamdomizedSchedd)
if isinstance(self.condorSchedd, list) and len(self.condorSchedd) > 0:
orig_list = []
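            # expand each schedd by its weight so that the random sampling below
            # picks schedds proportionally to condorHostWeight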
if isinstance(self.condorPool, list) and len(self.condorPool) > 0:
for _schedd, _pool, _weight in zip(self.condorSchedd, self.condorPool, self.condorHostWeight):
orig_list.extend([(_schedd, _pool)] * _weight)
else:
for _schedd, _weight in zip(self.condorSchedd, self.condorHostWeight):
orig_list.extend([(_schedd, self.condorPool)] * _weight)
if n_bulks < len(orig_list):
schedd_pool_choice_list = random.sample(orig_list, n_bulks)
else:
schedd_pool_choice_list = orig_list
else:
schedd_pool_choice_list = [(self.condorSchedd, self.condorPool)]
# deal with CE
special_par = ''
ce_weighting = None
if self.useAtlasGridCE:
            # If ATLAS Grid CE mode is used
tmpLog.debug('Using ATLAS Grid CE mode...')
queues_from_queue_list = this_panda_queue_dict.get('queues', [])
special_par = this_panda_queue_dict.get('special_par', '')
ce_auxilary_dict = {}
for _queue_dict in queues_from_queue_list:
if not ( _queue_dict.get('ce_endpoint')
and str(_queue_dict.get('ce_state', '')).upper() == 'ACTIVE'
and str(_queue_dict.get('ce_flavour', '')).lower() in set(['arc-ce', 'cream-ce', 'htcondor-ce']) ):
continue
ce_endpoint = _queue_dict.get('ce_endpoint')
if ( ce_endpoint in ce_auxilary_dict
and str(_queue_dict.get('ce_queue_name', '')).lower() == 'default' ):
pass
else:
ce_auxilary_dict[ce_endpoint] = _queue_dict
# qualified CEs from AGIS info
n_qualified_ce = len(ce_auxilary_dict)
if n_qualified_ce > 0:
# Get CE weighting
tmpLog.debug('Get CE weighting')
worker_ce_all_tuple = self.get_ce_statistics(self.queueName, nWorkers)
is_slave_queue = (harvester_queue_config.runMode == 'slave')
ce_weighting = _get_ce_weighting(ce_endpoint_list=list(ce_auxilary_dict.keys()),
worker_ce_all_tuple=worker_ce_all_tuple,
is_slave_queue=is_slave_queue)
stats_weighting_display_str = _get_ce_stats_weighting_display(
ce_auxilary_dict.keys(), worker_ce_all_tuple, ce_weighting)
tmpLog.debug('CE stats and weighting: {0}'.format(stats_weighting_display_str))
else:
tmpLog.error('No valid CE endpoint found')
to_submit_any = False
def _handle_one_worker(workspec, to_submit=to_submit_any):
# make logger
tmpLog = core_utils.make_logger(baseLogger, 'site={0} workerID={1}'.format(self.queueName, workspec.workerID),
method_name='_handle_one_worker')
def _choose_credential(workspec):
"""
Choose the credential based on the job type
"""
job_type = workspec.jobType
proxy = self.x509UserProxy
token_dir = self.tokenDir
if (is_grandly_unified_queue and job_type in ('user', 'panda', 'analysis')) or self.useAnalysisCredentials:
if self.x509UserProxyAnalysis:
tmpLog.debug('Taking analysis proxy')
proxy = self.x509UserProxyAnalysis
if self.tokenDirAnalysis:
tmpLog.debug('Taking analysis token_dir')
token_dir = self.tokenDirAnalysis
else:
tmpLog.debug('Taking default proxy')
if self.tokenDir:
tmpLog.debug('Taking default token_dir')
return proxy, token_dir
# initialize
ce_info_dict = dict()
batch_log_dict = dict()
data = {'workspec': workspec,
'to_submit': to_submit,}
if to_submit:
sdf_template_file = None
if self.useAtlasGridCE:
# choose a CE
tmpLog.info('choose a CE...')
ce_chosen = _choose_ce(ce_weighting)
try:
ce_info_dict = ce_auxilary_dict[ce_chosen].copy()
except KeyError:
tmpLog.info('Problem choosing CE with weighting. Choose an arbitrary CE endpoint')
ce_info_dict = random.choice(list(ce_auxilary_dict.values())).copy()
                    # carry on with info of the chosen CE
# ignore protocol prefix in ce_endpoint for cream and condor CE
# check protocol prefix for ARC CE (gridftp or REST)
                    _match_ce_endpoint = re.match(r'^(\w+)://(\w+)', ce_info_dict.get('ce_endpoint', ''))
ce_endpoint_prefix = ''
if _match_ce_endpoint:
ce_endpoint_prefix = _match_ce_endpoint.group(1)
                    ce_endpoint_from_queue = re.sub(r'^\w+://', '', ce_info_dict.get('ce_endpoint', ''))
ce_flavour_str = str(ce_info_dict.get('ce_flavour', '')).lower()
ce_version_str = str(ce_info_dict.get('ce_version', '')).lower()
if ce_flavour_str == 'arc-ce' and ce_endpoint_prefix in ['https', 'http']:
# new ARC REST interface
ce_info_dict['ce_arc_grid_type'] = 'arc'
else:
ce_info_dict['ce_arc_grid_type'] = 'nordugrid'
                    ce_info_dict['ce_hostname'] = re.sub(r':\w*', '', ce_endpoint_from_queue)
if ce_info_dict['ce_hostname'] == ce_endpoint_from_queue \
and ce_info_dict['ce_arc_grid_type'] != 'arc':
# add default port to ce_endpoint if missing
default_port_map = {
'cream-ce': 8443,
'arc-ce': 2811,
'htcondor-ce': 9619,
}
if ce_flavour_str in default_port_map:
default_port = default_port_map[ce_flavour_str]
ce_info_dict['ce_endpoint'] = '{0}:{1}'.format(ce_endpoint_from_queue, default_port)
tmpLog.debug('Got pilot version: "{0}"; CE endpoint: "{1}", flavour: "{2}"'.format(
pilot_version, ce_endpoint_from_queue, ce_flavour_str))
if self.templateFile:
sdf_template_file = self.templateFile
elif os.path.isdir(self.CEtemplateDir) and ce_flavour_str:
sdf_template_filename = '{ce_flavour_str}{sdf_suffix_str}.sdf'.format(
ce_flavour_str=ce_flavour_str, sdf_suffix_str=sdf_suffix_str)
sdf_template_file = os.path.join(self.CEtemplateDir, sdf_template_filename)
else:
if self.templateFile:
sdf_template_file = self.templateFile
try:
# Manually define site condor schedd as ceHostname and central manager as ceEndpoint
if self.ceHostname and isinstance(self.ceHostname, list) and len(self.ceHostname) > 0:
if isinstance(self.ceEndpoint, list) and len(self.ceEndpoint) > 0:
ce_info_dict['ce_hostname'], ce_info_dict['ce_endpoint'] = random.choice(list(zip(self.ceHostname, self.ceEndpoint)))
else:
ce_info_dict['ce_hostname'] = random.choice(self.ceHostname)
ce_info_dict['ce_endpoint'] = self.ceEndpoint
else:
ce_info_dict['ce_hostname'] = self.ceHostname
ce_info_dict['ce_endpoint'] = self.ceEndpoint
except AttributeError:
pass
try:
# Manually define ceQueueName
if self.ceQueueName:
ce_info_dict['ce_queue_name'] = self.ceQueueName
except AttributeError:
pass
# template for batch script
try:
tmpFile = open(sdf_template_file)
sdf_template_raw = tmpFile.read()
tmpFile.close()
except AttributeError:
tmpLog.error('No valid templateFile found. Maybe templateFile, CEtemplateDir invalid, or no valid CE found')
to_submit = False
return data
else:
                # get batch_log, stdout, stderr filenames, and remove commented lines
sdf_template_str_list = []
for _line in sdf_template_raw.split('\n'):
if _line.startswith('#'):
continue
sdf_template_str_list.append(_line)
_match_batch_log = re.match('log = (.+)', _line)
_match_stdout = re.match('output = (.+)', _line)
_match_stderr = re.match('error = (.+)', _line)
if _match_batch_log:
batch_log_value = _match_batch_log.group(1)
continue
if _match_stdout:
stdout_value = _match_stdout.group(1)
continue
if _match_stderr:
stderr_value = _match_stderr.group(1)
continue
sdf_template = '\n'.join(sdf_template_str_list)
# Choose from Condor schedd and central managers
condor_schedd, condor_pool = random.choice(schedd_pool_choice_list)
# set submissionHost
if not condor_schedd and not condor_pool:
workspec.submissionHost = 'LOCAL'
else:
workspec.submissionHost = '{0},{1}'.format(condor_schedd, condor_pool)
tmpLog.debug('set submissionHost={0}'.format(workspec.submissionHost))
# Log Base URL
if self.logBaseURL and '[ScheddHostname]' in self.logBaseURL:
schedd_hostname = re.sub(r'(?:[a-zA-Z0-9_.\-]*@)?([a-zA-Z0-9.\-]+)(?::[0-9]+)?',
lambda matchobj: matchobj.group(1) if matchobj.group(1) else '',
condor_schedd)
log_base_url = re.sub(r'\[ScheddHostname\]', schedd_hostname, self.logBaseURL)
else:
log_base_url = self.logBaseURL
# URLs for log files
if not (log_base_url is None):
if workspec.batchID:
batchID = workspec.batchID
guess = False
else:
batchID = ''
guess = True
batch_log_filename = parse_batch_job_filename(value_str=batch_log_value, file_dir=log_subdir_path, batchID=batchID, guess=guess)
stdout_path_file_name = parse_batch_job_filename(value_str=stdout_value, file_dir=log_subdir_path, batchID=batchID, guess=guess)
stderr_path_filename = parse_batch_job_filename(value_str=stderr_value, file_dir=log_subdir_path, batchID=batchID, guess=guess)
batch_log = '{0}/{1}/{2}'.format(log_base_url, log_subdir, batch_log_filename)
batch_stdout = '{0}/{1}/{2}'.format(log_base_url, log_subdir, stdout_path_file_name)
batch_stderr = '{0}/{1}/{2}'.format(log_base_url, log_subdir, stderr_path_filename)
workspec.set_log_file('batch_log', batch_log)
workspec.set_log_file('stdout', batch_stdout)
workspec.set_log_file('stderr', batch_stderr)
batch_log_dict['batch_log'] = batch_log
batch_log_dict['batch_stdout'] = batch_stdout
batch_log_dict['batch_stderr'] = batch_stderr
batch_log_dict['gtag'] = workspec.workAttributes['stdOut']
tmpLog.debug('Done set_log_file before submission')
tmpLog.debug('Done jobspec attribute setting')
# choose the x509 certificate based on the type of job (analysis or production)
proxy, token_dir = _choose_credential(workspec)
# set data dict
data.update({
'workspec': workspec,
'to_submit': to_submit,
'template': sdf_template,
'executable_file': self.executableFile,
'log_dir': self.logDir,
'log_subdir': log_subdir,
'n_core_per_node': n_core_per_node,
'panda_queue_name': panda_queue_name,
'x509_user_proxy': proxy,
'ce_info_dict': ce_info_dict,
'batch_log_dict': batch_log_dict,
'special_par': special_par,
'harvester_queue_config': harvester_queue_config,
'is_unified_queue': is_unified_queue,
'condor_schedd': condor_schedd,
'condor_pool': condor_pool,
'use_spool': self.useSpool,
'pilot_url': pilot_url,
'pilot_args': pilot_args,
'pilot_version': pilot_version,
'python_version': python_version,
'token_dir': token_dir,
})
return data
def _propagate_attributes(workspec, tmpVal):
# make logger
tmpLog = core_utils.make_logger(baseLogger, 'workerID={0}'.format(workspec.workerID),
method_name='_propagate_attributes')
(retVal, tmpDict) = tmpVal
workspec.set_attributes_with_dict(tmpDict)
tmpLog.debug('Done workspec attributes propagation')
return retVal
tmpLog.debug('finished preparing worker attributes')
# map(_handle_one_worker, workspec_list)
with ThreadPoolExecutor(self.nProcesses * 4) as thread_pool:
dataIterator = thread_pool.map(_handle_one_worker, workspec_list)
tmpLog.debug('{0} workers handled'.format(nWorkers))
# submit
retValList = submit_bag_of_workers(list(dataIterator))
tmpLog.debug('{0} workers submitted'.format(nWorkers))
# propagate changed attributes
with ThreadPoolExecutor(self.nProcesses) as thread_pool:
retIterator = thread_pool.map(lambda _wv_tuple: _propagate_attributes(*_wv_tuple), zip(workspec_list, retValList))
retList = list(retIterator)
tmpLog.debug('done')
return retList
| [] | [] | [
"X509_USER_PROXY",
"X509_USER_PROXY_ANAL",
"TMPDIR"
] | [] | ["X509_USER_PROXY", "X509_USER_PROXY_ANAL", "TMPDIR"] | python | 3 | 0 | |
sdks/python/apache_beam/runners/interactive/caching/streaming_cache.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import shutil
import tempfile
import time
import traceback
from collections import OrderedDict
from google.protobuf.message import DecodeError
import apache_beam as beam
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileHeader
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileRecord
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive.cache_manager import CacheManager
from apache_beam.runners.interactive.cache_manager import SafeFastPrimitivesCoder
from apache_beam.testing.test_stream import OutputFormat
from apache_beam.testing.test_stream import ReverseTestStream
from apache_beam.utils import timestamp
# We don't have an explicit pathlib dependency because this code only works with
# the interactive target installed which has an indirect dependency on pathlib
# and pathlib2 through ipython>=5.9.0.
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path # python 2 backport
_LOGGER = logging.getLogger(__name__)
class StreamingCacheSink(beam.PTransform):
"""A PTransform that writes TestStreamFile(Header|Records)s to file.
This transform takes in an arbitrary element stream and writes the list of
TestStream events (as TestStreamFileRecords) to file. When replayed, this
will produce the best-effort replay of the original job (e.g. some elements
may be produced slightly out of order from the original stream).
Note that this PTransform is assumed to be only run on a single machine where
the following assumptions are correct: elements come in ordered, no two
transforms are writing to the same file. This PTransform is assumed to only
run correctly with the DirectRunner.
TODO(BEAM-9447): Generalize this to more source/sink types aside from file
based. Also, generalize to cases where there might be multiple workers
writing to the same sink.
"""
def __init__(
self,
cache_dir,
filename,
sample_resolution_sec,
coder=SafeFastPrimitivesCoder()):
self._cache_dir = cache_dir
self._filename = filename
self._sample_resolution_sec = sample_resolution_sec
self._coder = coder
self._path = os.path.join(self._cache_dir, self._filename)
@property
def path(self):
"""Returns the path the sink leads to."""
return self._path
@property
def size_in_bytes(self):
"""Returns the space usage in bytes of the sink."""
try:
return os.stat(self._path).st_size
except OSError:
_LOGGER.debug(
          'Failed to calculate cache size for file %s; the file might not have '
          'been created yet. Returning 0. %s',
self._path,
traceback.format_exc())
return 0
def expand(self, pcoll):
class StreamingWriteToText(beam.DoFn):
"""DoFn that performs the writing.
Note that the other file writing methods cannot be used in streaming
contexts.
"""
def __init__(self, full_path, coder=SafeFastPrimitivesCoder()):
self._full_path = full_path
self._coder = coder
# Try and make the given path.
Path(os.path.dirname(full_path)).mkdir(parents=True, exist_ok=True)
def start_bundle(self):
# Open the file for 'append-mode' and writing 'bytes'.
self._fh = open(self._full_path, 'ab')
def finish_bundle(self):
self._fh.close()
def process(self, e):
"""Appends the given element to the file.
"""
self._fh.write(self._coder.encode(e) + b'\n')
return (
pcoll
| ReverseTestStream(
output_tag=self._filename,
sample_resolution_sec=self._sample_resolution_sec,
output_format=OutputFormat.SERIALIZED_TEST_STREAM_FILE_RECORDS,
coder=self._coder)
| beam.ParDo(
StreamingWriteToText(full_path=self._path, coder=self._coder)))
class StreamingCacheSource:
"""A class that reads and parses TestStreamFile(Header|Reader)s.
This source operates in the following way:
1. Wait for up to `timeout_secs` for the file to be available.
2. Read, parse, and emit the entire contents of the file
3. Wait for more events to come or until `is_cache_complete` returns True
4. If there are more events, then go to 2
5. Otherwise, stop emitting.
  This class is used to read from file and send its elements to the TestStream via the
StreamingCacheManager.Reader.
"""
def __init__(
self,
cache_dir,
labels,
is_cache_complete=None,
coder=SafeFastPrimitivesCoder()):
self._cache_dir = cache_dir
self._coder = coder
self._labels = labels
self._path = os.path.join(self._cache_dir, *self._labels)
self._is_cache_complete = (
is_cache_complete if is_cache_complete else lambda _: True)
from apache_beam.runners.interactive.pipeline_instrument import CacheKey
self._pipeline_id = CacheKey.from_str(labels[-1]).pipeline_id
def _wait_until_file_exists(self, timeout_secs=30):
"""Blocks until the file exists for a maximum of timeout_secs.
"""
# Wait for up to `timeout_secs` for the file to be available.
start = time.time()
while not os.path.exists(self._path):
time.sleep(1)
if time.time() - start > timeout_secs:
from apache_beam.runners.interactive.pipeline_instrument import CacheKey
pcollection_var = CacheKey.from_str(self._labels[-1]).var
raise RuntimeError(
'Timed out waiting for cache file for PCollection `{}` to be '
'available with path {}.'.format(pcollection_var, self._path))
return open(self._path, mode='rb')
def _emit_from_file(self, fh, tail):
"""Emits the TestStreamFile(Header|Record)s from file.
This returns a generator to be able to read all lines from the given file.
If `tail` is True, then it will wait until the cache is complete to exit.
Otherwise, it will read the file only once.
"""
# Always read at least once to read the whole file.
while True:
pos = fh.tell()
line = fh.readline()
# Check if we are at EOF or if we have an incomplete line.
if not line or (line and line[-1] != b'\n'[0]):
if not tail:
break
# Complete reading only when the cache is complete.
if self._is_cache_complete(self._pipeline_id):
break
# Otherwise wait for new data in the file to be written.
time.sleep(0.5)
fh.seek(pos)
else:
# The first line at pos = 0 is always the header. Read the line without
# the new line.
to_decode = line[:-1]
proto_cls = TestStreamFileHeader if pos == 0 else TestStreamFileRecord
msg = self._try_parse_as(proto_cls, to_decode)
if msg:
yield msg
else:
break
def _try_parse_as(self, proto_cls, to_decode):
try:
msg = proto_cls()
msg.ParseFromString(self._coder.decode(to_decode))
except DecodeError:
_LOGGER.error(
'Could not parse as %s. This can indicate that the cache is '
          'corrupted. Please restart the kernel. '
'\nfile: %s \nmessage: %s',
proto_cls,
self._path,
to_decode)
msg = None
return msg
def read(self, tail):
"""Reads all TestStreamFile(Header|TestStreamFileRecord)s from file.
This returns a generator to be able to read all lines from the given file.
If `tail` is True, then it will wait until the cache is complete to exit.
Otherwise, it will read the file only once.
"""
with self._wait_until_file_exists() as f:
for e in self._emit_from_file(f, tail):
yield e
class StreamingCache(CacheManager):
"""Abstraction that holds the logic for reading and writing to cache.
"""
def __init__(
self, cache_dir, is_cache_complete=None, sample_resolution_sec=0.1):
self._sample_resolution_sec = sample_resolution_sec
self._is_cache_complete = is_cache_complete
if cache_dir:
self._cache_dir = cache_dir
else:
self._cache_dir = tempfile.mkdtemp(
prefix='interactive-temp-', dir=os.environ.get('TEST_TMPDIR', None))
# List of saved pcoders keyed by PCollection path. It is OK to keep this
# list in memory because once FileBasedCacheManager object is
# destroyed/re-created it loses the access to previously written cache
# objects anyways even if cache_dir already exists. In other words,
# it is not possible to resume execution of Beam pipeline from the
# saved cache if FileBasedCacheManager has been reset.
#
# However, if we are to implement better cache persistence, one needs
# to take care of keeping consistency between the cached PCollection
# and its PCoder type.
self._saved_pcoders = {}
self._default_pcoder = SafeFastPrimitivesCoder()
# The sinks to capture data from capturable sources.
# Dict([str, StreamingCacheSink])
self._capture_sinks = {}
@property
def capture_size(self):
return sum([sink.size_in_bytes for _, sink in self._capture_sinks.items()])
@property
def capture_paths(self):
return list(self._capture_sinks.keys())
def exists(self, *labels):
path = os.path.join(self._cache_dir, *labels)
return os.path.exists(path)
# TODO(srohde): Modify this to return the correct version.
def read(self, *labels):
"""Returns a generator to read all records from file.
Does not tail.
"""
if not self.exists(*labels):
return iter([]), -1
reader = StreamingCacheSource(
self._cache_dir, labels, self._is_cache_complete).read(tail=False)
header = next(reader)
return StreamingCache.Reader([header], [reader]).read(), 1
def read_multiple(self, labels):
"""Returns a generator to read all records from file.
    Tails the file until the cache is complete. This is because it is used by the
    TestStreamServiceController to read from file during pipeline runtime, which
    needs to block.
"""
readers = [
StreamingCacheSource(self._cache_dir, l,
self._is_cache_complete).read(tail=True)
for l in labels
]
headers = [next(r) for r in readers]
return StreamingCache.Reader(headers, readers).read()
def write(self, values, *labels):
"""Writes the given values to cache.
"""
directory = os.path.join(self._cache_dir, *labels[:-1])
filepath = os.path.join(directory, labels[-1])
if not os.path.exists(directory):
os.makedirs(directory)
with open(filepath, 'ab') as f:
for v in values:
if isinstance(v, (TestStreamFileHeader, TestStreamFileRecord)):
val = v.SerializeToString()
else:
val = v
f.write(self._default_pcoder.encode(val) + b'\n')
def source(self, *labels):
"""Returns the StreamingCacheManager source.
This is beam.Impulse() because unbounded sources will be marked with this
and then the PipelineInstrument will replace these with a TestStream.
"""
return beam.Impulse()
def sink(self, labels, is_capture=False):
"""Returns a StreamingCacheSink to write elements to file.
Note that this is assumed to only work in the DirectRunner as the underlying
StreamingCacheSink assumes a single machine to have correct element
ordering.
"""
filename = labels[-1]
cache_dir = os.path.join(self._cache_dir, *labels[:-1])
sink = StreamingCacheSink(cache_dir, filename, self._sample_resolution_sec)
if is_capture:
self._capture_sinks[sink.path] = sink
return sink
def save_pcoder(self, pcoder, *labels):
self._saved_pcoders[os.path.join(*labels)] = pcoder
def load_pcoder(self, *labels):
return (
self._default_pcoder if self._default_pcoder is not None else
self._saved_pcoders[os.path.join(*labels)])
def cleanup(self):
if os.path.exists(self._cache_dir):
shutil.rmtree(self._cache_dir)
self._saved_pcoders = {}
self._capture_sinks = {}
class Reader(object):
"""Abstraction that reads from PCollection readers.
This class is an Abstraction layer over multiple PCollection readers to be
used for supplying a TestStream service with events.
This class is also responsible for holding the state of the clock, injecting
clock advancement events, and watermark advancement events.
"""
def __init__(self, headers, readers):
# This timestamp is used as the monotonic clock to order events in the
# replay.
self._monotonic_clock = timestamp.Timestamp.of(0)
# The PCollection cache readers.
self._readers = {}
# The file headers that are metadata for that particular PCollection.
# The header allows for metadata about an entire stream, so that the data
# isn't copied per record.
self._headers = {header.tag: header for header in headers}
self._readers = OrderedDict(
((h.tag, r) for (h, r) in zip(headers, readers)))
# The most recently read timestamp per tag.
self._stream_times = {
tag: timestamp.Timestamp(seconds=0)
for tag in self._headers
}
def _test_stream_events_before_target(self, target_timestamp):
"""Reads the next iteration of elements from each stream.
Retrieves an element from each stream iff the most recently read timestamp
from that stream is less than the target_timestamp. Since the amount of
events may not fit into memory, this StreamingCache reads at most one
element from each stream at a time.
"""
records = []
for tag, r in self._readers.items():
# The target_timestamp is the maximum timestamp that was read from the
# stream. Some readers may have elements that are less than this. Thus,
# we skip all readers that already have elements that are at this
# timestamp so that we don't read everything into memory.
if self._stream_times[tag] >= target_timestamp:
continue
try:
record = next(r).recorded_event
if record.HasField('processing_time_event'):
self._stream_times[tag] += timestamp.Duration(
micros=record.processing_time_event.advance_duration)
records.append((tag, record, self._stream_times[tag]))
except StopIteration:
pass
return records
def _merge_sort(self, previous_events, new_events):
return sorted(
previous_events + new_events, key=lambda x: x[2], reverse=True)
def _min_timestamp_of(self, events):
return events[-1][2] if events else timestamp.MAX_TIMESTAMP
def _event_stream_caught_up_to_target(self, events, target_timestamp):
empty_events = not events
stream_is_past_target = self._min_timestamp_of(events) > target_timestamp
return empty_events or stream_is_past_target
def read(self):
"""Reads records from PCollection readers.
"""
# The largest timestamp read from the different streams.
target_timestamp = timestamp.MAX_TIMESTAMP
# The events from last iteration that are past the target timestamp.
unsent_events = []
# Emit events until all events have been read.
while True:
# Read the next set of events. The read events will most likely be
# out of order if there are multiple readers. Here we sort them into
# a more manageable state.
new_events = self._test_stream_events_before_target(target_timestamp)
events_to_send = self._merge_sort(unsent_events, new_events)
if not events_to_send:
break
# Get the next largest timestamp in the stream. This is used as the
# timestamp for readers to "catch-up" to. This will only read from
# readers with a timestamp less than this.
target_timestamp = self._min_timestamp_of(events_to_send)
# Loop through the elements with the correct timestamp.
while not self._event_stream_caught_up_to_target(events_to_send,
target_timestamp):
# First advance the clock to match the time of the stream. This has
# a side-effect of also advancing this cache's clock.
tag, r, curr_timestamp = events_to_send.pop()
if curr_timestamp > self._monotonic_clock:
yield self._advance_processing_time(curr_timestamp)
# Then, send either a new element or watermark.
if r.HasField('element_event'):
r.element_event.tag = tag
yield r
elif r.HasField('watermark_event'):
r.watermark_event.tag = tag
yield r
unsent_events = events_to_send
target_timestamp = self._min_timestamp_of(unsent_events)
def _advance_processing_time(self, new_timestamp):
"""Advances the internal clock and returns an AdvanceProcessingTime event.
"""
      advance_by = new_timestamp.micros - self._monotonic_clock.micros
      e = TestStreamPayload.Event(
          processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
              advance_duration=advance_by))
self._monotonic_clock = new_timestamp
return e
| [] | [] | [
"TEST_TMPDIR"
] | [] | ["TEST_TMPDIR"] | python | 1 | 0 | |
main.go | package main
import (
"flag"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/alphagov/govuk_crawler_worker/http_crawler"
"github.com/alphagov/govuk_crawler_worker/queue"
"github.com/alphagov/govuk_crawler_worker/ttl_hash_set"
"github.com/alphagov/govuk_crawler_worker/util"
)
var (
amqpAddr = util.GetEnvDefault("AMQP_ADDRESS", "amqp://guest:guest@localhost:5672/")
basicAuthPassword = util.GetEnvDefault("BASIC_AUTH_PASSWORD", "")
basicAuthUsername = util.GetEnvDefault("BASIC_AUTH_USERNAME", "")
blacklistPaths = util.GetEnvDefault("BLACKLIST_PATHS", "/search,/government/uploads")
crawlerThreads = util.GetEnvDefault("CRAWLER_THREADS", "4")
exchangeName = util.GetEnvDefault("AMQP_EXCHANGE", "govuk_crawler_exchange")
httpPort = util.GetEnvDefault("HTTP_PORT", "8080")
maxCrawlRetries = util.GetEnvDefault("MAX_CRAWL_RETRIES", "4")
queueName = util.GetEnvDefault("AMQP_MESSAGE_QUEUE", "govuk_crawler_queue")
redisAddr = util.GetEnvDefault("REDIS_ADDRESS", "127.0.0.1:6379")
redisKeyPrefix = util.GetEnvDefault("REDIS_KEY_PREFIX", "gcw")
rootURLs []*url.URL
rootURLString = util.GetEnvDefault("ROOT_URLS", "https://www.gov.uk/")
ttlExpireString = util.GetEnvDefault("TTL_EXPIRE_TIME", "12h")
mirrorRoot = os.Getenv("MIRROR_ROOT")
rateLimitToken = os.Getenv("RATE_LIMIT_TOKEN")
)
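// Illustrative invocation for a local run; the values below are placeholders and
// not project defaults beyond those encoded in GetEnvDefault above:
//   MIRROR_ROOT=/mnt/crawler_mirror ROOT_URLS=https://www.gov.uk/ CRAWLER_THREADS=8 ./govuk_crawler_worker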
const versionNumber string = "0.2.0"
func init() {
jsonFlag := flag.Bool("json", false, "output logs as JSON")
	quietFlag := flag.Bool("quiet", false, "suppress all logging except errors")
verboseFlag := flag.Bool("verbose", false, "verbose logging showing debug messages")
versionFlag := flag.Bool("version", false, "show version and exit")
flag.Parse()
switch {
case *quietFlag:
log.SetLevel(log.ErrorLevel)
case *verboseFlag:
log.SetLevel(log.DebugLevel)
default:
log.SetLevel(log.InfoLevel)
}
log.SetOutput(os.Stderr)
if *jsonFlag {
log.SetFormatter(new(log.JSONFormatter))
}
if *versionFlag {
fmt.Println(versionNumber)
os.Exit(0)
}
}
func main() {
if mirrorRoot == "" {
log.Fatalln("MIRROR_ROOT environment variable not set")
}
rootURLStrings := strings.Split(rootURLString, ",")
for _, u := range rootURLStrings {
rootURL, err := url.Parse(u)
if err != nil {
log.Fatalln("Couldn't parse ROOT_URL:", u)
}
rootURLs = append(rootURLs, rootURL)
}
ttlExpireTime, err := time.ParseDuration(ttlExpireString)
if err != nil {
log.Fatalln("Couldn't parse TTL_EXPIRE_TIME:", ttlExpireString)
}
ttlHashSet, err := ttl_hash_set.NewTTLHashSet(redisKeyPrefix, redisAddr, ttlExpireTime)
if err != nil {
log.Fatalln(err)
}
defer ttlHashSet.Close()
log.Infoln("Connected to Redis service:", ttlHashSet)
queueManager, err := queue.NewManager(amqpAddr, exchangeName, queueName)
if err != nil {
log.Fatalln(err)
}
defer queueManager.Close()
log.Infoln("Connected to AMQP service:", queueManager)
var crawler *http_crawler.Crawler
if basicAuthUsername != "" && basicAuthPassword != "" {
crawler = http_crawler.NewCrawler(rootURLs, versionNumber, rateLimitToken,
&http_crawler.BasicAuth{basicAuthUsername, basicAuthPassword})
} else {
crawler = http_crawler.NewCrawler(rootURLs, versionNumber, rateLimitToken, nil)
}
log.Infoln("Generated crawler:", crawler)
deliveries, err := queueManager.Consume()
if err != nil {
log.Fatalln(err)
}
log.Infoln("Generated delivery (consumer) channel:", deliveries)
dontQuit := make(chan struct{})
var acknowledgeChan, crawlChan, persistChan, parseChan <-chan *CrawlerMessageItem
	var publishChan <-chan *url.URL
var crawlerThreadsInt int
crawlerThreadsInt, err = strconv.Atoi(crawlerThreads)
if err != nil {
crawlerThreadsInt = 1
}
var maxCrawlRetriesInt int
maxCrawlRetriesInt, err = strconv.Atoi(maxCrawlRetries)
if err != nil {
maxCrawlRetriesInt = 4
}
crawlChan = ReadFromQueue(deliveries, rootURLs, ttlHashSet, splitPaths(blacklistPaths), crawlerThreadsInt)
persistChan = CrawlURL(ttlHashSet, crawlChan, crawler, crawlerThreadsInt, maxCrawlRetriesInt)
parseChan = WriteItemToDisk(mirrorRoot, persistChan)
publishChan, acknowledgeChan = ExtractURLs(parseChan)
go PublishURLs(ttlHashSet, queueManager, publishChan)
go AcknowledgeItem(acknowledgeChan, ttlHashSet)
healthCheck := NewHealthCheck(queueManager, ttlHashSet)
http.HandleFunc("/healthcheck", healthCheck.HTTPHandler())
log.Fatalln(http.ListenAndServe(":"+httpPort, nil))
<-dontQuit
}
func splitPaths(paths string) []string {
if !strings.Contains(paths, ",") {
return []string{paths}
}
splitPaths := strings.Split(paths, ",")
trimmedPaths := make([]string, len(splitPaths))
for i, v := range splitPaths {
		trimmedPaths[i] = strings.TrimSpace(v)
}
return trimmedPaths
}
| [
"\"MIRROR_ROOT\"",
"\"RATE_LIMIT_TOKEN\""
] | [] | [
"MIRROR_ROOT",
"RATE_LIMIT_TOKEN"
] | [] | ["MIRROR_ROOT", "RATE_LIMIT_TOKEN"] | go | 2 | 0 | |
PhoneSearch/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PhoneSearch.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
github-clone-all.py | #!/usr/bin/env python3
import os
import time
import json
import requests
import subprocess
import sys
from dotenv import load_dotenv
scriptDir = os.path.dirname(__file__)
envPath = os.path.join(scriptDir, ".env")
if not os.path.isfile(envPath):
sys.exit("No .env file found, please create one. :)")
load_dotenv(envPath)
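# The .env file is expected to provide OUTPUT_DIR, GITHUB_USERNAME and GITHUB_TOKEN,
# for example (values are placeholders):
#   OUTPUT_DIR=/home/me/github-backups
#   GITHUB_USERNAME=octocat
#   GITHUB_TOKEN=ghp_xxxxxxxxxxxxxxxx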
outputDir = os.getenv('OUTPUT_DIR')
githubUsername = os.getenv('GITHUB_USERNAME')
githubToken = os.getenv('GITHUB_TOKEN')
currentDate = time.strftime("%Y-%m-%d", time.gmtime())
if not outputDir or not os.path.isdir(outputDir):
sys.exit("Invalid OUTPUT_DIR in .env file.")
headers = {'Authorization': 'token ' + githubToken}
# Get repo URLs.
print("Fetching repo URLs...")
lastPageReached = False
page = 1
repoUrls = []
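# Page through the API 100 repositories at a time; a page returning fewer than 100
# entries is treated as the last one.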
while lastPageReached is False:
r = requests.get(f'https://api.github.com/user/repos?type=all&per_page=100&page={page}', headers=headers)
page += 1
rJson = r.json()
    if len(rJson) < 100:
lastPageReached = True
for repo in rJson:
repoUrls.append(repo["clone_url"])
# Get Gist URLs.
print("Fetching Gist URLs...")
lastPageReached = False
page = 1
gistUrls = []
while lastPageReached is False:
r = requests.get(f'https://api.github.com/users/{githubUsername}/gists?per_page=100&page={page}')
page += 1
rJson = r.json()
    if len(rJson) < 100:
lastPageReached = True
for gist in rJson:
gistUrls.append(gist["git_pull_url"])
saveDir = os.path.join(outputDir, currentDate)
os.mkdir(saveDir)
# Clone repos.
repoDir = os.path.join(saveDir, "repositories")
os.mkdir(repoDir)
for url in repoUrls:
subprocess.run(f'cd {repoDir}; git clone --bare --recurse-submodules {url}', shell=True)
# Clone gists.
gistDir = os.path.join(saveDir, "gists")
os.mkdir(gistDir)
for url in gistUrls:
subprocess.run(f'cd {gistDir}; git clone --bare {url}', shell=True)
| [] | [] | [
"GITHUB_USERNAME",
"GITHUB_TOKEN",
"OUTPUT_DIR"
] | [] | ["GITHUB_USERNAME", "GITHUB_TOKEN", "OUTPUT_DIR"] | python | 3 | 0 | |
fastmri_recon/training_scripts/nc_train_block.py | import math
import os
import os.path as op
import time
import pickle
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.mixed_precision import experimental as mixed_precision
from tensorflow_addons.callbacks import TQDMProgressBar
from fastmri_recon.config import *
from fastmri_recon.data.datasets.fastmri_pyfunc_non_cartesian import train_nc_kspace_dataset_from_indexable as singlecoil_dataset
from fastmri_recon.data.datasets.oasis_tf_records import train_nc_kspace_dataset_from_tfrecords as three_d_dataset
from fastmri_recon.data.datasets.multicoil.non_cartesian_tf_records import train_nc_kspace_dataset_from_tfrecords as multicoil_dataset
from fastmri_recon.models.subclassed_models.ncpdnet import NCPDNet
from fastmri_recon.models.training.compile import default_model_compile
from fastmri_recon.training_scripts.model_saving_workaround import ModelCheckpointWorkAround
n_volumes_train_fastmri = 973
n_volumes_train_oasis = 3273
# this number means that 99.56% of all images will not be affected by
# cropping
IM_SIZE = (640, 400)
VOLUME_SIZE = (176, 256, 256)
def train_ncnet_block(
model,
n_iter=10,
run_id=None,
multicoil=False,
three_d=False,
acq_type='radial',
scale_factor=1e6,
dcomp=False,
contrast=None,
cuda_visible_devices='0123',
n_samples=None,
n_epochs=200,
use_mixed_precision=False,
loss='mae',
original_run_id=None,
checkpoint_epoch=0,
save_state=False,
lr=1e-4,
block_size=10,
block_overlap=0,
epochs_per_block_step=None,
**acq_kwargs,
):
# paths
n_volumes_train = n_volumes_train_fastmri
if multicoil:
train_path = f'{FASTMRI_DATA_DIR}multicoil_train/'
val_path = f'{FASTMRI_DATA_DIR}multicoil_val/'
elif three_d:
train_path = f'{OASIS_DATA_DIR}/train/'
val_path = f'{OASIS_DATA_DIR}/val/'
n_volumes_train = n_volumes_train_oasis
else:
train_path = f'{FASTMRI_DATA_DIR}singlecoil_train/singlecoil_train/'
val_path = f'{FASTMRI_DATA_DIR}singlecoil_val/'
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(cuda_visible_devices)
# trying mixed precision
if use_mixed_precision:
policy_type = 'mixed_float16'
else:
policy_type = 'float32'
policy = mixed_precision.Policy(policy_type)
mixed_precision.set_policy(policy)
# generators
if multicoil:
dataset = multicoil_dataset
image_size = IM_SIZE
elif three_d:
dataset = three_d_dataset
image_size = VOLUME_SIZE
else:
dataset = singlecoil_dataset
image_size = IM_SIZE
if not three_d:
add_kwargs = {
'contrast': contrast,
'rand': True,
'inner_slices': None,
}
else:
add_kwargs = {}
add_kwargs.update(**acq_kwargs)
train_set = dataset(
train_path,
image_size,
acq_type=acq_type,
compute_dcomp=dcomp,
scale_factor=scale_factor,
n_samples=n_samples,
**add_kwargs
)
val_set = dataset(
val_path,
image_size,
acq_type=acq_type,
compute_dcomp=dcomp,
scale_factor=scale_factor,
**add_kwargs
)
additional_info = f'{acq_type}'
if contrast is not None:
additional_info += f'_{contrast}'
if n_samples is not None:
additional_info += f'_{n_samples}'
if loss != 'mae':
additional_info += f'_{loss}'
if dcomp:
additional_info += '_dcomp'
if block_overlap != 0:
additional_info += f'_blkov{block_overlap}'
if checkpoint_epoch == 0:
run_id = f'{run_id}_bbb_{additional_info}_{int(time.time())}'
else:
run_id = original_run_id
chkpt_path = f'{CHECKPOINTS_DIR}checkpoints/{run_id}' + '-{epoch:02d}.hdf5'
log_dir = op.join(f'{LOGS_DIR}logs', run_id)
tboard_cback = TensorBoard(
profile_batch=0,
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
write_images=False,
)
tqdm_cback = TQDMProgressBar()
n_steps = n_volumes_train
chkpt_cback = ModelCheckpointWorkAround(
chkpt_path,
save_freq=int(epochs_per_block_step*n_steps),
save_optimizer=False,
save_weights_only=True,
)
print(run_id)
# if there are 4 blocks, with a block size of 2 and a block overlap of 1
# we do the following block combinations:
# 01, 12, 23 -> n block steps = 3
# if there are 6 blocks with a block size 3 and a block overlap of 2:
# 012, 123, 234, 345 -> n = 4
# if there are 6 blocks with a block size 3 and a block overlap of 1:
# 012, 234, 456 -> n = 3
stride = block_size - block_overlap
assert stride > 0
n_block_steps = int(math.ceil((n_iter - block_size) / stride) + 1)
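    # e.g. n_iter=6, block_size=3, block_overlap=2 gives stride=1 and
    # n_block_steps = ceil((6 - 3) / 1) + 1 = 4, matching the 012,123,234,345 case above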
## epochs handling
restart_at_block_step = checkpoint_epoch // epochs_per_block_step
start_epoch = checkpoint_epoch
final_epoch = checkpoint_epoch + min(epochs_per_block_step, n_epochs)
for i_step in range(n_block_steps):
if i_step < restart_at_block_step:
continue
first_block_to_train = i_step * stride
blocks = list(range(first_block_to_train, first_block_to_train + block_size))
model.blocks_to_train = blocks
default_model_compile(model, lr=lr, loss=loss)
# first run of the model to avoid the saving error
# ValueError: as_list() is not defined on an unknown TensorShape.
# it can also allow loading of weights
model(next(iter(train_set))[0])
        if checkpoint_epoch != 0 and i_step == restart_at_block_step:
model.load_weights(f'{CHECKPOINTS_DIR}checkpoints/{original_run_id}-{checkpoint_epoch:02d}.hdf5')
            if checkpoint_epoch % epochs_per_block_step != 0:
grad_vars = model.trainable_weights
zero_grads = [tf.zeros_like(w) for w in grad_vars]
model.optimizer.apply_gradients(zip(zero_grads, grad_vars))
with open(f'{CHECKPOINTS_DIR}checkpoints/{original_run_id}-optimizer.pkl', 'rb') as f:
weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
model.fit(
train_set,
steps_per_epoch=n_steps,
initial_epoch=start_epoch,
epochs=final_epoch,
validation_data=val_set,
validation_steps=5,
verbose=0,
callbacks=[tboard_cback, chkpt_cback, tqdm_cback],
)
n_epochs = n_epochs - (final_epoch - start_epoch)
if n_epochs <= 0:
break
start_epoch = final_epoch
final_epoch += min(epochs_per_block_step, n_epochs)
if save_state:
symbolic_weights = getattr(model.optimizer, 'weights')
weight_values = K.batch_get_value(symbolic_weights)
with open(f'{CHECKPOINTS_DIR}checkpoints/{run_id}-optimizer.pkl', 'wb') as f:
pickle.dump(weight_values, f)
return run_id
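# Illustrative call of the block training entry point (argument values below are
# hypothetical, not project defaults):
#   train_ncpdnet(multicoil=True, dcomp=True, n_iter=10, block_size=2,
#                 block_overlap=1, epochs_per_block_step=10, n_epochs=100)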
def train_ncpdnet(
multicoil=False,
three_d=False,
dcomp=False,
normalize_image=False,
n_iter=10,
n_filters=32,
n_primal=5,
non_linearity='relu',
refine_smaps=True,
**train_kwargs,
):
if three_d:
image_size = VOLUME_SIZE
else:
image_size = IM_SIZE
run_params = {
'n_primal': n_primal,
'multicoil': multicoil,
'three_d': three_d,
'activation': non_linearity,
'n_iter': n_iter,
'n_filters': n_filters,
'im_size': image_size,
'dcomp': dcomp,
'normalize_image': normalize_image,
'refine_smaps': refine_smaps,
'fastmri': not three_d,
}
if multicoil:
ncpdnet_type = 'ncpdnet_sense_'
elif three_d:
ncpdnet_type = 'ncpdnet_3d_'
else:
ncpdnet_type = 'ncpdnet_singlecoil_'
additional_info = ''
if n_iter != 10:
additional_info += f'_i{n_iter}'
if non_linearity != 'relu':
additional_info += f'_{non_linearity}'
if multicoil and refine_smaps:
additional_info += '_rfs'
run_id = f'{ncpdnet_type}_{additional_info}'
model = NCPDNet(**run_params)
return train_ncnet_block(
model,
n_iter=n_iter,
run_id=run_id,
multicoil=multicoil,
dcomp=dcomp,
three_d=three_d,
**train_kwargs,
)
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
geodescriber/__init__.py | """The Geodescriber ANALYSIS API MODULE"""
import logging
import os
import CTRegisterMicroserviceFlask
import ee
from flask import Flask
from geodescriber.config import SETTINGS
from geodescriber.routes.api import error
from geodescriber.routes.api.v1 import geodescriber_endpoints_v1
from geodescriber.utils.files import BASE_DIR, PROJECT_DIR
from geodescriber.utils.files import load_config_json
logging.basicConfig(
level=SETTINGS.get('logging', {}).get('level'),
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y%m%d-%H:%M%p',
)
# Initializing GEE
gee = SETTINGS.get('gee')
ee_user = gee.get('service_account')
private_key_file = gee.get('privatekey_file')
if private_key_file:
    logging.info(f'Initializing EE with privatekey.json credential file: {ee_user} | {private_key_file}')
credentials = ee.ServiceAccountCredentials(ee_user, private_key_file)
ee.Initialize(credentials)
ee.data.setDeadline(60000)
else:
raise ValueError("privatekey.json file not found. Unable to authenticate EE.")
# Flask App
app = Flask(__name__)
# Routing
app.register_blueprint(geodescriber_endpoints_v1, url_prefix="/api/v1/geodescriber")
# CT
info = load_config_json('register')
swagger = load_config_json('swagger')
CTRegisterMicroserviceFlask.register(
app=app,
name='geodescriber',
info=info,
swagger=swagger,
mode=CTRegisterMicroserviceFlask.AUTOREGISTER_MODE if os.getenv('CT_REGISTER_MODE') and os.getenv(
'CT_REGISTER_MODE') == 'auto' else CTRegisterMicroserviceFlask.NORMAL_MODE,
ct_url=os.getenv('CT_URL'),
url=os.getenv('LOCAL_URL')
)
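# CT_REGISTER_MODE, CT_URL and LOCAL_URL are read from the environment; setting
# CT_REGISTER_MODE=auto selects CTRegisterMicroserviceFlask's auto-register mode at startup.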
@app.errorhandler(403)
def forbidden(e):
return error(status=403, detail='Forbidden')
@app.errorhandler(404)
def page_not_found(e):
return error(status=404, detail='Not Found')
@app.errorhandler(405)
def method_not_allowed(e):
return error(status=405, detail='Method Not Allowed')
@app.errorhandler(410)
def gone(e):
return error(status=410, detail='Gone')
@app.errorhandler(500)
def internal_server_error(e):
return error(status=500, detail='Internal Server Error')
| [] | [] | [
"LOCAL_URL",
"CT_REGISTER_MODE",
"CT_URL"
] | [] | ["LOCAL_URL", "CT_REGISTER_MODE", "CT_URL"] | python | 3 | 0 | |
mysqld_exporter.go | // Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"strconv"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/version"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/ini.v1"
"github.com/prometheus/mysqld_exporter/collector"
)
var (
listenAddress = kingpin.Flag(
"web.listen-address",
"Address to listen on for web interface and telemetry.",
).Default(":9104").String()
metricPath = kingpin.Flag(
"web.telemetry-path",
"Path under which to expose metrics.",
).Default("/metrics").String()
timeoutOffset = kingpin.Flag(
"timeout-offset",
"Offset to subtract from timeout in seconds.",
).Default("0.25").Float64()
configMycnf = kingpin.Flag(
"config.my-cnf",
"Path to .my.cnf file to read MySQL credentials from.",
).Default(path.Join(os.Getenv("HOME"), ".my.cnf")).String()
tlsInsecureSkipVerify = kingpin.Flag(
"tls.insecure-skip-verify",
"Ignore certificate and server verification when using a tls connection.",
).Bool()
multiHostExporter = kingpin.Flag(
"export-multi-hosts",
"Setting it to true enables scraping multiple mysql hosts.",
).Default("false").Bool()
multiHostExporterConfigFile = kingpin.Flag(
"config-multi-hosts",
"Path to ini config file to fetch mysql client info. Used when export-multi-hosts is true.",
).Default("config-multi.ini").String()
dsn string
configMulti *ini.File
)
// scrapers lists all possible collection methods and if they should be enabled by default.
var scrapers = map[collector.Scraper]bool{
collector.ScrapeGlobalStatus{}: true,
collector.ScrapeGlobalVariables{}: true,
collector.ScrapeSlaveStatus{}: true,
collector.ScrapeProcesslist{}: false,
collector.ScrapeUser{}: false,
collector.ScrapeTableSchema{}: false,
collector.ScrapeInfoSchemaInnodbTablespaces{}: false,
collector.ScrapeInnodbMetrics{}: false,
collector.ScrapeAutoIncrementColumns{}: false,
collector.ScrapeBinlogSize{}: false,
collector.ScrapePerfTableIOWaits{}: false,
collector.ScrapePerfIndexIOWaits{}: false,
collector.ScrapePerfTableLockWaits{}: false,
collector.ScrapePerfEventsStatements{}: false,
collector.ScrapePerfEventsStatementsSum{}: false,
collector.ScrapePerfEventsWaits{}: false,
collector.ScrapePerfFileEvents{}: false,
collector.ScrapePerfFileInstances{}: false,
collector.ScrapePerfReplicationGroupMembers{}: false,
collector.ScrapePerfReplicationGroupMemberStats{}: false,
collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
collector.ScrapeUserStat{}: false,
collector.ScrapeClientStat{}: false,
collector.ScrapeTableStat{}: false,
collector.ScrapeSchemaStat{}: false,
collector.ScrapeInnodbCmp{}: true,
collector.ScrapeInnodbCmpMem{}: true,
collector.ScrapeQueryResponseTime{}: true,
collector.ScrapeEngineTokudbStatus{}: false,
collector.ScrapeEngineInnodbStatus{}: false,
collector.ScrapeHeartbeat{}: false,
collector.ScrapeSlaveHosts{}: false,
collector.ScrapeReplicaHost{}: false,
}
func parseMycnf(config interface{}) (string, error) {
var dsn string
opts := ini.LoadOptions{
// MySQL ini file can have boolean keys.
AllowBooleanKeys: true,
}
cfg, err := ini.LoadSources(opts, config)
if err != nil {
return dsn, fmt.Errorf("failed reading ini file: %s", err)
}
user := cfg.Section("client").Key("user").String()
password := cfg.Section("client").Key("password").String()
if (user == "") || (password == "") {
return dsn, fmt.Errorf("no user or password specified under [client] in %s", config)
}
host := cfg.Section("client").Key("host").MustString("localhost")
port := cfg.Section("client").Key("port").MustUint(3306)
socket := cfg.Section("client").Key("socket").String()
if socket != "" {
dsn = fmt.Sprintf("%s:%s@unix(%s)/", user, password, socket)
} else {
dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, host, port)
}
sslCA := cfg.Section("client").Key("ssl-ca").String()
sslCert := cfg.Section("client").Key("ssl-cert").String()
sslKey := cfg.Section("client").Key("ssl-key").String()
if sslCA != "" {
if tlsErr := customizeTLS(sslCA, sslCert, sslKey); tlsErr != nil {
tlsErr = fmt.Errorf("failed to register a custom TLS configuration for mysql dsn: %s", tlsErr)
return dsn, tlsErr
}
dsn = fmt.Sprintf("%s?tls=custom", dsn)
}
return dsn, nil
}
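// A minimal .my.cnf accepted by parseMycnf looks like the following; the credentials
// shown are illustrative only:
//
//	[client]
//	user = exporter
//	password = secret
//	host = 127.0.0.1
//	port = 3306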
func customizeTLS(sslCA string, sslCert string, sslKey string) error {
var tlsCfg tls.Config
caBundle := x509.NewCertPool()
pemCA, err := ioutil.ReadFile(sslCA)
if err != nil {
return err
}
if ok := caBundle.AppendCertsFromPEM(pemCA); ok {
tlsCfg.RootCAs = caBundle
} else {
return fmt.Errorf("failed parse pem-encoded CA certificates from %s", sslCA)
}
if sslCert != "" && sslKey != "" {
certPairs := make([]tls.Certificate, 0, 1)
keypair, err := tls.LoadX509KeyPair(sslCert, sslKey)
if err != nil {
return fmt.Errorf("failed to parse pem-encoded SSL cert %s or SSL key %s: %s",
sslCert, sslKey, err)
}
certPairs = append(certPairs, keypair)
tlsCfg.Certificates = certPairs
tlsCfg.InsecureSkipVerify = *tlsInsecureSkipVerify
}
mysql.RegisterTLSConfig("custom", &tlsCfg)
return nil
}
func init() {
prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
}
func newHandler(metrics collector.Metrics, scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
filteredScrapers := scrapers
params := r.URL.Query()["collect[]"]
// Use request context for cancellation when connection gets closed.
ctx := r.Context()
// If a timeout is configured via the Prometheus header, add it to the context.
if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
timeoutSeconds, err := strconv.ParseFloat(v, 64)
if err != nil {
level.Error(logger).Log("msg", "Failed to parse timeout from Prometheus header", "err", err)
} else {
if *timeoutOffset >= timeoutSeconds {
// Ignore timeout offset if it doesn't leave time to scrape.
level.Error(logger).Log("msg", "Timeout offset should be lower than prometheus scrape timeout", "offset", *timeoutOffset, "prometheus_scrape_timeout", timeoutSeconds)
} else {
// Subtract timeout offset from timeout.
timeoutSeconds -= *timeoutOffset
}
// Create new timeout context with request context as parent.
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds*float64(time.Second)))
defer cancel()
// Overwrite request with timeout context.
r = r.WithContext(ctx)
}
}
level.Debug(logger).Log("msg", "collect[] params", "params", params)
// Check if we have some "collect[]" query parameters.
if len(params) > 0 {
filters := make(map[string]bool)
for _, param := range params {
filters[param] = true
}
filteredScrapers = nil
for _, scraper := range scrapers {
if filters[scraper.Name()] {
filteredScrapers = append(filteredScrapers, scraper)
}
}
}
registry := prometheus.NewRegistry()
if *multiHostExporter {
target := r.URL.Query().Get("target")
if target == "" {
http.Error(w, "Target parameter is missing", http.StatusBadRequest)
return
}
var err error
if dsn, err = formMultiHostExporterDSN(target, configMulti); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
registry.MustRegister(collector.New(ctx, dsn, metrics, filteredScrapers, logger))
gatherers := prometheus.Gatherers{
prometheus.DefaultGatherer,
registry,
}
// Delegate http serving to Prometheus client library, which will call collector.Collect.
h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{})
h.ServeHTTP(w, r)
}
}
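// Example scrape (illustrative only): a Prometheus server can restrict a scrape
// to specific scrapers with repeated collect[] query parameters, e.g.
//
//   GET /metrics?collect[]=global_status&collect[]=info_schema.innodb_cmp
//
// and may send an "X-Prometheus-Scrape-Timeout-Seconds: 10" header, which
// newHandler turns into a context deadline (minus --timeout-offset). The scraper
// names above are placeholders; the real names come from each collector's Name()
// method.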
func main() {
// Generate ON/OFF flags for all scrapers.
scraperFlags := map[collector.Scraper]*bool{}
for scraper, enabledByDefault := range scrapers {
defaultOn := "false"
if enabledByDefault {
defaultOn = "true"
}
f := kingpin.Flag(
"collect."+scraper.Name(),
scraper.Help(),
).Default(defaultOn).Bool()
scraperFlags[scraper] = f
}
// Parse flags.
promlogConfig := &promlog.Config{}
flag.AddFlags(kingpin.CommandLine, promlogConfig)
kingpin.Version(version.Print("mysqld_exporter"))
kingpin.HelpFlag.Short('h')
kingpin.Parse()
logger := promlog.New(promlogConfig)
// landingPage contains the HTML served at '/'.
// TODO: Make this nicer and more informative.
var landingPage = []byte(`<html>
<head><title>MySQLd exporter</title></head>
<body>
<h1>MySQLd exporter</h1>
<p><a href='` + *metricPath + `'>Metrics</a></p>
</body>
</html>
`)
level.Info(logger).Log("msg", "Starting msqyld_exporter", "version", version.Info())
level.Info(logger).Log("msg", "Build context", version.BuildContext())
if !*multiHostExporter {
var err error
if dsn, err = parseMycnf(*configMycnf); err != nil {
level.Info(logger).Log("msg", "Error parsing my.cnf", "file", *configMycnf, "err", err)
os.Exit(1)
}
} else {
level.Info(logger).Log("msg", "Multi host exporter mode enabled")
var err error
if configMulti, err = newMultiHostExporterConfig(*multiHostExporterConfigFile); err != nil {
level.Info(logger).Log("msg", "Error parsing multi host config", "file", *multiHostExporterConfigFile, "err", err)
os.Exit(1)
}
if err := validateMultiHostExporterConfig(configMulti); err != nil {
level.Info(logger).Log("msg", err)
os.Exit(1)
}
}
// Register only scrapers enabled by flag.
enabledScrapers := []collector.Scraper{}
for scraper, enabled := range scraperFlags {
if *enabled {
level.Info(logger).Log("msg", "Scraper enabled", "scraper", scraper.Name())
enabledScrapers = append(enabledScrapers, scraper)
}
}
handlerFunc := newHandler(collector.NewMetrics(), enabledScrapers, logger)
http.Handle(*metricPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handlerFunc))
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write(landingPage)
})
level.Info(logger).Log("msg", "Listening on address", "address", *listenAddress)
if err := http.ListenAndServe(*listenAddress, nil); err != nil {
level.Error(logger).Log("msg", "Error starting HTTP server", "err", err)
os.Exit(1)
}
}
| [ "\"HOME\"" ] | [] | [ "HOME" ] | [] | ["HOME"] | go | 1 | 0 | |
kmd_labs/oracles/select_oracle.py | #!/usr/bin/env python3
import sys
import os
import json
import getconf
import colorTable
def selectRange(low,high, msg):
    while True:
        try:
            number = int(input(msg))
        except ValueError:
            print("integer only, try again")
            continue
        if low <= number <= high:
            return number
        else:
            print("input outside range, try again")
# construct daemon url
i = 0
ID = 1
oracletxIDs = []
oracleChains = []
HOME = os.environ['HOME']
with open(HOME + '/StakedNotary/assetchains.json') as file:
    assetchains = json.load(file)
print(colorTable.colors[i]+ 'ID'.rjust(3) + ' | ' +'ASSET CHAIN'.ljust(12) + ' | ' + 'ORACLE NAME'.ljust(20) + ' | ' + 'ORACLE DESCRIPTION'.ljust(50) + ' | ' + 'ORACLE TX ID')
for chain in assetchains:
    RPCURL = getconf.def_credentials(chain['ac_name'])
    oraclelist_result = getconf.oracleslist_rpc(chain['ac_name'])
    i+=1
    for oracle_txid in oraclelist_result:
        oraclesinfo_result = getconf.oraclesinfo_rpc(chain['ac_name'], oracle_txid)
        description = oraclesinfo_result['description']
        name = oraclesinfo_result['name']
        # if description[0:3] == 'GPS':
        print(colorTable.colors[i]+ str(ID).rjust(3) + ' | ' + chain['ac_name'].ljust(12) + ' | ' + name.ljust(20) + ' | ' + description.ljust(50) + ' | ' + oracle_txid)
        oracletxIDs.append(oracle_txid)
        oracleChains.append(chain['ac_name'])
        ID+=1
chosen_one = selectRange(0,len(oracletxIDs),"Select an oracle: ")
chosen_info = getconf.oraclesinfo_rpc(oracleChains[chosen_one-1], oracletxIDs[chosen_one-1])
print("you selected oracle " + str(chosen_one) + " ["+chosen_info['name']+" ]")
print(chosen_info)
| [] | [] | [ "HOME" ] | [] | ["HOME"] | python | 1 | 0 | |
net/arp.go | package net
import (
"fmt"
"io"
"os"
"path/filepath"
)
// Configure the ARP cache parameters for the given interface. This
// makes containers react more quickly to a change in the MAC address
// associated with an IP address.
func ConfigureARPCache(procPath, name string) error {
if err := sysctl(procPath, fmt.Sprintf("net/ipv4/neigh/%s/base_reachable_time", name), "5"); err != nil {
return err
}
if err := sysctl(procPath, fmt.Sprintf("net/ipv4/neigh/%s/delay_first_probe_time", name), "2"); err != nil {
return err
}
if err := sysctl(procPath, fmt.Sprintf("net/ipv4/neigh/%s/ucast_solicit", name), "1"); err != nil {
return err
}
return nil
}
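// For illustration: ConfigureARPCache(procPath, "ethwe") ends up writing the
// values above through sysctl, e.g.
//
//   sysctl(procPath, "net/ipv4/neigh/ethwe/base_reachable_time", "5")
//
// which opens <procPath>/sys/net/ipv4/neigh/ethwe/base_reachable_time and
// writes "5". The interface name "ethwe" is just an example.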
func sysctl(procPath, variable, value string) error {
f, err := os.OpenFile(filepath.Join(procPath, "/sys/", variable), os.O_WRONLY, 0)
if err != nil {
return err
}
n, err := io.WriteString(f, value)
if err != nil {
return err
}
if n < len(value) {
return io.ErrShortWrite
}
err = f.Close()
if err != nil {
return err
}
return nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
api/utils/fs/fs.go | package fs
import (
"os"
"path/filepath"
)
func CreateDirIfNotExists(dirPath string) error {
if _, err := os.Stat(dirPath); os.IsNotExist(err) {
return os.Mkdir(dirPath, 0755)
}
return nil
}
func IsUserArchiveExists(userCode string) bool {
if _, err := os.Stat(os.Getenv("FILE_DIR") + userCode + ".zip"); os.IsNotExist(err) {
return false
}
return true
}
func ReCreateUserDir(userCode string) error {
userDir := os.Getenv("FILE_DIR") + userCode
err := os.RemoveAll(userDir)
if err != nil {
return err
}
err = os.Mkdir(userDir, 0755)
if err != nil {
return err
}
return nil
}
func ClearAllUserFiles(userCode string) error {
userDir := os.Getenv("FILE_DIR") + userCode
err := os.RemoveAll(userDir + ".zip")
if err != nil {
return err
}
return os.RemoveAll(userDir)
}
func FileNameWithoutExt(fileName string) string {
return fileName[:len(fileName)-len(filepath.Ext(fileName))]
}
// GetPathWithoutExt returns path with everything from the last '.' onward
// removed; if path contains no '.', it is returned unchanged.
func GetPathWithoutExt(path string) string {
if len(path) == 0 {
return ""
}
extIndex := len(path) // no '.' found: keep the whole path
for i := len(path) - 1; i >= 0; i-- {
if path[i] == '.' {
extIndex = i
break
}
}
return path[0:extIndex]
}
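// Usage examples (illustrative):
//
//   FileNameWithoutExt("report.pdf")        // "report"
//   GetPathWithoutExt("/data/u42/file.zip") // "/data/u42/file"
//   GetPathWithoutExt("archive.tar.gz")     // "archive.tar" (only the last extension is dropped)
//   GetPathWithoutExt("README")             // "README" (nothing to strip)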
| [ "\"FILE_DIR\"", "\"FILE_DIR\"", "\"FILE_DIR\"" ] | [] | [ "FILE_DIR" ] | [] | ["FILE_DIR"] | go | 1 | 0 | |
testsuite/runtest.py | #!/usr/bin/env python
from __future__ import print_function
import os
import glob
import sys
import platform
import subprocess
import difflib
import filecmp
import shutil
from optparse import OptionParser
#
# Get standard testsuite test arguments: srcdir exepath
#
srcdir = "."
tmpdir = "."
path = "../.."
# Options for the command line
parser = OptionParser()
parser.add_option("-p", "--path", help="add to executable path",
action="store", type="string", dest="path", default="")
parser.add_option("--devenv-config", help="use a MS Visual Studio configuration",
action="store", type="string", dest="devenv_config", default="")
parser.add_option("--solution-path", help="MS Visual Studio solution path",
action="store", type="string", dest="solution_path", default="")
(options, args) = parser.parse_args()
if args and len(args) > 0 :
srcdir = args[0]
srcdir = os.path.abspath (srcdir) + "/"
os.chdir (srcdir)
if args and len(args) > 1 :
path = args[1]
path = os.path.normpath (path)
tmpdir = "."
tmpdir = os.path.abspath (tmpdir)
redirect = " >> out.txt "
def oiio_relpath (path, start=os.curdir):
"Wrapper around os.path.relpath which always uses '/' as the separator."
p = os.path.relpath (path, start)
return p if sys.platform != "win32" else p.replace ('\\', '/')
# Try to figure out where some key things are. Go by env variables set by
# the cmake tests, but if those aren't set, assume somebody is running
# this script by hand from inside build/PLATFORM/testsuite/TEST and that
# the rest of the tree has the standard layout.
OIIO_TESTSUITE_ROOT = oiio_relpath(os.environ.get('OIIO_TESTSUITE_ROOT',
'../../../../testsuite'))
OIIO_TESTSUITE_IMAGEDIR = os.environ.get('OIIO_TESTSUITE_IMAGEDIR',
'../../../../../oiio-images')
if OIIO_TESTSUITE_IMAGEDIR:
OIIO_TESTSUITE_IMAGEDIR = oiio_relpath(OIIO_TESTSUITE_IMAGEDIR)
# Set it back so test's can use it (python-imagebufalgo)
os.environ['OIIO_TESTSUITE_IMAGEDIR'] = OIIO_TESTSUITE_IMAGEDIR
refdir = "ref/"
refdirlist = [ refdir ]
mytest = os.path.split(os.path.abspath(os.getcwd()))[-1]
test_source_dir = os.environ.get('OIIO_TESTSUITE_SRC',
os.path.join(OIIO_TESTSUITE_ROOT, mytest))
colorconfig_file = os.path.join(OIIO_TESTSUITE_ROOT,
"common", "OpenColorIO", "nuke-default", "config.ocio")
# Swap the relative diff lines if the test suite is not being run via Makefile
if OIIO_TESTSUITE_ROOT != "../../../../testsuite":
def replace_relative(lines):
imgdir = None
if OIIO_TESTSUITE_IMAGEDIR:
imgdir = os.path.basename(OIIO_TESTSUITE_IMAGEDIR)
if imgdir != "oiio-images":
oiioimgs = os.path.basename(os.path.dirname(OIIO_TESTSUITE_IMAGEDIR))
if oiioimgs == "oiio-images":
imgdir = "oiio-images/" + imgdir
imgdir = "../../../../../" + imgdir
for i in range(len(lines)):  # range (not xrange) so this also works under Python 3
lines[i] = lines[i].replace("../../../../testsuite", OIIO_TESTSUITE_ROOT)
if imgdir:
lines[i] = lines[i].replace(imgdir, OIIO_TESTSUITE_IMAGEDIR)
return lines
else:
replace_relative = None
command = ""
outputs = [ "out.txt" ] # default
failureok = 0
failthresh = 0.004
hardfail = 0.012
failpercent = 0.02
anymatch = False
image_extensions = [ ".tif", ".tx", ".exr", ".jpg", ".png", ".rla",
".dpx", ".iff", ".psd" ]
# print ("srcdir = " + srcdir)
# print ("tmpdir = " + tmpdir)
# print ("path = " + path)
# print ("refdir = " + refdir)
# print ("test source dir = " + test_source_dir)
if platform.system() == 'Windows' :
if not os.path.exists("./ref") :
shutil.copytree (os.path.join (test_source_dir, "ref"), "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
shutil.copytree (os.path.join (test_source_dir, "src"), "./src")
# if not os.path.exists("../data") :
# shutil.copytree ("../../../testsuite/data", "..")
# if not os.path.exists("../common") :
# shutil.copytree ("../../../testsuite/common", "..")
else :
def newsymlink(src, dst):
print("newsymlink", src, dst)
# os.path.exists returns False for broken symlinks, so remove if thats the case
if os.path.islink(dst):
os.remove(dst)
os.symlink (src, dst)
if not os.path.exists("./ref") :
newsymlink (os.path.join (test_source_dir, "ref"), "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
newsymlink (os.path.join (test_source_dir, "src"), "./src")
if not os.path.exists("./data") :
newsymlink (test_source_dir, "./data")
if not os.path.exists("../common") :
newsymlink (os.path.join(os.environ['OIIO_TESTSUITE_ROOT'], "common"),
"../common")
# Disable this test on Travis when using leak sanitizer, because the error
# condition makes a leak we can't stop, but that's ok.
import os
if (os.getenv("TRAVIS") and (os.getenv("SANITIZE") in ["leak","address"])
and os.path.exists(os.path.join (test_source_dir,"TRAVIS_SKIP_LSAN"))) :
sys.exit (0)
pythonbin = 'python'
if os.getenv("PYTHON_VERSION") :
pythonbin += os.getenv("PYTHON_VERSION")
#print ("pythonbin = ", pythonbin)
###########################################################################
# Handy functions...
# Compare two text files. Returns 0 if they are equal otherwise returns
# a non-zero value and writes the differences to "diff_file".
# Based on the command-line interface to difflib example from the Python
# documentation
def text_diff (fromfile, tofile, diff_file=None):
import time
try:
fromdate = time.ctime (os.stat (fromfile).st_mtime)
todate = time.ctime (os.stat (tofile).st_mtime)
fromlines = open (fromfile, 'r').readlines()
tolines = open (tofile, 'r').readlines()
if replace_relative:
tolines = replace_relative(tolines)
except:
print ("Unexpected error:", sys.exc_info()[0])
return -1
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile,
fromdate, todate)
# Diff is a generator, but since we need a way to tell if it is
# empty we just store all the text in advance
diff_lines = [l for l in diff]
if not diff_lines:
return 0
if diff_file:
try:
open (diff_file, 'w').writelines (diff_lines)
print ("Diff " + fromfile + " vs " + tofile + " was:\n-------")
# print (diff)
print ("".join(diff_lines))
except:
print ("Unexpected error:", sys.exc_info()[0])
return 1
def oiio_app (app):
# When we use Visual Studio, built applications are stored
# in the app/$(OutDir)/ directory, e.g., Release or Debug.
if (platform.system () != 'Windows' or options.devenv_config == ""):
return os.path.join (path, "src", app, app) + " "
else:
return os.path.join (path, "src", app, options.devenv_config, app) + " "
# Construct a command that will print info for an image, appending output to
# the file "out.txt". If 'safematch' is nonzero, it will exclude printing
# of fields that tend to change from run to run or release to release.
def info_command (file, extraargs="", safematch=False, hash=True,
verbose=True) :
args = "--info"
if verbose :
args += " -v -a"
if safematch :
args += " --no-metamatch \"DateTime|Software|OriginatingProgram|ImageHistory\""
if hash :
args += " --hash"
return (oiio_app("oiiotool") + args + " " + extraargs
+ " " + oiio_relpath(file,tmpdir) + redirect + ";\n")
# Construct a command that will compare two images, appending output to
# the file "out.txt". We allow a small number of pixels to have up to
# 1 LSB (8 bit) error, it's very hard to make different platforms and
# compilers always match to every last floating point bit.
def diff_command (fileA, fileB, extraargs="", silent=False, concat=True) :
command = (oiio_app("idiff") + "-a"
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " -warnpercent " + str(failpercent)
+ " " + extraargs + " " + oiio_relpath(fileA,tmpdir)
+ " " + oiio_relpath(fileB,tmpdir))
if not silent :
command += redirect
if concat:
command += " ;\n"
return command
# Construct a command that will create a texture, appending console
# output to the file "out.txt".
def maketx_command (infile, outfile, extraargs="",
showinfo=False, showinfo_extra="",
silent=False, concat=True) :
command = (oiio_app("maketx")
+ " " + oiio_relpath(infile,tmpdir)
+ " " + extraargs
+ " -o " + oiio_relpath(outfile,tmpdir) )
if not silent :
command += redirect
if concat:
command += " ;\n"
if showinfo:
command += info_command (outfile, extraargs=showinfo_extra, safematch=1)
return command
# Construct a command that will test the basic ability to read and write
# an image, appending output to the file "out.txt". First, iinfo the
# file, including a hash (VERY unlikely not to match if we've read
# correctly). If testwrite is nonzero, also iconvert the file to make a
# copy (tests writing that format), and then idiff to make sure it
# matches the original.
def rw_command (dir, filename, testwrite=True, use_oiiotool=False, extraargs="",
preargs="", idiffextraargs="", output_filename="",
safematch=False, printinfo=True) :
fn = oiio_relpath (dir + "/" + filename, tmpdir)
if printinfo :
cmd = info_command (fn, safematch=safematch)
else :
cmd = ""
if output_filename == "" :
output_filename = filename
if testwrite :
if use_oiiotool :
cmd = (cmd + oiio_app("oiiotool") + preargs + " " + fn
+ " " + extraargs + " -o " + output_filename + redirect + ";\n")
else :
cmd = (cmd + oiio_app("iconvert") + preargs + " " + fn
+ " " + extraargs + " " + output_filename + redirect + ";\n")
cmd = (cmd + oiio_app("idiff") + " -a " + fn
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " " + idiffextraargs + " " + output_filename + redirect + ";\n")
return cmd
# Construct a command that will testtex
def testtex_command (file, extraargs="") :
cmd = (oiio_app("testtex") + " " + file + " " + extraargs + " " +
redirect + ";\n")
return cmd
# Construct a command that will run oiiotool and append its output to out.txt
def oiiotool (args, silent=False, concat=True) :
cmd = (oiio_app("oiiotool") + " "
+ "-colorconfig " + colorconfig_file + " "
+ args)
if not silent :
cmd += redirect
if concat:
cmd += " ;\n"
return cmd
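# Illustrative sketch (not from the original file): an individual test's run.py
# typically builds on these helpers roughly like this, where "tahoe.tif" and the
# oiiotool arguments are made-up placeholders:
#
#   command += oiiotool("src/tahoe.tif --resize 128x128 -o resized.tif")
#   command += diff_command("resized.tif", "ref/resized.tif")
#   outputs = ["resized.tif", "out.txt"]
#
# runtest.py then executes `command` and compares every entry of `outputs`
# against the copies under ref/ (see runtest() below).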
# Check one output file against reference images in a list of reference
# directories. For each directory, it will first check for a match under
# the identical name, and if that fails, it will look for alternatives of
# the form "basename-*.ext" (or ANY match in the ref directory, if anymatch
# is True).
def checkref (name, refdirlist) :
# Break the output into prefix+extension
(prefix, extension) = os.path.splitext(name)
ok = 0
for ref in refdirlist :
# We will first compare name to ref/name, and if that fails, we will
# compare it to everything else that matches ref/prefix-*.extension.
# That allows us to have multiple matching variants for different
# platforms, etc.
defaulttest = os.path.join(ref,name)
if anymatch :
pattern = "*.*"
else :
pattern = prefix+"-*"+extension+"*"
for testfile in ([defaulttest] + glob.glob (os.path.join (ref, pattern))) :
if not os.path.exists(testfile) :
continue
# print ("comparing " + name + " to " + testfile)
if extension in image_extensions :
# images -- use idiff
cmpcommand = diff_command (name, testfile, concat=False, silent=True)
cmpresult = os.system (cmpcommand)
elif extension == ".txt" :
cmpresult = text_diff (name, testfile, name + ".diff")
else :
# anything else
cmpresult = 0
if os.path.exists(testfile) and filecmp.cmp (name, testfile) :
cmpresult = 0
else :
cmpresult = 1
if cmpresult == 0 :
return (True, testfile) # we're done
return (False, defaulttest)
# Run 'command'. For each file in 'outputs', compare it to the copy
# in 'ref/'. If all outputs match their reference copies, return 0
# to pass. If any outputs do not match their references return 1 to
# fail.
def runtest (command, outputs, failureok=0) :
err = 0
# print ("working dir = " + tmpdir)
os.chdir (srcdir)
open ("out.txt", "w").close() # truncate out.txt
open ("out.err.txt", "w").close() # truncate out.txt
if os.path.isfile("debug.log") :
os.remove ("debug.log")
if options.path != "" :
sys.path = [options.path] + sys.path
print ("command = " + command)
test_environ = None
if (platform.system () == 'Windows') and (options.solution_path != "") and \
(os.path.isdir (options.solution_path)):
test_environ = os.environ
libOIIO_args = [options.solution_path, "libOpenImageIO"]
if options.devenv_config != "":
libOIIO_args.append (options.devenv_config)
libOIIO_path = os.path.normpath (os.path.join (*libOIIO_args))
test_environ["PATH"] = libOIIO_path + ';' + test_environ["PATH"]
for sub_command in [c.strip() for c in command.split(';') if c.strip()]:
cmdret = subprocess.call (sub_command, shell=True, env=test_environ)
if cmdret != 0 and failureok == 0 :
print ("#### Error: this command failed: ", sub_command)
print ("FAIL")
err = 1
for out in outputs :
(prefix, extension) = os.path.splitext(out)
(ok, testfile) = checkref (out, refdirlist)
if ok :
if extension in image_extensions :
# If we got a match for an image, save the idiff results
os.system (diff_command (out, testfile, silent=False))
print ("PASS: " + out + " matches " + testfile)
else :
err = 1
print ("NO MATCH for " + out)
print ("FAIL " + out)
if extension == ".txt" :
# If we failed to get a match for a text file, print the
# file and the diff, for easy debugging.
print ("-----" + out + "----->")
print (open(out,'r').read() + "<----------")
print ("-----" + testfile + "----->")
print (open(testfile,'r').read() + "<----------")
os.system ("ls -al " +out+" "+testfile)
print ("Diff was:\n-------")
print (open (out+".diff", 'r').read())
if extension in image_extensions :
# If we failed to get a match for an image, send the idiff
# results to the console
os.system (diff_command (out, testfile, silent=False))
if os.path.isfile("debug.log") and os.path.getsize("debug.log") :
print ("--- DEBUG LOG ---\n")
#flog = open("debug.log", "r")
# print (flog.read())
with open("debug.log", "r") as flog :
print (flog.read())
print ("--- END DEBUG LOG ---\n")
return (err)
##########################################################################
#
# Read the individual run.py file for this test, which will define
# command and outputs.
#
with open(os.path.join(test_source_dir,"run.py")) as f:
code = compile(f.read(), "run.py", 'exec')
exec (code)
# Allow a little more slop for slight pixel differences when in DEBUG
# mode or when running on remote Travis-CI or Appveyor machines.
if (("TRAVIS" in os.environ and os.environ["TRAVIS"]) or
("APPVEYOR" in os.environ and os.environ["APPVEYOR"]) or
("DEBUG" in os.environ and os.environ["DEBUG"])) :
failthresh *= 2.0
hardfail *= 2.0
failpercent *= 2.0
# Run the test and check the outputs
ret = runtest (command, outputs, failureok=failureok)
sys.exit (ret)
| [] | [] | [ "APPVEYOR", "OIIO_TESTSUITE_SRC", "OIIO_TESTSUITE_IMAGEDIR", "TRAVIS", "DEBUG", "PYTHON_VERSION", "OIIO_TESTSUITE_ROOT", "SANITIZE" ] | [] | ["APPVEYOR", "OIIO_TESTSUITE_SRC", "OIIO_TESTSUITE_IMAGEDIR", "TRAVIS", "DEBUG", "PYTHON_VERSION", "OIIO_TESTSUITE_ROOT", "SANITIZE"] | python | 8 | 0 | |
pkg/cloud-provider/alicloud.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud_provider
import (
b64 "encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"time"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/version"
)
// ProviderName is the name of this cloud provider.
const ProviderName = "alicloud"
// CLUSTER_ID default cluster id if it is not specified.
var CLUSTER_ID = "clusterid"
// KUBERNETES_ALICLOUD_IDENTITY is for statistic purpose.
var KUBERNETES_ALICLOUD_IDENTITY = fmt.Sprintf("Kubernetes.Alicloud/%s", version.Get().String())
// Cloud defines the main struct
type Cloud struct {
climgr *ClientMgr
cfg *CloudConfig
kubeClient kubernetes.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
}
var (
// DEFAULT_CHARGE_TYPE default charge type
// DEFAULT_BANDWIDTH default bandwidth
DEFAULT_BANDWIDTH = 100
DEFAULT_NODE_MONITOR_PERIOD = 120 * time.Second
DEFAULT_NODE_ADDR_SYNC_PERIOD = 240 * time.Second
// DEFAULT_REGION should be override in cloud initialize.
//DEFAULT_REGION = common.Hangzhou
)
// CloudConfig is the cloud config
type CloudConfig struct {
UID string `json:"uid"`
ClusterID string `json:"ClusterId"`
ClusterName string `json:"ClusterName"`
AccessKeyID string `json:"AccessKeyID"`
AccessKeySecret string `json:"AccessKeySecret"`
Region string `json:"Region"`
VpcID string `json:"VpcId"`
SubnetID string `json:"SubnetId"`
MasterID string `json:"MasterId"`
Endpoint string `json:"Endpoint"`
NodeIP string `json:"NodeIP"`
Debug bool `json:"Debug"`
}
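// Illustrative cloud-config file (all values are placeholders). Note that init()
// below base64-decodes AccessKeyID, AccessKeySecret and Region, so in a real file
// those three fields hold base64-encoded strings:
//
//   {
//     "ClusterId":       "c1234567890",
//     "AccessKeyID":     "QUtJRGV4YW1wbGU=",
//     "AccessKeySecret": "c2VjcmV0ZXhhbXBsZQ==",
//     "Region":          "Y24taGFuZ3pob3U=",
//     "VpcId":           "vpc-example"
//   }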
// CCMVersion is the version of CCM
var CCMVersion string
var cfg CloudConfig
func init() {
cloudprovider.RegisterCloudProvider(ProviderName,
func(config io.Reader) (cloudprovider.Interface, error) {
var (
keyid = ""
keysecret = ""
regionid = ""
)
if config != nil {
if err := json.NewDecoder(config).Decode(&cfg); err != nil {
return nil, err
}
if cfg.AccessKeyID != "" && cfg.AccessKeySecret != "" && cfg.Region != "" {
key, err := b64.StdEncoding.DecodeString(cfg.AccessKeyID)
if err != nil {
return nil, err
}
keyid = string(key)
secret, err := b64.StdEncoding.DecodeString(cfg.AccessKeySecret)
if err != nil {
return nil, err
}
keysecret = string(secret)
region, err := b64.StdEncoding.DecodeString(cfg.Region)
if err != nil {
return nil, err
}
regionid = string(region)
glog.V(2).Infof("Alicloud: Try Accesskey AccessKeySecret and Region from config file.")
}
if cfg.ClusterID != "" {
CLUSTER_ID = cfg.ClusterID
glog.Infof("use clusterid %s", CLUSTER_ID)
}
}
if keyid == "" || keysecret == "" {
glog.V(2).Infof("cloud config does not have keyid and keysecret . try environment ACCESS_KEY_ID ACCESS_KEY_SECRET REGION_ID")
keyid = os.Getenv("ACCESS_KEY_ID")
keysecret = os.Getenv("ACCESS_KEY_SECRET")
regionid = os.Getenv("REGION_ID")
}
mgr, err := NewClientMgr(regionid, keyid, keysecret)
if err != nil {
return nil, err
}
return newAliCloud(mgr)
})
}
func newAliCloud(mgr *ClientMgr) (*Cloud, error) {
return &Cloud{
climgr: mgr,
cfg: &cfg,
}, nil
}
// Initialize provides the cloud with a kubernetes client builder and may spawn goroutines
// to perform housekeeping activities within the cloud provider.
func (c *Cloud) Initialize(builder controller.ControllerClientBuilder) {
c.kubeClient = builder.ClientOrDie(ProviderName)
c.eventBroadcaster = record.NewBroadcaster()
c.eventBroadcaster.StartLogging(glog.Infof)
c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")})
c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "CCM"})
}
func (c *Cloud) ProviderName() string {
return ProviderName
}
| [ "\"ACCESS_KEY_ID\"", "\"ACCESS_KEY_SECRET\"", "\"REGION_ID\"" ] | [] | [ "ACCESS_KEY_SECRET", "ACCESS_KEY_ID", "REGION_ID" ] | [] | ["ACCESS_KEY_SECRET", "ACCESS_KEY_ID", "REGION_ID"] | go | 3 | 0 | |
gateware/de0nanoplatform.py | #
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
"""
The DE0 Nano does not have an explicit USB port. Instead, you'll need to connect an external ULPI PHY breakout,
such as https://www.waveshare.com/wiki/USB3300_USB_HS_Board.
See the pin definitions below for connection information (ULPIResource).
The DE0 Nano is an -unsupported- platform! To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.de0_nano:DE0NanoPlatform"
"""
import os
import logging
import subprocess
from nmigen import *
from nmigen.build import *
from nmigen.vendor.intel import IntelPlatform
from nmigen_boards.resources import *
from luna.gateware.platform.core import LUNAPlatform
__all__ = ["DE0NanoPlatform"]
class DE0NanoClockAndResetController(Elaboratable):
""" Controller for de0_nano's clocking and global resets. """
def __init__(self, *, clock_frequencies=None, clock_signal_name=None):
pass
def elaborate(self, platform):
m = Module()
# Create our domains; but don't do anything else for them, for now.
m.domains.sync = ClockDomain()
m.domains.usb = ClockDomain()
m.domains.jt51 = ClockDomain()
m.domains.adat = ClockDomain()
m.submodules.mainpll = Instance("ALTPLL",
p_BANDWIDTH_TYPE = "AUTO",
p_CLK0_DIVIDE_BY = 1,
p_CLK0_DUTY_CYCLE = 50,
p_CLK0_MULTIPLY_BY = 1,
p_CLK0_PHASE_SHIFT = 0,
p_INCLK0_INPUT_FREQUENCY = 16666,
p_OPERATION_MODE = "NORMAL",
# Drive our clock from the USB clock
# coming from the USB clock pin of the USB3300
i_inclk = ClockSignal("usb"),
o_clk = ClockSignal("sync"),
)
m.submodules.jt51pll = Instance("ALTPLL",
p_BANDWIDTH_TYPE = "AUTO",
p_CLK0_DIVIDE_BY = 218,
p_CLK0_DUTY_CYCLE = 50,
p_CLK0_MULTIPLY_BY = 13,
p_CLK0_PHASE_SHIFT = 0,
p_INCLK0_INPUT_FREQUENCY = 16666,
p_OPERATION_MODE = "NORMAL",
# Drive our clock from the USB clock
# coming from the USB clock pin of the USB3300
i_inclk = ClockSignal("usb"),
o_clk = ClockSignal("jt51"),
)
m.submodules.adatpll = Instance("ALTPLL",
p_BANDWIDTH_TYPE = "AUTO",
p_CLK0_DIVIDE_BY = 83,
p_CLK0_DUTY_CYCLE = 50,
p_CLK0_MULTIPLY_BY = 17,
p_CLK0_PHASE_SHIFT = 0,
p_INCLK0_INPUT_FREQUENCY = 16666,
p_OPERATION_MODE = "NORMAL",
# Drive our clock from the USB clock
# coming from the USB clock pin of the USB3300
i_inclk = ClockSignal("usb"),
o_clk = ClockSignal("adat"),
)
# Use a blinky to see if the clock signal works
# from nmigen_boards.test.blinky import Blinky
# m.submodules += Blinky()
return m
class DE0NanoPlatform(IntelPlatform, LUNAPlatform):
""" This is a de0_nano board with an USB3300 PHY attached to JP_2 """
name = "de0_nano"
device = "EP4CE22"
package = "F17"
speed = "C6"
default_clk = "clk_50MHz"
clock_domain_generator = DE0NanoClockAndResetController
default_usb_connection = "ulpi"
ignore_phy_vbus = True
def __init__(self, *args, **kwargs):
logging.warning("This platform is not officially supported, and thus not tested. Your results may vary.")
logging.warning("Note also that this platform does not use the DE0 nano's main USB port!")
logging.warning("You'll need to connect a ULPI PHY breakout. See the platform file for more info.")
super().__init__(*args, **kwargs)
#
# I/O resources.
#
resources = [
# Primary clock generator clocks.
Resource("clk_50MHz", 0, Pins("R8", dir="i"), Clock(50e6), Attrs(io_standard="3.3-V LVTTL")),
# USB2 / ULPI section of the USB3300.
ULPIResource("ulpi", 0,
data="JP_2:27 JP_2:25 JP_2:23 JP_2:21 JP_2:19 JP_2:17 JP_2:15 JP_2:13",
clk="JP_2:1", # this needs to be a clock pin of the FPGA or the core won't work
dir="JP_2:18", nxt="JP_2:16", stp="JP_2:14", rst="JP_2:22",
attrs=Attrs(io_standard="3.3-V LVCMOS")
),
UARTResource(0,
# GND on JP1 Pin 12.
rx="JP_1:8", tx="JP_1:10",
attrs=Attrs(io_standard="3.3-V LVTTL")),
*LEDResources(
pins="A15 A13 B13 A11 D1 F3 B1 L3",
attrs=Attrs(io_standard="3.3-V LVTTL")),
*ButtonResources(
pins="J15 E1", invert=True,
attrs=Attrs(io_standard="3.3-V LVTTL")),
*SwitchResources(
pins="M1 T8 B9 M15",
attrs=Attrs(io_standard="3.3-V LVTTL")),
SDRAMResource(0,
clk="R4", cke="L7", cs_n="P6", we_n="C2", ras_n="L2", cas_n="L1",
ba="M7 M6", a="P2 N5 N6 M8 P8 T7 N8 T6 R1 P1 N2 N1 L4",
dq="G2 G1 L8 K5 K2 J2 J1 R7 T4 T2 T3 R3 R5 P3 N3 K1", dqm="R6 T5",
attrs=Attrs(io_standard="3.3-V LVTTL")),
# Accelerometer
Resource("acc", 0,
Subsignal("cs_n", Pins("G5", dir="o")),
Subsignal("int", Pins("M2", dir="i")),
Attrs(io_standard="3.3-V LVTTL")),
# I2C is part of the Accelerometer
I2CResource(0,
scl="F2", sda="F1",
attrs=Attrs(io_standard="3.3-V LVTTL")),
# ADC
Resource("adc", 0,
Subsignal("cs_n", Pins("A10")),
Subsignal("saddr", Pins("B10")),
Subsignal("sclk", Pins("B14")),
Subsignal("sdat", Pins("A9")),
Attrs(io_standard="3.3-V LVTTL")),
# ECPS
Resource("epcs", 0,
Subsignal("data0", Pins("H2")),
Subsignal("dclk", Pins("H1")),
Subsignal("ncs0", Pins("D2")),
Subsignal("asd0", Pins("C1")),
Attrs(io_standard="3.3-V LVTTL")),
Resource("adat", 0,
Subsignal("tx", Pins("JP_3:5", dir="o")),
Subsignal("rx", Pins("JP_3:6", dir="i")),
Attrs(io_standard="3.3-V LVTTL")),
]
connectors = [
# PIN 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
Connector("JP", 1, "A8 D3 B8 C3 A2 A3 B3 B4 A4 B5 - - A5 D5 B6 A6 B7 D6 A7 C6 C8 E6 E7 D8 E8 F8 F9 E9 - - C9 D9 E11 E10 C11 B11 A12 D11 D12 B12"),
Connector("JP", 2, "T9 F13 R9 T15 T14 T13 R13 T12 R12 T11 - - T10 R11 P11 R10 N12 P9 N9 N11 L16 K16 R16 L15 P15 P16 R14 N16 - - N15 P14 L14 N14 M10 L13 J16 K15 J13 J14"),
Connector("JP", 3, "- E15 E16 M16 A14 B16 C14 C16 C15 D16 D15 D14 F15 F16 F14 G16 G15 - - - - - - - - -")
]
@property
def file_templates(self):
templates = super().file_templates
templates["{{name}}.qsf"] += r"""
set_global_assignment -name OPTIMIZATION_MODE "Aggressive Performance"
set_global_assignment -name FITTER_EFFORT "Standard Fit"
set_global_assignment -name PHYSICAL_SYNTHESIS_EFFORT "Extra"
set_instance_assignment -name DECREASE_INPUT_DELAY_TO_INPUT_REGISTER OFF -to *ulpi*
set_instance_assignment -name INCREASE_DELAY_TO_OUTPUT_PIN OFF -to *ulpi*
set_global_assignment -name NUM_PARALLEL_PROCESSORS ALL
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_sincf.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_interpol.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir_ram.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir8.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir4.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_dac2.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_timers.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_sh.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_reg.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_pm.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_phrom.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_phinc_rom.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_pg.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_op.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_noise_lfsr.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_noise.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_mod.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_mmr.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_lin2exp.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_lfo.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_kon.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_exprom.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_exp2lin.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_eg.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_csr_op.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_csr_ch.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_acc.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51.v
"""
templates["{{name}}.sdc"] += r"""
create_clock -name "clk_60MHz" -period 16.667 [get_ports "ulpi_0__clk__io"]
"""
return templates
def toolchain_program(self, products, name):
""" Programs the attached de0_nano board via a Quartus programming cable. """
quartus_pgm = os.environ.get("QUARTUS_PGM", "quartus_pgm")
with products.extract("{}.sof".format(name)) as bitstream_filename:
subprocess.check_call([quartus_pgm, "--haltcc", "--mode", "JTAG",
"--operation", "P;" + bitstream_filename])
| [] | [] | [ "QUARTUS_PGM" ] | [] | ["QUARTUS_PGM"] | python | 1 | 0 | |
provider/coredns.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"os"
"strings"
"time"
etcdcv3 "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"github.com/kubernetes-incubator/external-dns/endpoint"
"github.com/kubernetes-incubator/external-dns/plan"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
const (
priority = 10 // default priority when nothing is set
etcdTimeout = 5 * time.Second
coreDNSPrefix = "/skydns/"
randomPrefixLabel = "prefix"
)
// coreDNSClient is an interface to work with CoreDNS service records in etcd
type coreDNSClient interface {
GetServices(prefix string) ([]*Service, error)
SaveService(value *Service) error
DeleteService(key string) error
}
type coreDNSProvider struct {
dryRun bool
domainFilter DomainFilter
client coreDNSClient
}
// Service represents CoreDNS etcd record
type Service struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
Priority int `json:"priority,omitempty"`
Weight int `json:"weight,omitempty"`
Text string `json:"text,omitempty"`
Mail bool `json:"mail,omitempty"` // Be an MX record. Priority becomes Preference.
TTL uint32 `json:"ttl,omitempty"`
// When a SRV record with a "Host: IP-address" is added, we synthesize
// a srv.Target domain name. Normally we convert the full Key where
// the record lives to a DNS name and use this as the srv.Target. When
// TargetStrip > 0 we strip the left most TargetStrip labels from the
// DNS name.
TargetStrip int `json:"targetstrip,omitempty"`
// Group is used to group (or *not* to group) different services
// together. Services with an identical Group are returned in the same
// answer.
Group string `json:"group,omitempty"`
// Etcd key where we found this service and ignored from json un-/marshalling
Key string `json:"-"`
}
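// Illustrative etcd entry (domain and address are placeholders): a record for
// "app.example.org" is stored under the reversed-domain key
//
//   /skydns/org/example/app/4d2f3c1a
//
// with a JSON value such as {"host":"1.2.3.4","ttl":300}. The trailing
// "4d2f3c1a" label is the random prefix added by ApplyChanges; TargetStrip
// records how many labels to strip so Records() can recover "app.example.org".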
type etcdClient struct {
client *etcdcv3.Client
ctx context.Context
}
var _ coreDNSClient = etcdClient{}
// GetService return all Service records stored in etcd stored anywhere under the given key (recursively)
func (c etcdClient) GetServices(prefix string) ([]*Service, error) {
ctx, cancel := context.WithTimeout(c.ctx, etcdTimeout)
defer cancel()
path := prefix
r, err := c.client.Get(ctx, path, etcdcv3.WithPrefix())
if err != nil {
return nil, err
}
var svcs []*Service
bx := make(map[Service]bool)
for _, n := range r.Kvs {
svc := new(Service)
if err := json.Unmarshal(n.Value, svc); err != nil {
return nil, fmt.Errorf("%s: %s", n.Key, err.Error())
}
b := Service{Host: svc.Host, Port: svc.Port, Priority: svc.Priority, Weight: svc.Weight, Text: svc.Text, Key: string(n.Key)}
if _, ok := bx[b]; ok {
// skip the service if already added to service list.
// the same service might be found in multiple etcd nodes.
continue
}
bx[b] = true
svc.Key = string(n.Key)
if svc.Priority == 0 {
svc.Priority = priority
}
svcs = append(svcs, svc)
}
return svcs, nil
}
// SaveService persists service data into etcd
func (c etcdClient) SaveService(service *Service) error {
ctx, cancel := context.WithTimeout(c.ctx, etcdTimeout)
defer cancel()
value, err := json.Marshal(&service)
if err != nil {
return err
}
_, err = c.client.Put(ctx, service.Key, string(value))
if err != nil {
return err
}
return nil
}
// DeleteService deletes service record from etcd
func (c etcdClient) DeleteService(key string) error {
ctx, cancel := context.WithTimeout(c.ctx, etcdTimeout)
defer cancel()
_, err := c.client.Delete(ctx, key)
return err
}
// loads TLS artifacts and builds a tls.Config object
func newTLSConfig(certPath, keyPath, caPath, serverName string, insecure bool) (*tls.Config, error) {
if certPath != "" && keyPath == "" || certPath == "" && keyPath != "" {
return nil, errors.New("either both cert and key or none must be provided")
}
var certificates []tls.Certificate
if certPath != "" {
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, fmt.Errorf("could not load TLS cert: %s", err)
}
certificates = append(certificates, cert)
}
roots, err := loadRoots(caPath)
if err != nil {
return nil, err
}
return &tls.Config{
Certificates: certificates,
RootCAs: roots,
InsecureSkipVerify: insecure,
ServerName: serverName,
}, nil
}
// loads CA cert
func loadRoots(caPath string) (*x509.CertPool, error) {
if caPath == "" {
return nil, nil
}
roots := x509.NewCertPool()
pem, err := ioutil.ReadFile(caPath)
if err != nil {
return nil, fmt.Errorf("error reading %s: %s", caPath, err)
}
ok := roots.AppendCertsFromPEM(pem)
if !ok {
return nil, fmt.Errorf("could not read root certs: %s", err)
}
return roots, nil
}
// builds etcd client config depending on connection scheme and TLS parameters
func getETCDConfig() (*etcdcv3.Config, error) {
etcdURLsStr := os.Getenv("ETCD_URLS")
if etcdURLsStr == "" {
etcdURLsStr = "http://localhost:2379"
}
etcdURLs := strings.Split(etcdURLsStr, ",")
firstURL := strings.ToLower(etcdURLs[0])
if strings.HasPrefix(firstURL, "http://") {
return &etcdcv3.Config{Endpoints: etcdURLs}, nil
} else if strings.HasPrefix(firstURL, "https://") {
caFile := os.Getenv("ETCD_CA_FILE")
certFile := os.Getenv("ETCD_CERT_FILE")
keyFile := os.Getenv("ETCD_KEY_FILE")
serverName := os.Getenv("ETCD_TLS_SERVER_NAME")
isInsecureStr := strings.ToLower(os.Getenv("ETCD_TLS_INSECURE"))
isInsecure := isInsecureStr == "true" || isInsecureStr == "yes" || isInsecureStr == "1"
tlsConfig, err := newTLSConfig(certFile, keyFile, caFile, serverName, isInsecure)
if err != nil {
return nil, err
}
return &etcdcv3.Config{
Endpoints: etcdURLs,
TLS: tlsConfig,
}, nil
} else {
return nil, errors.New("etcd URLs must start with either http:// or https://")
}
}
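// Example environment for an etcd cluster reached over TLS (URLs and paths are
// placeholders):
//
//   ETCD_URLS=https://etcd-1:2379,https://etcd-2:2379
//   ETCD_CA_FILE=/etc/ssl/etcd/ca.crt
//   ETCD_CERT_FILE=/etc/ssl/etcd/client.crt
//   ETCD_KEY_FILE=/etc/ssl/etcd/client.key
//   ETCD_TLS_INSECURE=false
//
// With no ETCD_URLS set, the provider defaults to http://localhost:2379.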
//newETCDClient is an etcd client constructor
func newETCDClient() (coreDNSClient, error) {
cfg, err := getETCDConfig()
if err != nil {
return nil, err
}
c, err := etcdcv3.New(*cfg)
if err != nil {
return nil, err
}
return etcdClient{c, context.Background()}, nil
}
// NewCoreDNSProvider is a CoreDNS provider constructor
func NewCoreDNSProvider(domainFilter DomainFilter, dryRun bool) (Provider, error) {
client, err := newETCDClient()
if err != nil {
return nil, err
}
return coreDNSProvider{
client: client,
dryRun: dryRun,
domainFilter: domainFilter,
}, nil
}
// Records returns all DNS records found in CoreDNS etcd backend. Depending on the record fields
// it may be mapped to one or two records of type A, CNAME, TXT, A+TXT, CNAME+TXT
func (p coreDNSProvider) Records() ([]*endpoint.Endpoint, error) {
var result []*endpoint.Endpoint
services, err := p.client.GetServices(coreDNSPrefix)
if err != nil {
return nil, err
}
for _, service := range services {
domains := strings.Split(strings.TrimPrefix(service.Key, coreDNSPrefix), "/")
reverse(domains)
dnsName := strings.Join(domains[service.TargetStrip:], ".")
if !p.domainFilter.Match(dnsName) {
continue
}
prefix := strings.Join(domains[:service.TargetStrip], ".")
if service.Host != "" {
ep := endpoint.NewEndpointWithTTL(
dnsName,
guessRecordType(service.Host),
endpoint.TTL(service.TTL),
service.Host,
)
ep.Labels["originalText"] = service.Text
ep.Labels[randomPrefixLabel] = prefix
result = append(result, ep)
}
if service.Text != "" {
ep := endpoint.NewEndpoint(
dnsName,
endpoint.RecordTypeTXT,
service.Text,
)
ep.Labels[randomPrefixLabel] = prefix
result = append(result, ep)
}
}
return result, nil
}
// ApplyChanges stores changes back to etcd converting them to CoreDNS format and aggregating A/CNAME and TXT records
func (p coreDNSProvider) ApplyChanges(changes *plan.Changes) error {
grouped := map[string][]*endpoint.Endpoint{}
for _, ep := range changes.Create {
grouped[ep.DNSName] = append(grouped[ep.DNSName], ep)
}
for i, ep := range changes.UpdateNew {
ep.Labels[randomPrefixLabel] = changes.UpdateOld[i].Labels[randomPrefixLabel]
grouped[ep.DNSName] = append(grouped[ep.DNSName], ep)
}
for dnsName, group := range grouped {
if !p.domainFilter.Match(dnsName) {
log.Debugf("Skipping record %s because it was filtered out by the specified --domain-filter", dnsName)
continue
}
var services []Service
for _, ep := range group {
if ep.RecordType == endpoint.RecordTypeTXT {
continue
}
prefix := ep.Labels[randomPrefixLabel]
if prefix == "" {
prefix = fmt.Sprintf("%08x", rand.Int31())
}
service := Service{
Host: ep.Targets[0],
Text: ep.Labels["originalText"],
Key: etcdKeyFor(prefix + "." + dnsName),
TargetStrip: strings.Count(prefix, ".") + 1,
TTL: uint32(ep.RecordTTL),
}
services = append(services, service)
}
index := 0
for _, ep := range group {
if ep.RecordType != "TXT" {
continue
}
if index >= len(services) {
prefix := ep.Labels[randomPrefixLabel]
if prefix == "" {
prefix = fmt.Sprintf("%08x", rand.Int31())
}
services = append(services, Service{
Key: etcdKeyFor(prefix + "." + dnsName),
TargetStrip: strings.Count(prefix, ".") + 1,
TTL: uint32(ep.RecordTTL),
})
}
services[index].Text = ep.Targets[0]
index++
}
for i := index; index > 0 && i < len(services); i++ {
services[i].Text = ""
}
for _, service := range services {
log.Infof("Add/set key %s to Host=%s, Text=%s, TTL=%d", service.Key, service.Host, service.Text, service.TTL)
if !p.dryRun {
err := p.client.SaveService(&service)
if err != nil {
return err
}
}
}
}
for _, ep := range changes.Delete {
dnsName := ep.DNSName
if ep.Labels[randomPrefixLabel] != "" {
dnsName = ep.Labels[randomPrefixLabel] + "." + dnsName
}
key := etcdKeyFor(dnsName)
log.Infof("Delete key %s", key)
if !p.dryRun {
err := p.client.DeleteService(key)
if err != nil {
return err
}
}
}
return nil
}
func guessRecordType(target string) string {
if net.ParseIP(target) != nil {
return endpoint.RecordTypeA
}
return endpoint.RecordTypeCNAME
}
func etcdKeyFor(dnsName string) string {
domains := strings.Split(dnsName, ".")
reverse(domains)
return coreDNSPrefix + strings.Join(domains, "/")
}
func reverse(slice []string) {
for i := 0; i < len(slice)/2; i++ {
j := len(slice) - i - 1
slice[i], slice[j] = slice[j], slice[i]
}
}
| [ "\"ETCD_URLS\"", "\"ETCD_CA_FILE\"", "\"ETCD_CERT_FILE\"", "\"ETCD_KEY_FILE\"", "\"ETCD_TLS_SERVER_NAME\"", "\"ETCD_TLS_INSECURE\"" ] | [] | [ "ETCD_TLS_SERVER_NAME", "ETCD_URLS", "ETCD_KEY_FILE", "ETCD_TLS_INSECURE", "ETCD_CA_FILE", "ETCD_CERT_FILE" ] | [] | ["ETCD_TLS_SERVER_NAME", "ETCD_URLS", "ETCD_KEY_FILE", "ETCD_TLS_INSECURE", "ETCD_CA_FILE", "ETCD_CERT_FILE"] | go | 6 | 0 | |
work/src/py/google/protobuf/internal/api_implementation.py | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Determine which implementation of the protobuf API is used in this process.
"""
import os
import sys
try:
# pylint: disable=g-import-not-at-top
from google.protobuf.internal import _api_implementation
# The compile-time constants in the _api_implementation module can be used to
# switch to a certain implementation of the Python API at build time.
_api_version = _api_implementation.api_version
_proto_extension_modules_exist_in_build = True
except ImportError:
_api_version = -1 # Unspecified by compiler flags.
_proto_extension_modules_exist_in_build = False
if _api_version == 1:
raise ValueError('api_version=1 is no longer supported.')
if _api_version < 0: # Still unspecified?
try:
# The presence of this module in a build allows the proto implementation to
# be upgraded merely via build deps rather than a compiler flag or the
# runtime environment variable.
# pylint: disable=g-import-not-at-top
from google.protobuf import _use_fast_cpp_protos
# Work around a known issue in the classic bootstrap .par import hook.
if not _use_fast_cpp_protos:
raise ImportError('_use_fast_cpp_protos import succeeded but was None')
del _use_fast_cpp_protos
_api_version = 2
except ImportError:
if _proto_extension_modules_exist_in_build:
if sys.version_info[0] >= 3: # Python 3 defaults to C++ impl v2.
_api_version = 2
# TODO(b/17427486): Make Python 2 default to C++ impl v2.
_default_implementation_type = (
'python' if _api_version <= 0 else 'cpp')
# This environment variable can be used to switch to a certain implementation
# of the Python API, overriding the compile-time constants in the
# _api_implementation module. Right now only 'python' and 'cpp' are valid
# values. Any other value will be ignored.
_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
_default_implementation_type)
if _implementation_type != 'python':
_implementation_type = 'cpp'
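# For example (shell syntax, illustrative): forcing the pure-Python
# implementation for a single run of a script can be done with
#
#   PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python python my_script.py
#
# Any value other than 'python' is treated as 'cpp' by the logic above.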
# This environment variable can be used to switch between the two
# 'cpp' implementations, overriding the compile-time constants in the
# _api_implementation module. Right now only '2' is supported. Any other
# value will cause an error to be raised.
_implementation_version_str = os.getenv(
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION', '2')
if _implementation_version_str != '2':
raise ValueError(
'unsupported PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION: "' +
_implementation_version_str + '" (supported versions: 2)'
)
_implementation_version = int(_implementation_version_str)
# Usage of this function is discouraged. Clients shouldn't care which
# implementation of the API is in use. Note that there is no guarantee
# that differences between APIs will be maintained.
# Please don't use this function if possible.
def Type():
return _implementation_type
# See comment on 'Type' above.
def Version():
return _implementation_version
| [] | [] | [ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION", "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION" ] | [] | ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION", "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] | python | 2 | 0 | |
python/examples/env-os.py | #!/usr/bin/env python3
import os
print('Home:', os.environ['HOME'])
print('My:', os.environ.get('MY'))
print('Foo:', os.environ.get('FOO', 'default_value'))
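# Example run (illustrative): invoking this as `MY=hello ./env-os.py` with FOO
# unset prints the user's home directory, then "My: hello", then
# "Foo: default_value". HOME is read with os.environ[...], which would raise
# KeyError if it were missing; MY and FOO use os.environ.get() and so fall back
# to None or the supplied default instead.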
| [] | [] | [ "HOME", "FOO", "MY" ] | [] | ["HOME", "FOO", "MY"] | python | 3 | 0 | |
tests/dbshell/test_postgresql_psycopg2.py | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import locale
import os
from django.db.backends.postgresql.client import DatabaseClient
from django.test import SimpleTestCase, mock
from django.utils import six
from django.utils.encoding import force_bytes, force_str
class PostgreSqlDbshellCommandTestCase(SimpleTestCase):
def _run_it(self, dbinfo):
"""
This function invokes the runshell command while mocking
subprocess.call. It returns a 2-tuple with:
- The command line list
- The binary content of file pointed by environment PGPASSFILE, or
None.
"""
def _mock_subprocess_call(*args):
self.subprocess_args = list(*args)
if 'PGPASSFILE' in os.environ:
with open(os.environ['PGPASSFILE'], 'rb') as f:
self.pgpass = f.read().strip() # ignore line endings
else:
self.pgpass = None
return 0
self.subprocess_args = None
self.pgpass = None
with mock.patch('subprocess.call', new=_mock_subprocess_call):
DatabaseClient.runshell_db(dbinfo)
return self.subprocess_args, self.pgpass
def test_basic(self):
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'someuser',
'PASSWORD': 'somepassword',
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
b'somehost:444:dbname:someuser:somepassword',
)
)
def test_nopass(self):
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'someuser',
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
None,
)
)
def test_column(self):
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'some:user',
'PASSWORD': 'some:password',
'HOST': '::1',
'PORT': 444,
}), (
['psql', '-U', 'some:user', '-h', '::1', '-p', '444', 'dbname'],
b'\\:\\:1:444:dbname:some\\:user:some\\:password',
)
)
def test_escape_characters(self):
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'some\\user',
'PASSWORD': 'some\\password',
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', 'some\\user', '-h', 'somehost', '-p', '444', 'dbname'],
b'somehost:444:dbname:some\\\\user:some\\\\password',
)
)
def test_accent(self):
# The pgpass temporary file needs to be encoded using the system locale.
encoding = locale.getpreferredencoding()
username = 'rôle'
password = 'sésame'
try:
username_str = force_str(username, encoding)
password_str = force_str(password, encoding)
pgpass_bytes = force_bytes(
'somehost:444:dbname:%s:%s' % (username, password),
encoding=encoding,
)
except UnicodeEncodeError:
if six.PY2:
self.skipTest("Your locale can't run this test.")
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': username_str,
'PASSWORD': password_str,
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', username_str, '-h', 'somehost', '-p', '444', 'dbname'],
pgpass_bytes,
)
)
| [] | [] | [ "PGPASSFILE" ] | [] | ["PGPASSFILE"] | python | 1 | 0 | |
vendor/github.com/docker/cli/cli/command/commands/commands.go | package commands
import (
"os"
"runtime"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/builder"
"github.com/docker/cli/cli/command/container"
"github.com/docker/cli/cli/command/engine"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/command/manifest"
"github.com/docker/cli/cli/command/network"
"github.com/docker/cli/cli/command/registry"
"github.com/docker/cli/cli/command/system"
"github.com/docker/cli/cli/command/trust"
"github.com/docker/cli/cli/command/volume"
"github.com/spf13/cobra"
)
// AddCommands adds all the commands from cli/command to the root command
func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
cmd.AddCommand(
// container
container.NewContainerCommand(dockerCli),
container.NewRunCommand(dockerCli),
// image
image.NewImageCommand(dockerCli),
image.NewBuildCommand(dockerCli),
// builder
builder.NewBuilderCommand(dockerCli),
// manifest
manifest.NewManifestCommand(dockerCli),
// network
network.NewNetworkCommand(dockerCli),
// registry
registry.NewLoginCommand(dockerCli),
registry.NewLogoutCommand(dockerCli),
registry.NewSearchCommand(dockerCli),
// system
system.NewSystemCommand(dockerCli),
system.NewVersionCommand(dockerCli),
// trust
trust.NewTrustCommand(dockerCli),
// volume
volume.NewVolumeCommand(dockerCli),
// legacy commands may be hidden
hide(system.NewEventsCommand(dockerCli)),
hide(system.NewInfoCommand(dockerCli)),
hide(system.NewInspectCommand(dockerCli)),
hide(container.NewAttachCommand(dockerCli)),
hide(container.NewCommitCommand(dockerCli)),
hide(container.NewCopyCommand(dockerCli)),
hide(container.NewCreateCommand(dockerCli)),
hide(container.NewDiffCommand(dockerCli)),
hide(container.NewExecCommand(dockerCli)),
hide(container.NewExportCommand(dockerCli)),
hide(container.NewKillCommand(dockerCli)),
hide(container.NewLogsCommand(dockerCli)),
hide(container.NewPauseCommand(dockerCli)),
hide(container.NewPortCommand(dockerCli)),
hide(container.NewPsCommand(dockerCli)),
hide(container.NewRenameCommand(dockerCli)),
hide(container.NewRestartCommand(dockerCli)),
hide(container.NewRmCommand(dockerCli)),
hide(container.NewStartCommand(dockerCli)),
hide(container.NewStatsCommand(dockerCli)),
hide(container.NewStopCommand(dockerCli)),
hide(container.NewTopCommand(dockerCli)),
hide(container.NewUnpauseCommand(dockerCli)),
hide(container.NewUpdateCommand(dockerCli)),
hide(container.NewWaitCommand(dockerCli)),
hide(image.NewHistoryCommand(dockerCli)),
hide(image.NewImagesCommand(dockerCli)),
hide(image.NewImportCommand(dockerCli)),
hide(image.NewLoadCommand(dockerCli)),
hide(image.NewPullCommand(dockerCli)),
hide(image.NewPushCommand(dockerCli)),
hide(image.NewRemoveCommand(dockerCli)),
hide(image.NewSaveCommand(dockerCli)),
hide(image.NewTagCommand(dockerCli)),
)
if runtime.GOOS == "linux" {
// engine
cmd.AddCommand(engine.NewEngineCommand(dockerCli))
}
}
func hide(cmd *cobra.Command) *cobra.Command {
// If the environment variable named "DOCKER_HIDE_LEGACY_COMMANDS" is not empty,
// these legacy commands (such as `docker ps`, `docker exec`, etc.)
// will not be shown in the console output.
if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" {
return cmd
}
cmdCopy := *cmd
cmdCopy.Hidden = true
cmdCopy.Aliases = []string{}
return &cmdCopy
}
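// Illustrative usage (shell session is hypothetical):
//
//   $ DOCKER_HIDE_LEGACY_COMMANDS=1 docker help   # top-level `ps`, `exec`, ... are hidden
//   $ docker help                                 # legacy commands are listed as usual
//
// The commands stay runnable either way; only their visibility in help output changes.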
| [
"\"DOCKER_HIDE_LEGACY_COMMANDS\""
] | [] | [
"DOCKER_HIDE_LEGACY_COMMANDS"
] | [] | ["DOCKER_HIDE_LEGACY_COMMANDS"] | go | 1 | 0 | |
cybergis_compute_client/CyberGISCompute.py | """
This module exposes CyberGISCompute class which creates a CyberGISCompute
object that serves as an entry point to the CyberGISX environment from a Python/Jupyter notebook.
All interactions with the High Performance Computing (HPC) backend are performed using this object.
Example:
cybergis = CyberGISCompute(url='localhost', port='3030', protocol='HTTP', isJupyter=False)
"""
from .Client import *
from .Job import *
from .UI import *
import base64
import os
from IPython.display import display, Markdown, Javascript
class CyberGISCompute:
"""
CyberGISCompute class
An interface that handles all interactions with the HPC backend
Attributes:
client (Client object) : Initialized using url(str), protocol(str), port(str) and suffix(str)
jupyterhubApiToken (string) : jupyterhub's REST API token that can be used to authenticate the user
(https://jhubdocs.readthedocs.io/en/latest/jupyterhub/docs/source/rest.html)
username (string) : username
isJupyter (boolean) : set to True if you are working in a jupyter environment.
If you are working in a simple Python environment then set to False
ui (UI) : Serves as entry point to UI functionality
job (Job) : Serves as entry point to access job interactions
recentDownloadPath (str) : Gets the most recent download path from globus
jupyterhubHost (str) : static variable that stores the path to jupyterhubHost
"""
# static variable
jupyterhubHost = None
job = None
def __init__(self, url="cgjobsup.cigi.illinois.edu", port=443, protocol='HTTPS', suffix="", isJupyter=True):
"""
Initializes instance CyberGISCompute using inputs from the client
Args:
url (str) : url that needs to be accessed
port (str) : port of the Jupyter or Python interface
protocol (str) : Typically HTTP or HTTPS
suffix (str) : specify version. For e.g v2
isJupyter(booleans) : set to True if you are using Jupyter environment
Returns:
(obj) : this CyberGISCompute
"""
self.client = Client(url=url, protocol=protocol, port=port, suffix=suffix)
self.jupyterhubApiToken = None
self.username = None
self.isJupyter = isJupyter
self.ui = UI(self)
if isJupyter:
self.enable_jupyter()
# job
self.job = None
self.recentDownloadPath = None
def login(self, manualLogin=True):
"""
Authenticates the client's jupyterhubApiToken and gives them access
to CyberGISCompute features
Args:
manualLogin (boolean) : set to True if env variable and file login modes are not available
Returns :
None
"""
if self.jupyterhubApiToken is not None:
print('🎯 Logged in as ' + self.username)
return
# login via env variable
envToken = os.getenv('JUPYTERHUB_API_TOKEN')
if envToken is not None:
print('💻 Found system token')
try:
token = base64.b64encode((self.jupyterhubHost + '@' + envToken).encode('ascii')).decode('utf-8')
res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
self.jupyterhubApiToken = token
self.username = res['username']
return self.login()
except:
print('❌ Failed to login via system token')
# login via file
if path.exists('./cybergis_compute_user.json'):
with open(os.path.abspath('cybergis_compute_user.json')) as f:
user = json.load(f)
token = user['token']
print('📃 Found "cybergis_compute_user.json"')
try:
res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
self.jupyterhubApiToken = token
self.username = res['username']
return self.login()
except:
print('❌ Failed to login via token JSON file')
print('NOTE: if you want to login as another user, please remove this file')
elif manualLogin:
if self.isJupyter:
if (self.jupyterhubHost is not None):
import getpass
print('📢 Please go to Control Panel -> Token, request a new API token')
token = getpass.getpass('enter your API token here')
token = base64.b64encode((self.jupyterhubHost + '@' + token).encode('ascii')).decode('utf-8')
try:
res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
self.jupyterhubApiToken = token
self.username = res['username']
with open('./cybergis_compute_user.json', 'w') as json_file:
json.dump({"token": token}, json_file)
return self.login()
except:
print('❌ Failed to login via user input')
else:
print('❌ You might not be working in a web browser, or JavaScript may be disabled')
else:
print('❌ Enable Jupyter using .enable_jupyter() before you login')
else:
print('❌ Not logged in. To enable more features, use .login()')
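# Minimal login sketch (values are illustrative): inside a CyberGISX Jupyter
# session the JUPYTERHUB_API_TOKEN environment variable is usually present, so
#     cybergis = CyberGISCompute(isJupyter=True)
#     cybergis.login()
# succeeds without prompting. Otherwise login() falls back to the cached
# cybergis_compute_user.json file; the interactive prompt is only offered when
# Jupyter mode is enabled.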
def create_job(self, maintainer='community_contribution', hpc=None, hpcUsername=None, hpcPassword=None, printJob=True):
"""
Creates a job object
Initializes instance CyberGISCompute using inputs from the client
Args:
maintainer (str) : Pre-packaged programs which can be configured and controlled remotely
and behave as a bridge between user and HPC backends
hpc(str) : HPC backend that is being accessed. For e.g 'keeling_community'
hpcUsername (str) : username for HPC backend
hpcPassword (str) : password for HPC backend
printJob (boolean) : prints the job information if set to True
Returns:
(Job) : The new job instance that was initialized
"""
self.login()
return Job(maintainer=maintainer, hpc=hpc, id=None, hpcUsername=hpcUsername, hpcPassword=hpcPassword, client=self.client, isJupyter=self.isJupyter, jupyterhubApiToken=self.jupyterhubApiToken, printJob=printJob)
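# For example (the maintainer/HPC names are just the documented defaults, and
# submit() is assumed from the Job API defined in Job.py):
#     job = cybergis.create_job(maintainer='community_contribution', hpc='keeling_community')
#     job.submit()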
def get_job_by_id(self, id=None):
"""
Returns Job object with the specified id
Args:
id(int) : Job id
Returns
(Job) : Job object with the specified id otherwise None
"""
self.login()
jobs = self.client.request('GET', '/user/job', {"jupyterhubApiToken": self.jupyterhubApiToken})
token = None
for job in jobs['job']:
if (job['id'] == id):
token = job['secretToken']
if (token is None):
print('❌ job with id ' + str(id) + ' was not found')
return Job(secretToken=token, client=self.client, id=id, isJupyter=self.isJupyter, jupyterhubApiToken=self.jupyterhubApiToken)
def get_slurm_usage(self, raw=False):
"""
prints slurm usage
Args:
raw(boolean) : set to True if you want the raw output
Returns
(JSON) : Raw output if raw=True otherwise its printed or displayed directly into the interface
"""
self.login()
usage = self.client.request('GET', '/user/slurm-usage?format={}'.format(not raw), {"jupyterhubApiToken": self.jupyterhubApiToken})
if raw:
return usage
display(Markdown("Nodes: {}<br>Allocated CPUs: {}<br>Total CPU Time: {}<br>Memory Utilized: {}<br>Total Allocated Memory: {}<br>Total Walltime: {}".format(
usage['nodes'], usage['cpus'], usage['cpuTime'], usage['memory'], usage['memoryUsage'], usage['walltime'])))
def list_job(self, raw=False):
"""
prints a list of jobs that were submitted
Args:
raw (boolean) : set to True if you want the raw output
Returns
(JSON) : Raw output if raw=True otherwise its printed or displayed into the interface
"""
self.login()
if self.jupyterhubApiToken is None:
print('❌ please login')
jobs = self.client.request('GET', '/user/job', {"jupyterhubApiToken": self.jupyterhubApiToken})
if raw:
return jobs
headers = ['id', 'hpc', 'executableFolder', 'dataFolder', 'resultFolder', 'param', 'slurm', 'userId', 'maintainer', 'createdAt']
data = []
for job in jobs['job']:
data.append([
job['id'],
job['hpc'],
job['executableFolder'],
job['dataFolder'],
job['resultFolder'],
json.dumps(job['param']),
json.dumps(job['slurm']),
job['userId'],
job['maintainer'],
job['createdAt'],
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_hpc(self, raw=False):
"""
prints a list of hpc resources that the server supports
Args:
raw (boolean) : set to True if you want the raw output
Returns
(JSON) : Raw output if raw=True otherwise its printed
or displayed directly into the interface
"""
hpc = self.client.request('GET', '/hpc')['hpc']
if raw:
return hpc
headers = ['hpc', 'ip', 'port', 'is_community_account']
data = []
for i in hpc:
data.append([
i,
hpc[i]['ip'],
hpc[i]['port'],
hpc[i]['is_community_account']
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_container(self, raw=False):
"""
prints a list of containers that the server supports
Args:
raw (boolean) : set to True if you want the raw output
Returns
(JSON) : Raw output if raw=True otherwise its printed
or displayed directly into the interface
"""
container = self.client.request('GET', '/container')['container']
if raw:
return container
headers = ['container name', 'dockerfile', 'dockerhub']
data = []
for i in container:
data.append([
i,
container[i]['dockerfile'],
container[i]['dockerhub']
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_git(self, raw=False):
"""
prints a list of Git projects that the server supports
Args:
raw (boolean) : set to True if you want the raw output
Returns
(JSON) : Raw output if raw=True otherwise its printed
or displayed directly into the interface
"""
git = self.client.request('GET', '/git')['git']
if raw:
return git
headers = ['link', 'name', 'container', 'repository', 'commit']
data = []
for i in git:
data.append([
'git://' + i,
git[i]['name'],
git[i]['container'],
git[i]['repository'],
git[i]['commit'] if 'commit' in git[i] else 'NONE',
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_maintainer(self, raw=False):
"""
prints a list of maintainers that the server supports
Args:
raw (boolean) : set to True if you want the raw output
Returns
(JSON) : Raw output if raw=True otherwise its printed
or displayed directly into the interface
"""
maintainers = self.client.request('GET', '/maintainer')['maintainer']
if raw:
return maintainers
headers = ['maintainer', 'hpc', 'default_hpc', 'job_pool_capacity', 'executable_folder->from_user', 'executable_folder->must_have']
data = []
for i in maintainers:
maintainer = maintainers[i]
from_user = 'not specified'
if 'executable_folder' in maintainer:
from_user = maintainer['executable_folder']['from_user']
must_have = 'not specified'
if 'executable_folder' in maintainer:
if 'file_config' in maintainer['executable_folder']:
if 'must_have' in maintainer['executable_folder']['file_config']:
must_have = maintainer['executable_folder']['file_config']['must_have']
data.append([
i,
maintainer['hpc'],
maintainer['default_hpc'],
maintainer['job_pool_capacity'],
from_user,
must_have
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
# Integrated functions
def list_info(self, list_maintainer=False, list_container=False):
"""
calls list_git, list_hpc, list_job with options to call list_maintainer and list_container
Args:
list_maintainer (boolean) : set to True if you want to call list_maintainer
list_container (boolean) : set to True if you want to call list_container
Returns
None
"""
print('📦 Git repositories:')
self.list_git()
print('🖥 HPC endpoints:')
self.list_hpc()
if self.is_login():
print('📮 Submitted jobs:')
self.list_job()
if list_container:
print('🗳 Containers:')
self.list_container()
if list_maintainer:
print('🤖 Maintainers:')
self.list_maintainer()
def create_job_by_ui(self, defaultJob="hello_world", defaultDataFolder="./", defaultRemoteResultFolder=None):
"""
Displays the job submission UI
Args:
defaultJob (str) : Stores the default job that shows up on the UI
defaultDataFolder (str) : Stores the default input folder that shows up on the UI
defaultRemoteResultFolder (str) : Stores the default output folder that shows up on the UI
Returns:
None
"""
self.ui.defaultJobName = defaultJob
self.ui.defaultDataFolder = defaultDataFolder
if defaultRemoteResultFolder is not None:
self.ui.defaultRemoteResultFolder = defaultRemoteResultFolder if defaultRemoteResultFolder[0] == '/' else '/' + defaultRemoteResultFolder
self.ui.render()
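# For instance, cybergis.create_job_by_ui(defaultJob='hello_world', defaultDataFolder='./')
# renders the submission widget with those fields pre-filled.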
def get_latest_created_job(self):
"""
Return the current job instance
Args:
None
Returns:
(JOB) : Latest Job object instance
"""
return self.job
# helper functions
def enable_jupyter(self):
"""
sets up jupyter environment in jupyterhubHost
Args:
None
Returns:
None
"""
self.isJupyter = True
# get jupyter variable
url = os.getenv('JUPYTER_INSTANCE_URL')
if url is not None:
CyberGISCompute.jupyterhubHost = url.replace('https://', '').replace('http://', '')
else:
display(Javascript('IPython.notebook.kernel.execute(`CyberGISCompute.jupyterhubHost = "${window.location.host}"`);'))
def get_user_jupyter_globus(self):
"""
Returns the user's jupyter-globus endpoint information from the server
Args:
None
Returns:
(JSON) : jupyter-globus information for the current user
"""
return self.client.request('GET', '/user/jupyter-globus', {"jupyterhubApiToken": self.jupyterhubApiToken})
def is_login(self):
"""
Checks whether jupyterhubApi token exists or not
Args:
None
Returns:
(boolean) : jupyterhubAPI existence check
"""
return self.jupyterhubApiToken is not None
| [] | [] | [
"JUPYTER_INSTANCE_URL",
"JUPYTERHUB_API_TOKEN"
] | [] | ["JUPYTER_INSTANCE_URL", "JUPYTERHUB_API_TOKEN"] | python | 2 | 0 | |
backend/pah_fm/settings.py | import os
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Base URL without trailing slash
BASE_URL = os.environ.get("BASE_URL")
SECRET_KEY = os.environ.get("SECRET_KEY")
DEBUG = os.environ.get("DEBUG", "0") == "1"
ALLOWED_HOSTS = [
"localhost",
"127.0.0.1",
"52.232.62.212",
".pahfm.codeforpoznan.pl",
".execute-api.eu-west-1.amazonaws.com",
]
USE_X_FORWARDED_HOST = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_DOMAIN = BASE_URL
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"import_export",
# 3rd party apps
"corsheaders",
"djmoney",
"rest_framework",
# local apps
"fleet_management",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"fleet_management.middleware.UpdateLastSeenMiddleware",
]
CORS_ORIGIN_WHITELIST = ("localhost:8080", "127.0.0.1:8080")
ROOT_URLCONF = "pah_fm.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "pah_fm.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"USER": os.environ.get("PAH_FM_DB_USER", "pah-fm"),
"NAME": os.environ.get("PAH_FM_DB_NAME", "pah-fm"),
"PASSWORD": os.environ.get("PAH_FM_DB_PASS", "pah-fm"),
"HOST": os.environ.get("PAH_FM_DB_HOST", "localhost"),
"PORT": os.environ.get("PAH_FM_DB_PORT", "5432"),
}
}
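# For local development the settings above can be supplied via the environment,
# e.g. (illustrative values):
#   export PAH_FM_DB_USER=pah-fm PAH_FM_DB_PASS=pah-fm PAH_FM_DB_HOST=localhost PAH_FM_DB_PORT=5432
# otherwise the defaults given as the second argument to os.environ.get() apply.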
# DRF settings
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_jwt.authentication.JSONWebTokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.BasicAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_RENDERER_CLASSES": (
"djangorestframework_camel_case.render.CamelCaseJSONRenderer",
),
"DEFAULT_PARSER_CLASSES": (
"djangorestframework_camel_case.parser.CamelCaseJSONParser",
),
"DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",),
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation"
".UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Custom user model
AUTH_USER_MODEL = "fleet_management.User"
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "pah_fm/static")]
# Email settings
EMAIL_HOST = "localhost"
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_ADDRESS = "[email protected]"
if DEBUG:
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
EMAIL_FILE_PATH = os.path.join(BASE_DIR, "emails")
# TODO: verify whether this backend is required for user registration
AUTHENTICATION_BACKENDS = ("django.contrib.auth.backends.AllowAllUsersModelBackend",)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
JWT_AUTH = {
"JWT_EXPIRATION_DELTA": datetime.timedelta(days=30),
}
RSA_PUBLIC_EXP = 257
RSA_BIT_LENGTH = 19
| [] | [] | [
"PAH_FM_DB_NAME",
"BASE_URL",
"PAH_FM_DB_HOST",
"PAH_FM_DB_USER",
"SECRET_KEY",
"PAH_FM_DB_PASS",
"PAH_FM_DB_PORT",
"DEBUG"
] | [] | ["PAH_FM_DB_NAME", "BASE_URL", "PAH_FM_DB_HOST", "PAH_FM_DB_USER", "SECRET_KEY", "PAH_FM_DB_PASS", "PAH_FM_DB_PORT", "DEBUG"] | python | 8 | 0 | |
vendor/golang.org/x/tools/go/packages/golist.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
import (
"bytes"
"encoding/json"
"fmt"
"go/types"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/tools/go/internal/packagesdriver"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/semver"
)
// debug controls verbose logging.
var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
// A goTooOldError reports that the go command
// found by exec.LookPath is too old to use the new go list behavior.
type goTooOldError struct {
error
}
// responseDeduper wraps a driverResponse, deduplicating its contents.
type responseDeduper struct {
seenRoots map[string]bool
seenPackages map[string]*Package
dr *driverResponse
}
// init fills in r with a driverResponse.
func (r *responseDeduper) init(dr *driverResponse) {
r.dr = dr
r.seenRoots = map[string]bool{}
r.seenPackages = map[string]*Package{}
for _, pkg := range dr.Packages {
r.seenPackages[pkg.ID] = pkg
}
for _, root := range dr.Roots {
r.seenRoots[root] = true
}
}
func (r *responseDeduper) addPackage(p *Package) {
if r.seenPackages[p.ID] != nil {
return
}
r.seenPackages[p.ID] = p
r.dr.Packages = append(r.dr.Packages, p)
}
func (r *responseDeduper) addRoot(id string) {
if r.seenRoots[id] {
return
}
r.seenRoots[id] = true
r.dr.Roots = append(r.dr.Roots, id)
}
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
var sizes types.Sizes
var sizeserr error
var sizeswg sync.WaitGroup
if cfg.Mode >= LoadTypes {
sizeswg.Add(1)
go func() {
sizes, sizeserr = getSizes(cfg)
sizeswg.Done()
}()
}
// Determine files requested in contains patterns
var containFiles []string
var packagesNamed []string
restPatterns := make([]string, 0, len(patterns))
// Extract file= and other [querytype]= patterns. Report an error if querytype
// doesn't exist.
extractQueries:
for _, pattern := range patterns {
eqidx := strings.Index(pattern, "=")
if eqidx < 0 {
restPatterns = append(restPatterns, pattern)
} else {
query, value := pattern[:eqidx], pattern[eqidx+len("="):]
switch query {
case "file":
containFiles = append(containFiles, value)
case "pattern":
restPatterns = append(restPatterns, value)
case "name":
packagesNamed = append(packagesNamed, value)
case "": // not a reserved query
restPatterns = append(restPatterns, pattern)
default:
for _, rune := range query {
if rune < 'a' || rune > 'z' { // not a reserved query
restPatterns = append(restPatterns, pattern)
continue extractQueries
}
}
// Reject all other patterns containing "="
return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
}
}
}
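// At this point (illustrative): a query list such as
//   []string{"file=/home/user/proj/foo.go", "name=http", "./..."}
// has been split into containFiles, packagesNamed and restPatterns respectively.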
patterns = restPatterns
// TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released.
var listfunc driver
var isFallback bool
listfunc = func(cfg *Config, words ...string) (*driverResponse, error) {
response, err := golistDriverCurrent(cfg, words...)
if _, ok := err.(goTooOldError); ok {
isFallback = true
listfunc = golistDriverFallback
return listfunc(cfg, words...)
}
listfunc = golistDriverCurrent
return response, err
}
response := &responseDeduper{}
var err error
// see if we have any patterns to pass through to go list.
if len(restPatterns) > 0 {
dr, err := listfunc(cfg, restPatterns...)
if err != nil {
return nil, err
}
response.init(dr)
} else {
response.init(&driverResponse{})
}
sizeswg.Wait()
if sizeserr != nil {
return nil, sizeserr
}
// types.SizesFor always returns nil or a *types.StdSizes
response.dr.Sizes, _ = sizes.(*types.StdSizes)
var containsCandidates []string
if len(containFiles) != 0 {
if err := runContainsQueries(cfg, listfunc, isFallback, response, containFiles); err != nil {
return nil, err
}
}
if len(packagesNamed) != 0 {
if err := runNamedQueries(cfg, listfunc, response, packagesNamed); err != nil {
return nil, err
}
}
modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr)
if err != nil {
return nil, err
}
if len(containFiles) > 0 {
containsCandidates = append(containsCandidates, modifiedPkgs...)
containsCandidates = append(containsCandidates, needPkgs...)
}
if len(needPkgs) > 0 {
err = addNeededOverlayPackages(cfg, listfunc, response, needPkgs)
if err != nil {
return nil, err
}
}
// Check candidate packages for containFiles.
if len(containFiles) > 0 {
for _, id := range containsCandidates {
pkg := response.seenPackages[id]
for _, f := range containFiles {
for _, g := range pkg.GoFiles {
if sameFile(f, g) {
response.addRoot(id)
}
}
}
}
}
return response.dr, nil
}
func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
dr, err := driver(cfg, pkgs...)
if err != nil {
return err
}
for _, pkg := range dr.Packages {
response.addPackage(pkg)
}
return nil
}
func runContainsQueries(cfg *Config, driver driver, isFallback bool, response *responseDeduper, queries []string) error {
for _, query := range queries {
// TODO(matloob): Do only one query per directory.
fdir := filepath.Dir(query)
// Pass absolute path of directory to go list so that it knows to treat it as a directory,
// not a package path.
pattern, err := filepath.Abs(fdir)
if err != nil {
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
}
if isFallback {
pattern = "."
cfg.Dir = fdir
}
dirResponse, err := driver(cfg, pattern)
if err != nil {
return err
}
isRoot := make(map[string]bool, len(dirResponse.Roots))
for _, root := range dirResponse.Roots {
isRoot[root] = true
}
for _, pkg := range dirResponse.Packages {
// Add any new packages to the main set
// We don't bother to filter packages that will be dropped by the changes of roots,
// that will happen anyway during graph construction outside this function.
// Over-reporting packages is not a problem.
response.addPackage(pkg)
// if the package was not a root one, it cannot have the file
if !isRoot[pkg.ID] {
continue
}
for _, pkgFile := range pkg.GoFiles {
if filepath.Base(query) == filepath.Base(pkgFile) {
response.addRoot(pkg.ID)
break
}
}
}
}
return nil
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
// calling `go env` isn't free; bail out if there's nothing to do.
if len(queries) == 0 {
return nil
}
// Determine which directories are relevant to scan.
roots, modRoot, err := roots(cfg)
if err != nil {
return err
}
// Scan the selected directories. Simple matches, from GOPATH/GOROOT
// or the local module, can simply be "go list"ed. Matches from the
// module cache need special treatment.
var matchesMu sync.Mutex
var simpleMatches, modCacheMatches []string
add := func(root gopathwalk.Root, dir string) {
// Walk calls this concurrently; protect the result slices.
matchesMu.Lock()
defer matchesMu.Unlock()
path := dir[len(root.Path)+1:]
if pathMatchesQueries(path, queries) {
switch root.Type {
case gopathwalk.RootModuleCache:
modCacheMatches = append(modCacheMatches, path)
case gopathwalk.RootCurrentModule:
// We'd need to read go.mod to find the full
// import path. Relative's easier.
rel, err := filepath.Rel(cfg.Dir, dir)
if err != nil {
// This ought to be impossible, since
// we found dir in the current module.
panic(err)
}
simpleMatches = append(simpleMatches, "./"+rel)
case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
simpleMatches = append(simpleMatches, path)
}
}
}
startWalk := time.Now()
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
if debug {
log.Printf("%v for walk", time.Since(startWalk))
}
// Weird special case: the top-level package in a module will be in
// whatever directory the user checked the repository out into. It's
// more reasonable for that to not match the package name. So, if there
// are any Go files in the mod root, query it just to be safe.
if modRoot != "" {
rel, err := filepath.Rel(cfg.Dir, modRoot)
if err != nil {
panic(err) // See above.
}
files, err := ioutil.ReadDir(modRoot)
if err != nil {
return err
}
for _, f := range files {
if strings.HasSuffix(f.Name(), ".go") {
simpleMatches = append(simpleMatches, rel)
break
}
}
}
addResponse := func(r *driverResponse) {
for _, pkg := range r.Packages {
response.addPackage(pkg)
for _, name := range queries {
if pkg.Name == name {
response.addRoot(pkg.ID)
break
}
}
}
}
if len(simpleMatches) != 0 {
resp, err := driver(cfg, simpleMatches...)
if err != nil {
return err
}
addResponse(resp)
}
// Module cache matches are tricky. We want to avoid downloading new
// versions of things, so we need to use the ones present in the cache.
// go list doesn't accept version specifiers, so we have to write out a
// temporary module, and do the list in that module.
if len(modCacheMatches) != 0 {
// Collect all the matches, deduplicating by major version
// and preferring the newest.
type modInfo struct {
mod string
major string
}
mods := make(map[modInfo]string)
var imports []string
for _, modPath := range modCacheMatches {
matches := modCacheRegexp.FindStringSubmatch(modPath)
mod, ver := filepath.ToSlash(matches[1]), matches[2]
importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
major := semver.Major(ver)
if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
mods[modInfo{mod, major}] = ver
}
imports = append(imports, importPath)
}
// Build the temporary module.
var gomod bytes.Buffer
gomod.WriteString("module modquery\nrequire (\n")
for mod, version := range mods {
gomod.WriteString("\t" + mod.mod + " " + version + "\n")
}
gomod.WriteString(")\n")
tmpCfg := *cfg
// We're only trying to look at stuff in the module cache, so
// disable the network. This should speed things up, and has
// prevented errors in at least one case, #28518.
tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...)
var err error
tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
if err != nil {
return err
}
defer os.RemoveAll(tmpCfg.Dir)
if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
return fmt.Errorf("writing go.mod for module cache query: %v", err)
}
// Run the query, using the import paths calculated from the matches above.
resp, err := driver(&tmpCfg, imports...)
if err != nil {
return fmt.Errorf("querying module cache matches: %v", err)
}
addResponse(resp)
}
return nil
}
func getSizes(cfg *Config) (types.Sizes, error) {
return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
}
// roots selects the appropriate paths to walk based on the passed-in configuration,
// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
if err != nil {
return nil, "", err
}
fields := strings.Split(stdout.String(), "\n")
if len(fields) != 4 || len(fields[3]) != 0 {
return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
}
goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
var modDir string
if gomod != "" {
modDir = filepath.Dir(gomod)
}
var roots []gopathwalk.Root
// Always add GOROOT.
roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
// If modules are enabled, scan the module dir.
if modDir != "" {
roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule})
}
// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
for _, p := range gopath {
if modDir != "" {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
} else {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
}
}
return roots, modDir, nil
}
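// Illustrative result (paths hypothetical): with GOPATH=/home/u/go and a go.mod
// at /home/u/proj, roots returns GOROOT/src (RootGOROOT), /home/u/proj
// (RootCurrentModule) and /home/u/go/pkg/mod (RootModuleCache), with modDir "/home/u/proj".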
// These functions were copied from goimports. See further documentation there.
// pathMatchesQueries is adapted from pkgIsCandidate.
// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
func pathMatchesQueries(path string, queries []string) bool {
lastTwo := lastTwoComponents(path)
for _, query := range queries {
if strings.Contains(lastTwo, query) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
if strings.Contains(lastTwo, query) {
return true
}
}
}
return false
}
// lastTwoComponents returns at most the last two path components
// of v, using either / or \ as the path separator.
func lastTwoComponents(v string) string {
nslash := 0
for i := len(v) - 1; i >= 0; i-- {
if v[i] == '/' || v[i] == '\\' {
nslash++
if nslash == 2 {
return v[i:]
}
}
}
return v
}
func hasHyphenOrUpperASCII(s string) bool {
for i := 0; i < len(s); i++ {
b := s[i]
if b == '-' || ('A' <= b && b <= 'Z') {
return true
}
}
return false
}
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
buf := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
b := s[i]
switch {
case b == '-':
continue
case 'A' <= b && b <= 'Z':
buf = append(buf, b+('a'-'A'))
default:
buf = append(buf, b)
}
}
return string(buf)
}
// Fields must match go list;
// see $GOROOT/src/cmd/go/internal/load/pkg.go.
type jsonPackage struct {
ImportPath string
Dir string
Name string
Export string
GoFiles []string
CompiledGoFiles []string
CFiles []string
CgoFiles []string
CXXFiles []string
MFiles []string
HFiles []string
FFiles []string
SFiles []string
SwigFiles []string
SwigCXXFiles []string
SysoFiles []string
Imports []string
ImportMap map[string]string
Deps []string
TestGoFiles []string
TestImports []string
XTestGoFiles []string
XTestImports []string
ForTest string // q in a "p [q.test]" package, else ""
DepOnly bool
Error *jsonPackageError
}
type jsonPackageError struct {
ImportStack []string
Pos string
Err string
}
func otherFiles(p *jsonPackage) [][]string {
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
}
// golistDriverCurrent uses the "go list" command to expand the
// pattern words and return metadata for the specified packages.
// dir may be "" and env may be nil, as per os/exec.Command.
func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) {
// go list uses the following identifiers in ImportPath and Imports:
//
// "p" -- importable package or main (command)
// "q.test" -- q's test executable
// "p [q.test]" -- variant of p as built for q's test executable
// "q_test [q.test]" -- q's external test package
//
// The packages p that are built differently for a test q.test
// are q itself, plus any helpers used by the external test q_test,
// typically including "testing" and all its dependencies.
// Run "go list" for complete
// information on the specified packages.
buf, err := invokeGo(cfg, golistargs(cfg, words)...)
if err != nil {
return nil, err
}
seen := make(map[string]*jsonPackage)
// Decode the JSON and convert it to Package form.
var response driverResponse
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
return nil, fmt.Errorf("JSON decoding failed: %v", err)
}
if p.ImportPath == "" {
// The documentation for go list says that “[e]rroneous packages will have
// a non-empty ImportPath”. If for some reason it comes back empty, we
// prefer to error out rather than silently discarding data or handing
// back a package without any way to refer to it.
if p.Error != nil {
return nil, Error{
Pos: p.Error.Pos,
Msg: p.Error.Err,
}
}
return nil, fmt.Errorf("package missing import path: %+v", p)
}
if old, found := seen[p.ImportPath]; found {
if !reflect.DeepEqual(p, old) {
return nil, fmt.Errorf("go list repeated package %v with different values", p.ImportPath)
}
// skip the duplicate
continue
}
seen[p.ImportPath] = p
pkg := &Package{
Name: p.Name,
ID: p.ImportPath,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
}
// Workaround for https://golang.org/issue/28749.
// TODO(adonovan): delete before go1.12 release.
out := pkg.CompiledGoFiles[:0]
for _, f := range pkg.CompiledGoFiles {
if strings.HasSuffix(f, ".s") {
continue
}
out = append(out, f)
}
pkg.CompiledGoFiles = out
// Extract the PkgPath from the package's ID.
if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
pkg.PkgPath = pkg.ID[:i]
} else {
pkg.PkgPath = pkg.ID
}
if pkg.PkgPath == "unsafe" {
pkg.GoFiles = nil // ignore fake unsafe.go file
}
// Assume go list emits only absolute paths for Dir.
if p.Dir != "" && !filepath.IsAbs(p.Dir) {
log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
}
if p.Export != "" && !filepath.IsAbs(p.Export) {
pkg.ExportFile = filepath.Join(p.Dir, p.Export)
} else {
pkg.ExportFile = p.Export
}
// imports
//
// Imports contains the IDs of all imported packages.
// ImportsMap records (path, ID) only where they differ.
ids := make(map[string]bool)
for _, id := range p.Imports {
ids[id] = true
}
pkg.Imports = make(map[string]*Package)
for path, id := range p.ImportMap {
pkg.Imports[path] = &Package{ID: id} // non-identity import
delete(ids, id)
}
for id := range ids {
if id == "C" {
continue
}
pkg.Imports[id] = &Package{ID: id} // identity import
}
if !p.DepOnly {
response.Roots = append(response.Roots, pkg.ID)
}
// Work around for pre-go.1.11 versions of go list.
// TODO(matloob): they should be handled by the fallback.
// Can we delete this?
if len(pkg.CompiledGoFiles) == 0 {
pkg.CompiledGoFiles = pkg.GoFiles
}
if p.Error != nil {
pkg.Errors = append(pkg.Errors, Error{
Pos: p.Error.Pos,
Msg: p.Error.Err,
})
}
response.Packages = append(response.Packages, pkg)
}
return &response, nil
}
// absJoin absolutizes and flattens the lists of files.
func absJoin(dir string, fileses ...[]string) (res []string) {
for _, files := range fileses {
for _, file := range files {
if !filepath.IsAbs(file) {
file = filepath.Join(dir, file)
}
res = append(res, file)
}
}
return res
}
func golistargs(cfg *Config, words []string) []string {
fullargs := []string{
"list", "-e", "-json", "-compiled",
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports),
// go list doesn't let you pass -test and -find together,
// probably because you'd just get the TestMain.
fmt.Sprintf("-find=%t", cfg.Mode < LoadImports && !cfg.Tests),
}
fullargs = append(fullargs, cfg.BuildFlags...)
fullargs = append(fullargs, "--")
fullargs = append(fullargs, words...)
return fullargs
}
// invokeGo returns the stdout of a go command invocation.
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
if debug {
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(cfg, args...)) }(time.Now())
}
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.CommandContext(cfg.Context, "go", args...)
// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
// The Go stdlib has a special feature where if the cwd and the PWD are the
// same node then it trusts the PWD, so by setting it in the env for the child
// process we fix up all the paths returned by the go command.
cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
cmd.Dir = cfg.Dir
cmd.Stdout = stdout
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {
exitErr, ok := err.(*exec.ExitError)
if !ok {
// Catastrophic error:
// - executable not found
// - context cancellation
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
}
// Old go version?
if strings.Contains(stderr.String(), "flag provided but not defined") {
return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
}
// Export mode entails a build.
// If that build fails, errors appear on stderr
// (despite the -e flag) and the Export field is blank.
// Do not fail in that case.
// The same is true if an ad-hoc package given to go list doesn't exist.
// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
// packages don't exist or a build fails.
if !usesExportData(cfg) && !containsGoFile(args) {
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
}
}
// As of writing, go list -export prints some non-fatal compilation
// errors to stderr, even with -e set. We would prefer that it put
// them in the Package.Error JSON (see https://golang.org/issue/26319).
// In the meantime, there's nowhere good to put them, but they can
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
// is set.
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cfg, args...), stderr)
}
// debugging
if false {
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cfg, args...), stdout)
}
return stdout, nil
}
func containsGoFile(s []string) bool {
for _, f := range s {
if strings.HasSuffix(f, ".go") {
return true
}
}
return false
}
func cmdDebugStr(cfg *Config, args ...string) string {
env := make(map[string]string)
for _, kv := range cfg.Env {
split := strings.Split(kv, "=")
k, v := split[0], split[1]
env[k] = v
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
}
| [
"\"GOPACKAGESDEBUG\"",
"\"GOPACKAGESPRINTGOLISTERRORS\""
] | [] | [
"GOPACKAGESDEBUG",
"GOPACKAGESPRINTGOLISTERRORS"
] | [] | ["GOPACKAGESDEBUG", "GOPACKAGESPRINTGOLISTERRORS"] | go | 2 | 0 | |
providers/nextcloud/nextcloud_test.go | package nextcloud_test
import (
"os"
"testing"
"github.com/anant-sharma/goth"
"github.com/anant-sharma/goth/providers/nextcloud"
"github.com/stretchr/testify/assert"
)
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("NEXTCLOUD_KEY"))
a.Equal(p.Secret, os.Getenv("NEXTCLOUD_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_NewCustomisedURL(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := urlCustomisedURLProvider()
session, err := p.BeginAuth("test_state")
s := session.(*nextcloud.Session)
a.NoError(err)
a.Contains(s.AuthURL, "http://authURL")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*nextcloud.Session)
a.NoError(err)
a.Contains(s.AuthURL, "/apps/oauth2/authorize?client_id=")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://nextcloud.com/oauth/authorize","AccessToken":"1234567890"}`)
a.NoError(err)
s := session.(*nextcloud.Session)
a.Equal(s.AuthURL, "https://nextcloud.com/oauth/authorize")
a.Equal(s.AccessToken, "1234567890")
}
func provider() *nextcloud.Provider {
return nextcloud.NewCustomisedDNS(
os.Getenv("NEXTCLOUD_KEY"),
os.Getenv("NEXTCLOUD_SECRET"),
"/foo",
os.Getenv("NEXTCLOUD_DNS"),
)
}
func urlCustomisedURLProvider() *nextcloud.Provider {
return nextcloud.NewCustomisedURL(os.Getenv("NEXTCLOUD_KEY"), os.Getenv("NEXTCLOUD_SECRET"), "/foo", "http://authURL", "http://tokenURL", "http://profileURL")
}
| [
"\"NEXTCLOUD_KEY\"",
"\"NEXTCLOUD_SECRET\"",
"\"NEXTCLOUD_KEY\"",
"\"NEXTCLOUD_SECRET\"",
"\"NEXTCLOUD_DNS\"",
"\"NEXTCLOUD_KEY\"",
"\"NEXTCLOUD_SECRET\""
] | [] | [
"NEXTCLOUD_SECRET",
"NEXTCLOUD_DNS",
"NEXTCLOUD_KEY"
] | [] | ["NEXTCLOUD_SECRET", "NEXTCLOUD_DNS", "NEXTCLOUD_KEY"] | go | 3 | 0 | |
com.dynamo.cr/com.dynamo.cr.bob/src/com/dynamo/bob/util/Exec.java | // Copyright 2020 The Defold Foundation
// Licensed under the Defold License version 1.0 (the "License"); you may not use
// this file except in compliance with the License.
//
// You may obtain a copy of the License, together with FAQs at
// https://www.defold.com/license
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package com.dynamo.bob.util;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.List;
import java.util.Map;
public class Exec {
private static String verbosity = System.getenv("DM_BOB_VERBOSE");
private static Logger logger = Logger.getLogger(Exec.class.getCanonicalName());
private static int getVerbosity() {
if (verbosity == null)
return 0;
try {
return Integer.parseInt(verbosity);
} catch (NumberFormatException nfe) {
return 0;
}
}
public static int exec(String... args) throws IOException {
if (getVerbosity() >= 2) {
logger.log(Level.INFO, "CMD: " + String.join(" ", args));
}
Process p = new ProcessBuilder(args).redirectErrorStream(true).start();
int ret = 127;
byte[] buf = new byte[16 * 1024];
try {
InputStream is = p.getInputStream();
int n = is.read(buf);
while (n > 0) {
n = is.read(buf);
}
ret = p.waitFor();
} catch (InterruptedException e) {
logger.log(Level.SEVERE, "Unexpected interruption", e);
}
return ret;
}
public static class Result {
public Result(int ret, byte[] stdOutErr) {
this.ret = ret;
this.stdOutErr = stdOutErr;
}
public int ret;
public byte[] stdOutErr;
}
/**
* Exec command
* @param args arguments
* @return instance with return code and stdout/stderr combined
* @throws IOException
*/
public static Result execResult(String... args) throws IOException {
if (getVerbosity() >= 2) {
logger.log(Level.INFO, "CMD: " + String.join(" ", args));
}
Process p = new ProcessBuilder(args).redirectErrorStream(true).start();
int ret = 127;
byte[] buf = new byte[16 * 1024];
ByteArrayOutputStream out = new ByteArrayOutputStream(10 * 1024);
try {
InputStream is = p.getInputStream();
int n = is.read(buf);
while (n > 0) {
out.write(buf, 0, n);
n = is.read(buf);
}
ret = p.waitFor();
} catch (InterruptedException e) {
logger.log(Level.SEVERE, "Unexpected interruption", e);
}
return new Result(ret, out.toByteArray());
}
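// Illustrative usage (the command is hypothetical):
//   Exec.Result r = Exec.execResult("git", "rev-parse", "HEAD");
//   if (r.ret == 0) { String sha = new String(r.stdOutErr).trim(); }
// Setting DM_BOB_VERBOSE=2 (or higher) makes the executed command line get logged.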
private static ProcessBuilder processBuilderWithArgs(Map<String, String> env, String[] args) {
if (getVerbosity() >= 2) {
logger.log(Level.INFO, "CMD: " + String.join(" ", args));
}
ProcessBuilder pb = new ProcessBuilder(args);
pb.redirectErrorStream(true);
Map<String, String> pbenv = pb.environment();
for (Map.Entry<String, String> entry : env.entrySet())
{
pbenv.put(entry.getKey(), entry.getValue());
}
return pb;
}
private static Result runProcessBuilder(ProcessBuilder pb) throws IOException {
Process p = pb.start();
int ret = 127;
byte[] buf = new byte[16 * 1024];
ByteArrayOutputStream out = new ByteArrayOutputStream(10 * 1024);
try {
InputStream is = p.getInputStream();
int n = is.read(buf);
while (n > 0) {
out.write(buf, 0, n);
n = is.read(buf);
}
ret = p.waitFor();
} catch (InterruptedException e) {
logger.log(Level.SEVERE, "Unexpected interruption", e);
}
return new Result(ret, out.toByteArray());
}
public static Result execResultWithEnvironment(Map<String, String> env, String... args) throws IOException {
ProcessBuilder pb = processBuilderWithArgs(env, args);
return runProcessBuilder(pb);
}
public static Result execResultWithEnvironmentWorkDir(Map<String, String> env, File workDir, String... args) throws IOException {
ProcessBuilder pb = processBuilderWithArgs(env, args);
pb.directory(workDir);
return runProcessBuilder(pb);
}
public static Result execResultWithEnvironment(Map<String, String> env, List<String> args) throws IOException {
String[] array = new String[args.size()];
array = args.toArray(array);
return Exec.execResultWithEnvironment(env, array);
}
}
| [
"\"DM_BOB_VERBOSE\""
] | [] | [
"DM_BOB_VERBOSE"
] | [] | ["DM_BOB_VERBOSE"] | java | 1 | 0 | |
main.go | package main
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var errorCounter = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "mesos",
Subsystem: "collector",
Name: "errors_total",
Help: "Total number of internal mesos-collector errors.",
})
func init() {
prometheus.MustRegister(errorCounter)
}
func getX509CertPool(pemFiles []string) *x509.CertPool {
pool := x509.NewCertPool()
for _, f := range pemFiles {
content, err := ioutil.ReadFile(f)
if err != nil {
log.Fatal(err)
}
ok := pool.AppendCertsFromPEM(content)
if !ok {
log.Fatalf("Error parsing .pem file %s", f)
}
}
return pool
}
func mkHTTPClient(url string, timeout time.Duration, auth authInfo, certPool *x509.CertPool) *httpClient {
transport := &http.Transport{
TLSClientConfig: &tls.Config{RootCAs: certPool, InsecureSkipVerify: auth.skipSSLVerify},
}
// Go (>= 1.8) only re-sends authentication on redirects to the same host or one of its subdomains.
// -> Re-apply basic auth on every redirect ourselves, since Mesos hostnames rarely follow that pattern.
var redirectFunc func(req *http.Request, via []*http.Request) error
if auth.username != "" && auth.password != "" {
// Auth information is only available in the current context -> use lambda function
redirectFunc = func(req *http.Request, via []*http.Request) error {
req.SetBasicAuth(auth.username, auth.password)
return nil
}
}
client := &httpClient{
http.Client{Timeout: timeout, Transport: transport, CheckRedirect: redirectFunc},
url,
auth,
}
if auth.strictMode {
client.auth.signingKey = parsePrivateKey(client)
}
return client
}
func parsePrivateKey(httpClient *httpClient) []byte {
if _, err := os.Stat(httpClient.auth.privateKey); os.IsNotExist(err) {
buffer := bytes.NewBuffer([]byte(httpClient.auth.privateKey))
var key mesosSecret
if err := json.NewDecoder(buffer).Decode(&key); err != nil {
log.Printf("Error decoding prviate key %s: %s", key, err)
errorCounter.Inc()
return []byte{}
}
httpClient.auth.username = key.UID
httpClient.auth.loginURL = key.LoginEndpoint
return []byte(key.PrivateKey)
}
absPath, _ := filepath.Abs(httpClient.auth.privateKey)
key, err := ioutil.ReadFile(absPath)
if err != nil {
log.Printf("Error reading private key %s: %s", absPath, err)
errorCounter.Inc()
return []byte{}
}
return key
}
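// Note (illustrative): parsePrivateKey above accepts either a path to a key file
// (e.g. /etc/mesos-exporter/key.pem) or an inline JSON service-account secret,
// which is decoded into mesosSecret and also supplies the username and login URL.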
func csvInputToList(input string) []string {
var entryList []string
if input == "" {
return entryList
}
sanitizedString := strings.Replace(input, " ", "", -1)
entryList = strings.Split(sanitizedString, ",")
return entryList
}
func main() {
fs := flag.NewFlagSet("mesos-exporter", flag.ExitOnError)
addr := fs.String("addr", ":9105", "Address to listen on")
masterURL := fs.String("master", "", "Expose metrics from master running on this URL")
slaveURL := fs.String("slave", "", "Expose metrics from slave running on this URL")
timeout := fs.Duration("timeout", 10*time.Second, "Master polling timeout")
exportedTaskLabels := fs.String("exportedTaskLabels", "", "Comma-separated list of task labels to include in the corresponding metric")
exportedSlaveAttributes := fs.String("exportedSlaveAttributes", "", "Comma-separated list of slave attributes to include in the corresponding metric")
trustedCerts := fs.String("trustedCerts", "", "Comma-separated list of certificates (.pem files) trusted for requests to Mesos endpoints")
strictMode := fs.Bool("strictMode", false, "Use strict mode authentication")
username := fs.String("username", "", "Username for authentication")
password := fs.String("password", "", "Password for authentication")
loginURL := fs.String("loginURL", "https://leader.mesos/acs/api/v1/auth/login", "URL for strict mode authentication")
privateKey := fs.String("privateKey", "", "File path to certificate for strict mode authentication")
skipSSLVerify := fs.Bool("skipSSLVerify", false, "Skip SSL certificate verification")
fs.Parse(os.Args[1:])
if *masterURL != "" && *slaveURL != "" {
log.Fatal("Only -master or -slave can be given at a time")
}
auth := authInfo{
strictMode: *strictMode,
skipSSLVerify: *skipSSLVerify,
loginURL: *loginURL,
}
if *strictMode && *privateKey != "" {
auth.privateKey = *privateKey
} else {
auth.privateKey = os.Getenv("MESOS_EXPORTER_PRIVATE_KEY")
}
if *username != "" {
auth.username = *username
} else {
auth.username = os.Getenv("MESOS_EXPORTER_USERNAME")
}
if *password != "" {
auth.password = *password
} else {
auth.password = os.Getenv("MESOS_EXPORTER_PASSWORD")
}
var certPool *x509.CertPool
if *trustedCerts != "" {
certPool = getX509CertPool(csvInputToList(*trustedCerts))
}
slaveAttributeLabels := csvInputToList(*exportedSlaveAttributes)
slaveTaskLabels := csvInputToList(*exportedTaskLabels)
switch {
case *masterURL != "":
for _, f := range []func(*httpClient) prometheus.Collector{
newMasterCollector,
func(c *httpClient) prometheus.Collector {
return newMasterStateCollector(c, slaveAttributeLabels)
},
} {
c := f(mkHTTPClient(*masterURL, *timeout, auth, certPool))
if err := prometheus.Register(c); err != nil {
log.Fatal(err)
}
}
log.Printf("Exposing master metrics on %s", *addr)
case *slaveURL != "":
slaveCollectors := []func(*httpClient) prometheus.Collector{
func(c *httpClient) prometheus.Collector {
return newSlaveCollector(c)
},
func(c *httpClient) prometheus.Collector {
return newSlaveMonitorCollector(c)
},
func(c *httpClient) prometheus.Collector {
return newSlaveStateCollector(c, slaveTaskLabels, slaveAttributeLabels)
},
}
for _, f := range slaveCollectors {
c := f(mkHTTPClient(*slaveURL, *timeout, auth, certPool))
if err := prometheus.Register(c); err != nil {
log.Fatal(err)
}
}
log.Printf("Exposing slave metrics on %s", *addr)
default:
log.Fatal("Either -master or -slave is required")
}
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`<html>
<head><title>Mesos Exporter</title></head>
<body>
<h1>Mesos Exporter</h1>
<p><a href="/metrics">Metrics</a></p>
</body>
</html>`))
})
http.Handle("/metrics", promhttp.Handler())
if err := http.ListenAndServe(*addr, nil); err != nil {
log.Fatal(err)
}
}
| [
"\"MESOS_EXPORTER_PRIVATE_KEY\"",
"\"MESOS_EXPORTER_USERNAME\"",
"\"MESOS_EXPORTER_PASSWORD\""
] | [] | [
"MESOS_EXPORTER_USERNAME",
"MESOS_EXPORTER_PRIVATE_KEY",
"MESOS_EXPORTER_PASSWORD"
] | [] | ["MESOS_EXPORTER_USERNAME", "MESOS_EXPORTER_PRIVATE_KEY", "MESOS_EXPORTER_PASSWORD"] | go | 3 | 0 | |
mne/forward/forward.py | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
from ..externals.six import string_types
from time import time
import warnings
from copy import deepcopy
import re
import numpy as np
from scipy import linalg, sparse
import shutil
import os
from os import path as op
import tempfile
from ..fixes import sparse_block_diag
from ..io.constants import FIFF
from ..io.open import fiff_open
from ..io.tree import dir_tree_find
from ..io.tag import find_tag, read_tag
from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
write_named_matrix)
from ..io.meas_info import read_bad_channels, Info
from ..io.pick import (pick_channels_forward, pick_info, pick_channels,
pick_types)
from ..io.write import (write_int, start_block, end_block,
write_coord_trans, write_ch_info, write_name_list,
write_string, start_file, end_file, write_id)
from ..io.base import _BaseRaw
from ..evoked import Evoked, write_evokeds
from ..epochs import Epochs
from ..source_space import (_read_source_spaces_from_tree,
find_source_space_hemi,
_write_source_spaces_to_fid)
from ..transforms import (transform_surface_to, invert_transform,
write_trans)
from ..utils import (_check_fname, get_subjects_dir, has_mne_c,
run_subprocess, check_fname, logger, verbose)
class Forward(dict):
"""Forward class to represent info from forward solution
"""
def __repr__(self):
"""Summarize forward info instead of printing all"""
entr = '<Forward'
nchan = len(pick_types(self['info'], meg=True, eeg=False))
entr += ' | ' + 'MEG channels: %d' % nchan
nchan = len(pick_types(self['info'], meg=False, eeg=True))
entr += ' | ' + 'EEG channels: %d' % nchan
src_types = np.array([src['type'] for src in self['src']])
if (src_types == 'surf').all():
entr += (' | Source space: Surface with %d vertices'
% self['nsource'])
elif (src_types == 'vol').all():
entr += (' | Source space: Volume with %d grid points'
% self['nsource'])
elif (src_types == 'discrete').all():
entr += (' | Source space: Discrete with %d dipoles'
% self['nsource'])
else:
count_string = ''
if (src_types == 'surf').any():
count_string += '%d surface, ' % (src_types == 'surf').sum()
if (src_types == 'vol').any():
count_string += '%d volume, ' % (src_types == 'vol').sum()
if (src_types == 'discrete').any():
count_string += '%d discrete, ' \
% (src_types == 'discrete').sum()
count_string = count_string.rstrip(', ')
entr += (' | Source space: Mixed (%s) with %d vertices'
% (count_string, self['nsource']))
if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
entr += (' | Source orientation: Unknown')
elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
entr += (' | Source orientation: Fixed')
elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
entr += (' | Source orientation: Free')
entr += '>'
return entr
def prepare_bem_model(bem, sol_fname=None, method='linear'):
"""Wrapper for the mne_prepare_bem_model command line utility
Parameters
----------
bem : str
The name of the file containing the triangulations of the BEM surfaces
and the conductivities of the compartments. The standard ending for
this file is -bem.fif and it is produced either with the utility
mne_surf2bem or the convenience script mne_setup_forward_model.
sol_fname : None | str
The output file. None (the default) will employ the standard naming
scheme. To conform with the standard naming conventions the filename
should start with the subject name and end in "-bem-sol.fif".
method : 'linear' | 'constant'
The BEM approach.
"""
cmd = ['mne_prepare_bem_model', '--bem', bem, '--method', method]
if sol_fname is not None:
cmd.extend(('--sol', sol_fname))
run_subprocess(cmd)
def _block_diag(A, n):
"""Constructs a block diagonal from a packed structure
You have to try it on a matrix to see what it's doing.
If A is not sparse, then returns a sparse block diagonal "bd",
diagonalized from the
elements in "A".
"A" is ma x na, comprising bdn=(na/"n") blocks of submatrices.
Each submatrix is ma x "n", and these submatrices are
placed down the diagonal of the matrix.
If A is already sparse, then the operation is reversed, yielding
a block
row matrix, where each set of n columns corresponds to a block element
from the block diagonal.
Parameters
----------
A : array
The matrix
n : int
The block size
Returns
-------
bd : sparse matrix
The block diagonal matrix
"""
if sparse.issparse(A): # then make block sparse
        raise NotImplementedError('sparse reversal not implemented yet')
ma, na = A.shape
bdn = na // int(n) # number of submatrices
if na % n > 0:
raise ValueError('Width of matrix must be a multiple of n')
tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
tmp = np.tile(tmp, (1, n))
ii = tmp.ravel()
jj = np.arange(na, dtype=np.int)[None, :]
jj = jj * np.ones(ma, dtype=np.int)[:, None]
jj = jj.T.ravel() # column indices foreach sparse bd
bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
return bd
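# --- Illustrative sketch (editor's note, not part of the original module) ----
# A tiny worked example of what ``_block_diag`` produces; the matrix values
# below are made up purely for illustration.
#
#     >>> import numpy as np
#     >>> A = np.arange(12.).reshape(2, 6)   # ma=2, na=6
#     >>> bd = _block_diag(A, 3)             # two 2x3 blocks -> 4x6 sparse
#     >>> bd.toarray().astype(int).tolist()
#     [[0, 1, 2, 0, 0, 0],
#      [6, 7, 8, 0, 0, 0],
#      [0, 0, 0, 3, 4, 5],
#      [0, 0, 0, 9, 10, 11]]
# -----------------------------------------------------------------------------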
def _inv_block_diag(A, n):
"""Constructs an inverse block diagonal from a packed structure
You have to try it on a matrix to see what it's doing.
"A" is ma x na, comprising bdn=(na/"n") blocks of submatrices.
Each submatrix is ma x "n", and the inverses of these submatrices
are placed down the diagonal of the matrix.
Parameters
----------
A : array
The matrix.
n : int
The block size.
Returns
-------
bd : sparse matrix
The block diagonal matrix.
"""
ma, na = A.shape
bdn = na // int(n) # number of submatrices
if na % n > 0:
raise ValueError('Width of matrix must be a multiple of n')
# modify A in-place to invert each sub-block
A = A.copy()
for start in range(0, na, 3):
# this is a view
A[:, start:start + 3] = linalg.inv(A[:, start:start + 3])
tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
tmp = np.tile(tmp, (1, n))
ii = tmp.ravel()
jj = np.arange(na, dtype=np.int)[None, :]
jj = jj * np.ones(ma, dtype=np.int)[:, None]
jj = jj.T.ravel() # column indices foreach sparse bd
bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
return bd
def _get_tag_int(fid, node, name, id_):
"""Helper to check we have an appropriate tag"""
tag = find_tag(fid, node, id_)
if tag is None:
fid.close()
raise ValueError(name + ' tag not found')
return int(tag.data)
def _read_one(fid, node):
"""Read all interesting stuff for one forward solution
"""
# This function assumes the fid is open as a context manager
if node is None:
return None
one = Forward()
one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',
FIFF.FIFF_MNE_SOURCE_ORIENTATION)
one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',
FIFF.FIFF_MNE_COORD_FRAME)
one['nsource'] = _get_tag_int(fid, node, 'Number of sources',
FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
one['nchan'] = _get_tag_int(fid, node, 'Number of channels',
FIFF.FIFF_NCHAN)
try:
one['sol'] = _read_named_matrix(fid, node,
FIFF.FIFF_MNE_FORWARD_SOLUTION)
one['sol'] = _transpose_named_matrix(one['sol'], copy=False)
one['_orig_sol'] = one['sol']['data'].copy()
except Exception:
logger.error('Forward solution data not found')
raise
try:
fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD
one['sol_grad'] = _read_named_matrix(fid, node, fwd_type)
one['sol_grad'] = _transpose_named_matrix(one['sol_grad'], copy=False)
one['_orig_sol_grad'] = one['sol_grad']['data'].copy()
except Exception:
one['sol_grad'] = None
if one['sol']['data'].shape[0] != one['nchan'] or \
(one['sol']['data'].shape[1] != one['nsource'] and
one['sol']['data'].shape[1] != 3 * one['nsource']):
raise ValueError('Forward solution matrix has wrong dimensions')
if one['sol_grad'] is not None:
if one['sol_grad']['data'].shape[0] != one['nchan'] or \
(one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and
one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):
raise ValueError('Forward solution gradient matrix has '
'wrong dimensions')
return one
def _read_forward_meas_info(tree, fid):
"""Read light measurement info from forward operator
Parameters
----------
tree : tree
FIF tree structure.
fid : file id
The file id.
Returns
-------
info : instance of mne.io.meas_info.Info
The measurement info.
"""
# This function assumes fid is being used as a context manager
info = Info()
# Information from the MRI file
parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
        raise ValueError('No parent MRI information found in operator')
parent_mri = parent_mri[0]
tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)
info['mri_file'] = tag.data if tag is not None else None
tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)
info['mri_id'] = tag.data if tag is not None else None
# Information from the MEG file
parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
if len(parent_meg) == 0:
raise ValueError('No parent MEG information found in operator')
parent_meg = parent_meg[0]
tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)
info['meas_file'] = tag.data if tag is not None else None
tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)
info['meas_id'] = tag.data if tag is not None else None
# Add channel information
chs = list()
for k in range(parent_meg['nent']):
kind = parent_meg['directory'][k].kind
pos = parent_meg['directory'][k].pos
if kind == FIFF.FIFF_CH_INFO:
tag = read_tag(fid, pos)
chs.append(tag.data)
info['chs'] = chs
info['ch_names'] = [c['ch_name'] for c in chs]
info['nchan'] = len(chs)
# Get the MRI <-> head coordinate transformation
tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
coord_head = FIFF.FIFFV_COORD_HEAD
coord_mri = FIFF.FIFFV_COORD_MRI
coord_device = FIFF.FIFFV_COORD_DEVICE
coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if tag is None:
raise ValueError('MRI/head coordinate transformation not found')
cand = tag.data
if cand['from'] == coord_mri and cand['to'] == coord_head:
info['mri_head_t'] = cand
else:
raise ValueError('MRI/head coordinate transformation not found')
# Get the MEG device <-> head coordinate transformation
tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
if tag is None:
raise ValueError('MEG/head coordinate transformation not found')
cand = tag.data
if cand['from'] == coord_device and cand['to'] == coord_head:
info['dev_head_t'] = cand
elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:
info['ctf_head_t'] = cand
else:
raise ValueError('MEG/head coordinate transformation not found')
info['bads'] = read_bad_channels(fid, parent_meg)
# Check if a custom reference has been applied
tag = find_tag(fid, parent_mri, FIFF.FIFF_CUSTOM_REF)
info['custom_ref_applied'] = bool(tag.data) if tag is not None else False
return info
def _subject_from_forward(forward):
"""Get subject id from inverse operator"""
return forward['src'][0].get('subject_his_id', None)
@verbose
def _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):
"""Merge loaded MEG and EEG forward dicts into one dict"""
if megfwd is not None and eegfwd is not None:
if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or
megfwd['source_ori'] != eegfwd['source_ori'] or
megfwd['nsource'] != eegfwd['nsource'] or
megfwd['coord_frame'] != eegfwd['coord_frame']):
raise ValueError('The MEG and EEG forward solutions do not match')
fwd = megfwd
fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]
fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']]
fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']
fwd['sol']['row_names'] = (fwd['sol']['row_names'] +
eegfwd['sol']['row_names'])
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],
eegfwd['sol_grad']['data']]
fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'],
eegfwd['_orig_sol_grad']]
fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] +
eegfwd['sol_grad']['nrow'])
fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] +
eegfwd['sol_grad']['row_names'])
fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']
logger.info(' MEG and EEG forward solutions combined')
elif megfwd is not None:
fwd = megfwd
else:
fwd = eegfwd
return fwd
@verbose
def read_forward_solution(fname, force_fixed=False, surf_ori=False,
include=[], exclude=[], verbose=None):
"""Read a forward solution a.k.a. lead field
Parameters
----------
fname : string
The file name, which should end with -fwd.fif or -fwd.fif.gz.
force_fixed : bool, optional (default False)
Force fixed source orientation mode?
surf_ori : bool, optional (default False)
Use surface-based source coordinate system? Note that force_fixed=True
implies surf_ori=True.
include : list, optional
List of names of channels to include. If empty all channels
are included.
exclude : list, optional
List of names of channels to exclude. If empty include all
channels.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fwd : instance of Forward
The forward solution.
"""
check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
# Open the file, create directory
logger.info('Reading forward solution from %s...' % fname)
f, tree, _ = fiff_open(fname)
with f as fid:
# Find all forward solutions
fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
if len(fwds) == 0:
raise ValueError('No forward solutions in %s' % fname)
# Parent MRI data
parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
raise ValueError('No parent MRI information in %s' % fname)
parent_mri = parent_mri[0]
src = _read_source_spaces_from_tree(fid, tree, add_geom=False)
for s in src:
s['id'] = find_source_space_hemi(s)
fwd = None
# Locate and read the forward solutions
megnode = None
eegnode = None
for k in range(len(fwds)):
tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)
if tag is None:
raise ValueError('Methods not listed for one of the forward '
'solutions')
if tag.data == FIFF.FIFFV_MNE_MEG:
megnode = fwds[k]
elif tag.data == FIFF.FIFFV_MNE_EEG:
eegnode = fwds[k]
megfwd = _read_one(fid, megnode)
if megfwd is not None:
if is_fixed_orient(megfwd):
ori = 'fixed'
else:
ori = 'free'
logger.info(' Read MEG forward solution (%d sources, '
'%d channels, %s orientations)'
% (megfwd['nsource'], megfwd['nchan'], ori))
eegfwd = _read_one(fid, eegnode)
if eegfwd is not None:
if is_fixed_orient(eegfwd):
ori = 'fixed'
else:
ori = 'free'
logger.info(' Read EEG forward solution (%d sources, '
'%d channels, %s orientations)'
% (eegfwd['nsource'], eegfwd['nchan'], ori))
fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)
# Get the MRI <-> head coordinate transformation
tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
if tag is None:
raise ValueError('MRI/head coordinate transformation not found')
mri_head_t = tag.data
if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
mri_head_t = invert_transform(mri_head_t)
if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI
or mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
fid.close()
raise ValueError('MRI/head coordinate transformation not '
'found')
fwd['mri_head_t'] = mri_head_t
#
# get parent MEG info
#
fwd['info'] = _read_forward_meas_info(tree, fid)
# MNE environment
parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if len(parent_env) > 0:
parent_env = parent_env[0]
tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)
if tag is not None:
fwd['info']['working_dir'] = tag.data
tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)
if tag is not None:
fwd['info']['command_line'] = tag.data
# Transform the source spaces to the correct coordinate frame
# if necessary
# Make sure forward solution is in either the MRI or HEAD coordinate frame
if (fwd['coord_frame'] != FIFF.FIFFV_COORD_MRI and
fwd['coord_frame'] != FIFF.FIFFV_COORD_HEAD):
raise ValueError('Only forward solutions computed in MRI or head '
'coordinates are acceptable')
nuse = 0
# Transform each source space to the HEAD or MRI coordinate frame,
# depending on the coordinate frame of the forward solution
# NOTE: the function transform_surface_to will also work on discrete and
# volume sources
for s in src:
try:
s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)
except Exception as inst:
raise ValueError('Could not transform source space (%s)' % inst)
nuse += s['nuse']
# Make sure the number of sources match after transformation
if nuse != fwd['nsource']:
raise ValueError('Source spaces do not match the forward solution.')
logger.info(' Source spaces transformed to the forward solution '
'coordinate frame')
fwd['src'] = src
# Handle the source locations and orientations
fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]
for ss in src], axis=0)
# deal with transformations, storing orig copies so transforms can be done
# as necessary later
fwd['_orig_source_ori'] = fwd['source_ori']
convert_forward_solution(fwd, surf_ori, force_fixed, copy=False)
fwd = pick_channels_forward(fwd, include=include, exclude=exclude)
return Forward(fwd)
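# --- Illustrative usage (editor's note) ---------------------------------------
# Minimal sketch of loading a forward operator with surface-based orientations;
# 'sample-fwd.fif' is a placeholder file name, not something shipped with this
# module.
#
#     >>> fwd = read_forward_solution('sample-fwd.fif', surf_ori=True)
#     >>> fwd['sol']['data'].shape   # (n_channels, 3 * n_sources) for free ori
# -------------------------------------------------------------------------------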
@verbose
def convert_forward_solution(fwd, surf_ori=False, force_fixed=False,
copy=True, verbose=None):
"""Convert forward solution between different source orientations
Parameters
----------
fwd : dict
The forward solution to modify.
surf_ori : bool, optional (default False)
Use surface-based source coordinate system? Note that force_fixed=True
implies surf_ori=True.
force_fixed : bool, optional (default False)
Force fixed source orientation mode?
copy : bool, optional (default True)
If False, operation will be done in-place (modifying the input).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fwd : dict
The modified forward solution.
"""
if copy is True:
fwd = deepcopy(fwd)
# We need to change these entries (only):
# 1. source_nn
# 2. sol['data']
# 3. sol['ncol']
# 4. sol_grad['data']
# 5. sol_grad['ncol']
# 6. source_ori
if is_fixed_orient(fwd, orig=True) or force_fixed: # Fixed
nuse = 0
fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]
for s in fwd['src']], axis=0)
# Modify the forward solution for fixed source orientations
if not is_fixed_orient(fwd, orig=True):
logger.info(' Changing to fixed-orientation forward '
'solution with surface-based source orientations...')
fix_rot = _block_diag(fwd['source_nn'].T, 1)
# newer versions of numpy require explicit casting here, so *= no
# longer works
fwd['sol']['data'] = (fwd['_orig_sol']
* fix_rot).astype('float32')
fwd['sol']['ncol'] = fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
if fwd['sol_grad'] is not None:
x = sparse_block_diag([fix_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
logger.info(' [done]')
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
fwd['surf_ori'] = True
elif surf_ori: # Free, surf-oriented
# Rotate the local source coordinate systems
nuse_total = sum([s['nuse'] for s in fwd['src']])
fwd['source_nn'] = np.empty((3 * nuse_total, 3), dtype=np.float)
logger.info(' Converting to surface-based source orientations...')
if fwd['src'][0]['patch_inds'] is not None:
use_ave_nn = True
logger.info(' Average patch normals will be employed in the '
'rotation to the local surface coordinates....')
else:
use_ave_nn = False
# Actually determine the source orientations
nuse = 0
pp = 0
for s in fwd['src']:
for p in range(s['nuse']):
# Project out the surface normal and compute SVD
if use_ave_nn is True:
nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]
nn = np.sum(nn, axis=0)[:, np.newaxis]
nn /= linalg.norm(nn)
else:
nn = s['nn'][s['vertno'][p], :][:, np.newaxis]
U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)
# Make sure that ez is in the direction of nn
if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:
U *= -1.0
fwd['source_nn'][pp:pp + 3, :] = U.T
pp += 3
nuse += s['nuse']
# Rotate the solution components as well
surf_rot = _block_diag(fwd['source_nn'].T, 3)
fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot
fwd['sol']['ncol'] = 3 * fwd['nsource']
if fwd['sol_grad'] is not None:
x = sparse_block_diag([surf_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
logger.info('[done]')
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = True
else: # Free, cartesian
logger.info(' Cartesian source orientations...')
fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
fwd['sol']['data'] = fwd['_orig_sol'].copy()
fwd['sol']['ncol'] = 3 * fwd['nsource']
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy()
fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = False
logger.info('[done]')
return fwd
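# --- Illustrative usage (editor's note) ---------------------------------------
# Sketch of converting an already-loaded free-orientation operator ``fwd`` (for
# instance the one returned by read_forward_solution above) to fixed
# orientation.
#
#     >>> fwd_fixed = convert_forward_solution(fwd, surf_ori=True,
#     ...                                      force_fixed=True)
#     >>> fwd_fixed['sol']['data'].shape   # now (n_channels, n_sources)
# -------------------------------------------------------------------------------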
@verbose
def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
"""Write forward solution to a file
Parameters
----------
fname : str
File name to save the forward solution to. It should end with -fwd.fif
or -fwd.fif.gz.
fwd : dict
Forward solution.
overwrite : bool
If True, overwrite destination file (if it exists).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
# check for file existence
_check_fname(fname, overwrite)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
#
# MNE env
#
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = fwd['info'].get('working_dir', None)
if data is not None:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = fwd['info'].get('command_line', None)
if data is not None:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
#
# Information from the MRI file
#
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])
if fwd['info']['mri_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])
# store the MRI to HEAD transform in MRI file
write_coord_trans(fid, fwd['info']['mri_head_t'])
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# write measurement info
write_forward_meas_info(fid, fwd['info'])
# invert our original source space transform
src = list()
for s in fwd['src']:
s = deepcopy(s)
try:
# returns source space to original coordinate frame
# usually MRI
s = transform_surface_to(s, fwd['mri_head_t']['from'],
fwd['mri_head_t'])
except Exception as inst:
raise ValueError('Could not transform source space (%s)' % inst)
src.append(s)
#
# Write the source spaces (again)
#
_write_source_spaces_to_fid(fid, src)
n_vert = sum([ss['nuse'] for ss in src])
n_col = fwd['sol']['data'].shape[1]
if fwd['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
assert n_col == n_vert
else:
assert n_col == 3 * n_vert
# Undo surf_ori rotation
sol = fwd['sol']['data']
if fwd['sol_grad'] is not None:
sol_grad = fwd['sol_grad']['data']
else:
sol_grad = None
if fwd['surf_ori'] is True:
inv_rot = _inv_block_diag(fwd['source_nn'].T, 3)
sol = sol * inv_rot
if sol_grad is not None:
sol_grad = sol_grad * sparse_block_diag([inv_rot] * 3) # dot prod
#
# MEG forward solution
#
picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False,
exclude=[])
picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False,
exclude=[])
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]
row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]
if n_meg > 0:
meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,
row_names=row_names_meg, col_names=[])
meg_solution = _transpose_named_matrix(meg_solution, copy=False)
start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd['source_ori'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
write_int(fid, FIFF.FIFF_NCHAN, n_meg)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)
if sol_grad is not None:
meg_solution_grad = dict(data=sol_grad[picks_meg],
nrow=n_meg, ncol=n_col * 3,
row_names=row_names_meg, col_names=[])
meg_solution_grad = _transpose_named_matrix(meg_solution_grad,
copy=False)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
meg_solution_grad)
end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
#
# EEG forward solution
#
if n_eeg > 0:
eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,
row_names=row_names_eeg, col_names=[])
eeg_solution = _transpose_named_matrix(eeg_solution, copy=False)
start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd['source_ori'])
write_int(fid, FIFF.FIFF_NCHAN, n_eeg)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)
if sol_grad is not None:
eeg_solution_grad = dict(data=sol_grad[picks_eeg],
nrow=n_eeg, ncol=n_col * 3,
row_names=row_names_eeg, col_names=[])
eeg_solution_grad = _transpose_named_matrix(eeg_solution_grad,
copy=False)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
eeg_solution_grad)
end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
def _to_fixed_ori(forward):
"""Helper to convert the forward solution to fixed ori from free"""
if not forward['surf_ori'] or is_fixed_orient(forward):
raise ValueError('Only surface-oriented, free-orientation forward '
                         'solutions can be converted to fixed orientation')
forward['sol']['data'] = forward['sol']['data'][:, 2::3]
    forward['sol']['ncol'] = forward['sol']['ncol'] // 3
forward['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
logger.info(' Converted the forward solution into the '
'fixed-orientation mode.')
return forward
def is_fixed_orient(forward, orig=False):
"""Has forward operator fixed orientation?
"""
if orig: # if we want to know about the original version
fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
else: # most of the time we want to know about the current version
fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
return fixed_ori
def write_forward_meas_info(fid, info):
"""Write measurement info stored in forward solution
Parameters
----------
fid : file id
The file id
info : instance of mne.io.meas_info.Info
The measurement info.
"""
#
# Information from the MEG file
#
start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# get transformation from CTF and DEVICE to HEAD coordinate frame
meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))
if meg_head_t is None:
fid.close()
raise ValueError('Head<-->sensor transform not found')
write_coord_trans(fid, meg_head_t)
if 'chs' in info:
# Channel information
write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))
for k, c in enumerate(info['chs']):
# Scan numbers may have been messed up
c = deepcopy(c)
c['scanno'] = k + 1
write_ch_info(fid, c)
if 'bads' in info and len(info['bads']) > 0:
# Bad channels
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
@verbose
def compute_orient_prior(forward, loose=0.2, verbose=None):
"""Compute orientation prior
Parameters
----------
forward : dict
Forward operator.
loose : float in [0, 1] or None
The loose orientation parameter.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
orient_prior : array
Orientation priors.
"""
is_fixed_ori = is_fixed_orient(forward)
n_sources = forward['sol']['data'].shape[1]
if loose is not None:
if not (0 <= loose <= 1):
raise ValueError('loose value should be smaller than 1 and bigger '
'than 0, or None for not loose orientations.')
if loose < 1 and not forward['surf_ori']:
raise ValueError('Forward operator is not oriented in surface '
'coordinates. loose parameter should be None '
'not %s.' % loose)
if is_fixed_ori:
warnings.warn('Ignoring loose parameter with forward operator '
'with fixed orientation.')
orient_prior = np.ones(n_sources, dtype=np.float)
if (not is_fixed_ori) and (loose is not None) and (loose < 1):
logger.info('Applying loose dipole orientations. Loose value '
'of %s.' % loose)
orient_prior[np.mod(np.arange(n_sources), 3) != 2] *= loose
return orient_prior
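# --- Illustrative sketch (editor's note) --------------------------------------
# For a free-orientation, surface-oriented operator the loose prior simply
# down-weights the two tangential components of each source triplet: with
# loose=0.2 and two sources the returned array is
# [0.2, 0.2, 1.0, 0.2, 0.2, 1.0] (every third entry is the surface-normal
# component and keeps weight 1).
# -------------------------------------------------------------------------------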
def _restrict_gain_matrix(G, info):
"""Restrict gain matrix entries for optimal depth weighting"""
# Figure out which ones have been used
if not (len(info['chs']) == G.shape[0]):
raise ValueError("G.shape[0] and length of info['chs'] do not match: "
"%d != %d" % (G.shape[0], len(info['chs'])))
sel = pick_types(info, meg='grad', ref_meg=False, exclude=[])
if len(sel) > 0:
G = G[sel]
logger.info(' %d planar channels' % len(sel))
else:
sel = pick_types(info, meg='mag', ref_meg=False, exclude=[])
if len(sel) > 0:
G = G[sel]
logger.info(' %d magnetometer or axial gradiometer '
'channels' % len(sel))
else:
sel = pick_types(info, meg=False, eeg=True, exclude=[])
if len(sel) > 0:
G = G[sel]
logger.info(' %d EEG channels' % len(sel))
else:
logger.warning('Could not find MEG or EEG channels')
return G
def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
patch_areas=None, limit_depth_chs=False):
"""Compute weighting for depth prior
"""
logger.info('Creating the depth weighting matrix...')
# If possible, pick best depth-weighting channels
if limit_depth_chs is True:
G = _restrict_gain_matrix(G, gain_info)
# Compute the gain matrix
if is_fixed_ori:
d = np.sum(G ** 2, axis=0)
else:
n_pos = G.shape[1] // 3
d = np.zeros(n_pos)
for k in range(n_pos):
Gk = G[:, 3 * k:3 * (k + 1)]
d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]
# XXX Currently the fwd solns never have "patch_areas" defined
if patch_areas is not None:
d /= patch_areas ** 2
logger.info(' Patch areas taken into account in the depth '
'weighting')
w = 1.0 / d
ws = np.sort(w)
weight_limit = limit ** 2
if limit_depth_chs is False:
        # match old mne-python behavior
ind = np.argmin(ws)
n_limit = ind
limit = ws[ind] * weight_limit
wpp = (np.minimum(w / limit, 1)) ** exp
else:
# match C code behavior
limit = ws[-1]
n_limit = len(d)
if ws[-1] > weight_limit * ws[0]:
ind = np.where(ws > weight_limit * ws[0])[0][0]
limit = ws[ind]
n_limit = ind
logger.info(' limit = %d/%d = %f'
% (n_limit + 1, len(d),
np.sqrt(limit / ws[0])))
scale = 1.0 / limit
logger.info(' scale = %g exp = %g' % (scale, exp))
wpp = np.minimum(w / limit, 1) ** exp
depth_prior = wpp if is_fixed_ori else np.repeat(wpp, 3)
return depth_prior
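# --- Illustrative sketch (editor's note) --------------------------------------
# The depth weighting boils down to w_k = 1 / d_k per source, where d_k is the
# squared gain norm (fixed orientation) or the largest singular value of
# G_k^T G_k (free orientation); the returned prior is min(w_k / limit, 1)**exp,
# repeated three times per source in the free-orientation case.
# -------------------------------------------------------------------------------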
def _stc_src_sel(src, stc):
""" Select the vertex indices of a source space using a source estimate
"""
src_sel_lh = np.intersect1d(src[0]['vertno'], stc.vertices[0])
src_sel_lh = np.searchsorted(src[0]['vertno'], src_sel_lh)
src_sel_rh = np.intersect1d(src[1]['vertno'], stc.vertices[1])
src_sel_rh = (np.searchsorted(src[1]['vertno'], src_sel_rh)
+ len(src[0]['vertno']))
src_sel = np.r_[src_sel_lh, src_sel_rh]
return src_sel
def _fill_measurement_info(info, fwd, sfreq):
""" Fill the measurement info of a Raw or Evoked object
"""
sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])
info = pick_info(info, sel)
info['bads'] = []
info['filename'] = None
# this is probably correct based on what's done in meas_info.py...
info['meas_id'] = fwd['info']['meas_id']
info['file_id'] = info['meas_id']
now = time()
sec = np.floor(now)
usec = 1e6 * (now - sec)
info['meas_date'] = np.array([sec, usec], dtype=np.int32)
info['highpass'] = 0.0
info['lowpass'] = sfreq / 2.0
info['sfreq'] = sfreq
info['projs'] = []
return info
@verbose
def _apply_forward(fwd, stc, start=None, stop=None, verbose=None):
""" Apply forward model and return data, times, ch_names
"""
if not is_fixed_orient(fwd):
raise ValueError('Only fixed-orientation forward operators are '
'supported.')
if np.all(stc.data > 0):
warnings.warn('Source estimate only contains currents with positive '
'values. Use pick_ori="normal" when computing the '
'inverse to compute currents not current magnitudes.')
max_cur = np.max(np.abs(stc.data))
if max_cur > 1e-7: # 100 nAm threshold for warning
warnings.warn('The maximum current magnitude is %0.1f nAm, which is '
'very large. Are you trying to apply the forward model '
'to dSPM values? The result will only be correct if '
'currents are used.' % (1e9 * max_cur))
src_sel = _stc_src_sel(fwd['src'], stc)
n_src = sum([len(v) for v in stc.vertices])
if len(src_sel) != n_src:
raise RuntimeError('Only %i of %i SourceEstimate vertices found in '
'fwd' % (len(src_sel), n_src))
gain = fwd['sol']['data'][:, src_sel]
logger.info('Projecting source estimate to sensor space...')
data = np.dot(gain, stc.data[:, start:stop])
logger.info('[done]')
times = deepcopy(stc.times[start:stop])
return data, times
@verbose
def apply_forward(fwd, stc, evoked_template, start=None, stop=None,
verbose=None):
"""
Project source space currents to sensor space using a forward operator.
The sensor space data is computed for all channels present in fwd. Use
pick_channels_forward or pick_types_forward to restrict the solution to a
subset of channels.
The function returns an Evoked object, which is constructed from
evoked_template. The evoked_template should be from the same MEG system on
which the original data was acquired. An exception will be raised if the
forward operator contains channels that are not present in the template.
Parameters
----------
fwd : dict
Forward operator to use. Has to be fixed-orientation.
stc : SourceEstimate
The source estimate from which the sensor space data is computed.
evoked_template : Evoked object
Evoked object used as template to generate the output argument.
start : int, optional
        Index of first time sample (index, not time in seconds).
stop : int, optional
        Index of first time sample not to include (index, not time in seconds).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked : Evoked
Evoked object with computed sensor space data.
See Also
--------
apply_forward_raw: Compute sensor space data and return a Raw object.
"""
# make sure evoked_template contains all channels in fwd
for ch_name in fwd['sol']['row_names']:
if ch_name not in evoked_template.ch_names:
raise ValueError('Channel %s of forward operator not present in '
'evoked_template.' % ch_name)
# project the source estimate to the sensor space
data, times = _apply_forward(fwd, stc, start, stop)
# store sensor data in an Evoked object using the template
evoked = deepcopy(evoked_template)
evoked.nave = 1
evoked.data = data
evoked.times = times
sfreq = float(1.0 / stc.tstep)
evoked.first = int(np.round(evoked.times[0] * sfreq))
evoked.last = evoked.first + evoked.data.shape[1] - 1
# fill the measurement info
evoked.info = _fill_measurement_info(evoked.info, fwd, sfreq)
return evoked
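# --- Illustrative usage (editor's note) ---------------------------------------
# Sketch of simulating sensor data from a source estimate; ``fwd_fixed``,
# ``stc`` and ``evoked_template`` are assumed to exist already, and the forward
# operator must be fixed-orientation (see the docstring above).
#
#     >>> evoked_sim = apply_forward(fwd_fixed, stc, evoked_template)
#     >>> evoked_sim.data.shape   # (n_channels_in_fwd, n_times_in_stc)
# -------------------------------------------------------------------------------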
@verbose
def apply_forward_raw(fwd, stc, raw_template, start=None, stop=None,
verbose=None):
"""Project source space currents to sensor space using a forward operator
The sensor space data is computed for all channels present in fwd. Use
pick_channels_forward or pick_types_forward to restrict the solution to a
subset of channels.
The function returns a Raw object, which is constructed from raw_template.
The raw_template should be from the same MEG system on which the original
data was acquired. An exception will be raised if the forward operator
contains channels that are not present in the template.
Parameters
----------
fwd : dict
Forward operator to use. Has to be fixed-orientation.
stc : SourceEstimate
The source estimate from which the sensor space data is computed.
raw_template : Raw object
Raw object used as template to generate the output argument.
start : int, optional
        Index of first time sample (index, not time in seconds).
stop : int, optional
        Index of first time sample not to include (index, not time in seconds).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : Raw object
Raw object with computed sensor space data.
See Also
--------
apply_forward: Compute sensor space data and return an Evoked object.
"""
# make sure raw_template contains all channels in fwd
for ch_name in fwd['sol']['row_names']:
if ch_name not in raw_template.ch_names:
raise ValueError('Channel %s of forward operator not present in '
'raw_template.' % ch_name)
# project the source estimate to the sensor space
data, times = _apply_forward(fwd, stc, start, stop)
# store sensor data in Raw object using the template
raw = raw_template.copy()
raw.preload = True
raw._data = data
raw._times = times
sfreq = float(1.0 / stc.tstep)
raw.first_samp = int(np.round(raw._times[0] * sfreq))
raw.last_samp = raw.first_samp + raw._data.shape[1] - 1
# fill the measurement info
raw.info = _fill_measurement_info(raw.info, fwd, sfreq)
raw.info['projs'] = []
raw._projector = None
return raw
def restrict_forward_to_stc(fwd, stc):
"""Restricts forward operator to active sources in a source estimate
Parameters
----------
fwd : dict
Forward operator.
stc : SourceEstimate
Source estimate.
Returns
-------
fwd_out : dict
Restricted forward operator.
"""
fwd_out = deepcopy(fwd)
src_sel = _stc_src_sel(fwd['src'], stc)
fwd_out['source_rr'] = fwd['source_rr'][src_sel]
fwd_out['nsource'] = len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
fwd_out['source_nn'] = fwd['source_nn'][idx]
fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]
fwd_out['sol']['ncol'] = len(idx)
for i in range(2):
fwd_out['src'][i]['vertno'] = stc.vertices[i]
fwd_out['src'][i]['nuse'] = len(stc.vertices[i])
fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
fwd_out['src'][i]['inuse'].fill(0)
fwd_out['src'][i]['inuse'][stc.vertices[i]] = 1
fwd_out['src'][i]['use_tris'] = np.array([], int)
fwd_out['src'][i]['nuse_tri'] = np.array([0])
return fwd_out
def restrict_forward_to_label(fwd, labels):
"""Restricts forward operator to labels
Parameters
----------
fwd : dict
Forward operator.
labels : label object | list
Label object or list of label objects.
Returns
-------
fwd_out : dict
Restricted forward operator.
"""
if not isinstance(labels, list):
labels = [labels]
fwd_out = deepcopy(fwd)
fwd_out['source_rr'] = np.zeros((0, 3))
fwd_out['nsource'] = 0
fwd_out['source_nn'] = np.zeros((0, 3))
fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))
fwd_out['sol']['ncol'] = 0
for i in range(2):
fwd_out['src'][i]['vertno'] = np.array([], int)
fwd_out['src'][i]['nuse'] = 0
fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
fwd_out['src'][i]['inuse'].fill(0)
fwd_out['src'][i]['use_tris'] = np.array([], int)
fwd_out['src'][i]['nuse_tri'] = np.array([0])
for label in labels:
if label.hemi == 'lh':
i = 0
src_sel = np.intersect1d(fwd['src'][0]['vertno'], label.vertices)
src_sel = np.searchsorted(fwd['src'][0]['vertno'], src_sel)
else:
i = 1
src_sel = np.intersect1d(fwd['src'][1]['vertno'], label.vertices)
src_sel = (np.searchsorted(fwd['src'][1]['vertno'], src_sel)
+ len(fwd['src'][0]['vertno']))
fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],
fwd['source_rr'][src_sel]])
fwd_out['nsource'] += len(src_sel)
fwd_out['src'][i]['vertno'] = np.r_[fwd_out['src'][i]['vertno'],
src_sel]
fwd_out['src'][i]['nuse'] += len(src_sel)
fwd_out['src'][i]['inuse'][src_sel] = 1
if is_fixed_orient(fwd):
idx = src_sel
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
fwd_out['source_nn'] = np.vstack([fwd_out['source_nn'],
fwd['source_nn'][idx]])
fwd_out['sol']['data'] = np.hstack([fwd_out['sol']['data'],
fwd['sol']['data'][:, idx]])
fwd_out['sol']['ncol'] += len(idx)
return fwd_out
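# --- Illustrative usage (editor's note) ---------------------------------------
# Sketch of restricting a forward operator to one or more cortical labels;
# ``label_lh`` is a placeholder for a Label object read elsewhere (e.g. with
# mne.read_label), not something defined in this module.
#
#     >>> fwd_small = restrict_forward_to_label(fwd, [label_lh])
#     >>> fwd_small['nsource'] <= fwd['nsource']
#     True
# -------------------------------------------------------------------------------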
@verbose
def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
mindist=None, bem=None, mri=None, trans=None,
eeg=True, meg=True, fixed=False, grad=False,
mricoord=False, overwrite=False, subjects_dir=None,
verbose=None):
"""Calculate a forward solution for a subject using MNE-C routines
This function wraps to mne_do_forward_solution, so the mne
command-line tools must be installed and accessible from Python.
Parameters
----------
subject : str
Name of the subject.
meas : Raw | Epochs | Evoked | str
If Raw or Epochs, a temporary evoked file will be created and
saved to a temporary directory. If str, then it should be a
filename to a file with measurement information the mne
command-line tools can understand (i.e., raw or evoked).
fname : str | None
Destination forward solution filename. If None, the solution
will be created in a temporary directory, loaded, and deleted.
src : str | None
Source space name. If None, the MNE default is used.
spacing : str
The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a
recursively subdivided icosahedron, or ``'oct#'`` for a recursively
subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.
mindist : float | str | None
Minimum distance of sources from inner skull surface (in mm).
If None, the MNE default value is used. If string, 'all'
indicates to include all points.
bem : str | None
Name of the BEM to use (e.g., "sample-5120-5120-5120"). If None
(Default), the MNE default will be used.
trans : str | None
File name of the trans file. If None, mri must not be None.
mri : dict | str | None
Either a transformation (usually made using mne_analyze) or an
info dict (usually opened using read_trans()), or a filename.
If dict, the trans will be saved in a temporary directory. If
None, trans must not be None.
eeg : bool
If True (Default), include EEG computations.
meg : bool
If True (Default), include MEG computations.
fixed : bool
If True, make a fixed-orientation forward solution (Default:
False). Note that fixed-orientation inverses can still be
created from free-orientation forward solutions.
grad : bool
If True, compute the gradient of the field with respect to the
dipole coordinates as well (Default: False).
mricoord : bool
If True, calculate in MRI coordinates (Default: False).
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fwd : dict
The generated forward solution.
"""
if not has_mne_c():
raise RuntimeError('mne command line tools could not be found')
# check for file existence
temp_dir = tempfile.mkdtemp()
if fname is None:
fname = op.join(temp_dir, 'temp-fwd.fif')
_check_fname(fname, overwrite)
if not isinstance(subject, string_types):
raise ValueError('subject must be a string')
# check for meas to exist as string, or try to make evoked
meas_data = None
if isinstance(meas, string_types):
if not op.isfile(meas):
raise IOError('measurement file "%s" could not be found' % meas)
elif isinstance(meas, _BaseRaw):
events = np.array([[0, 0, 1]], dtype=np.int)
end = 1. / meas.info['sfreq']
meas_data = Epochs(meas, events, 1, 0, end, proj=False).average()
elif isinstance(meas, Epochs):
meas_data = meas.average()
elif isinstance(meas, Evoked):
meas_data = meas
else:
raise ValueError('meas must be string, Raw, Epochs, or Evoked')
if meas_data is not None:
meas = op.join(temp_dir, 'evoked.fif')
write_evokeds(meas, meas_data)
# deal with trans/mri
if mri is not None and trans is not None:
raise ValueError('trans and mri cannot both be specified')
if mri is None and trans is None:
# MNE allows this to default to a trans/mri in the subject's dir,
# but let's be safe here and force the user to pass us a trans/mri
raise ValueError('Either trans or mri must be specified')
if trans is not None:
if not isinstance(trans, string_types):
raise ValueError('trans must be a string')
if not op.isfile(trans):
raise IOError('trans file "%s" not found' % trans)
if mri is not None:
# deal with trans
if not isinstance(mri, string_types):
if isinstance(mri, dict):
mri_data = deepcopy(mri)
mri = op.join(temp_dir, 'mri-trans.fif')
try:
write_trans(mri, mri_data)
except Exception:
raise IOError('mri was a dict, but could not be '
'written to disk as a transform file')
else:
raise ValueError('trans must be a string or dict (trans)')
if not op.isfile(mri):
        raise IOError('mri file "%s" could not be found' % mri)
# deal with meg/eeg
if not meg and not eeg:
raise ValueError('meg or eeg (or both) must be True')
path, fname = op.split(fname)
if not op.splitext(fname)[1] == '.fif':
raise ValueError('Forward name does not end with .fif')
path = op.abspath(path)
# deal with mindist
if mindist is not None:
if isinstance(mindist, string_types):
if not mindist.lower() == 'all':
raise ValueError('mindist, if string, must be "all"')
mindist = ['--all']
else:
mindist = ['--mindist', '%g' % mindist]
# src, spacing, bem
if src is not None:
if not isinstance(src, string_types):
raise ValueError('src must be a string or None')
if spacing is not None:
if not isinstance(spacing, string_types):
raise ValueError('spacing must be a string or None')
if bem is not None:
if not isinstance(bem, string_types):
raise ValueError('bem must be a string or None')
# put together the actual call
cmd = ['mne_do_forward_solution',
'--subject', subject,
'--meas', meas,
'--fwd', fname,
'--destdir', path]
if src is not None:
cmd += ['--src', src]
if spacing is not None:
if spacing.isdigit():
pass # spacing in mm
else:
# allow both "ico4" and "ico-4" style values
            match = re.match(r"(oct|ico)-?(\d+)$", spacing)
if match is None:
raise ValueError("Invalid spacing parameter: %r" % spacing)
spacing = '-'.join(match.groups())
cmd += ['--spacing', spacing]
if mindist is not None:
cmd += mindist
if bem is not None:
cmd += ['--bem', bem]
if mri is not None:
cmd += ['--mri', '%s' % mri]
if trans is not None:
cmd += ['--trans', '%s' % trans]
if not meg:
cmd.append('--eegonly')
if not eeg:
cmd.append('--megonly')
if fixed:
cmd.append('--fixed')
if grad:
cmd.append('--grad')
if mricoord:
cmd.append('--mricoord')
if overwrite:
cmd.append('--overwrite')
env = os.environ.copy()
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
env['SUBJECTS_DIR'] = subjects_dir
try:
logger.info('Running forward solution generation command with '
'subjects_dir %s' % subjects_dir)
run_subprocess(cmd, env=env)
except Exception:
raise
else:
fwd = read_forward_solution(op.join(path, fname), verbose=False)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
return fwd
@verbose
def average_forward_solutions(fwds, weights=None):
"""Average forward solutions
Parameters
----------
fwds : list of dict
Forward solutions to average. Each entry (dict) should be a
forward solution.
weights : array | None
Weights to apply to each forward solution in averaging. If None,
forward solutions will be equally weighted. Weights must be
non-negative, and will be adjusted to sum to one.
Returns
-------
fwd : dict
The averaged forward solution.
"""
# check for fwds being a list
if not isinstance(fwds, list):
raise TypeError('fwds must be a list')
if not len(fwds) > 0:
raise ValueError('fwds must not be empty')
# check weights
if weights is None:
weights = np.ones(len(fwds))
weights = np.asanyarray(weights) # in case it's a list, convert it
if not np.all(weights >= 0):
raise ValueError('weights must be non-negative')
if not len(weights) == len(fwds):
raise ValueError('weights must be None or the same length as fwds')
w_sum = np.sum(weights)
if not w_sum > 0:
raise ValueError('weights cannot all be zero')
weights /= w_sum
# check our forward solutions
for fwd in fwds:
# check to make sure it's a forward solution
if not isinstance(fwd, dict):
raise TypeError('Each entry in fwds must be a dict')
# check to make sure the dict is actually a fwd
check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',
'source_rr', 'source_ori', 'surf_ori', 'coord_frame',
'mri_head_t', 'nsource']
if not all([key in fwd for key in check_keys]):
raise KeyError('forward solution dict does not have all standard '
'entries, cannot compute average.')
# check forward solution compatibility
if any([fwd['sol'][k] != fwds[0]['sol'][k]
for fwd in fwds[1:] for k in ['nrow', 'ncol']]):
raise ValueError('Forward solutions have incompatible dimensions')
if any([fwd[k] != fwds[0][k] for fwd in fwds[1:]
for k in ['source_ori', 'surf_ori', 'coord_frame']]):
raise ValueError('Forward solutions have incompatible orientations')
# actually average them (solutions and gradients)
fwd_ave = deepcopy(fwds[0])
fwd_ave['sol']['data'] *= weights[0]
fwd_ave['_orig_sol'] *= weights[0]
for fwd, w in zip(fwds[1:], weights[1:]):
fwd_ave['sol']['data'] += w * fwd['sol']['data']
fwd_ave['_orig_sol'] += w * fwd['_orig_sol']
if fwd_ave['sol_grad'] is not None:
fwd_ave['sol_grad']['data'] *= weights[0]
fwd_ave['_orig_sol_grad'] *= weights[0]
for fwd, w in zip(fwds[1:], weights[1:]):
fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data']
fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad']
return fwd_ave
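# --- Illustrative usage (editor's note) ---------------------------------------
# Sketch of averaging two forward solutions with unequal weights; ``fwd_a`` and
# ``fwd_b`` are assumed to be compatible operators (same dimensions and
# orientation). Weights are normalised to sum to one internally, so [3, 1]
# behaves like [0.75, 0.25].
#
#     >>> fwd_ave = average_forward_solutions([fwd_a, fwd_b], weights=[3, 1])
# -------------------------------------------------------------------------------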
| [] | [] | [] | [] | [] | python | 0 | 0 | |
vetApp/vetApp/asgi.py | """
ASGI config for vetApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vetApp.settings")
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
hood/settings.py | from pathlib import Path
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
#BASE_DIR = Path(__file__).resolve().parent.parent
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = 'j!p_&z#f6ap_bzzb3iw(=i%tfb$voyylu!75cqcp%f_=v%hp%c'
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
#ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'widget_tweaks',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'hood.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hood.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = os.environ.get('DEBUG', False)
# development
if config('MODE') == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
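# --- Illustrative .env sketch (editor's note) ---------------------------------
# The settings above are driven by python-decouple; a matching .env file could
# look like the following. All values are placeholders, not real credentials.
#
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DB_NAME=hood
#   DB_USER=postgres
#   DB_PASSWORD=postgres
#   DB_HOST=127.0.0.1
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   DATABASE_URL=postgres://user:pass@host:5432/hood   # used when MODE != dev
# -------------------------------------------------------------------------------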
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = 'dashboard'
LOGOUT_REDIRECT_URL = 'login'
django_heroku.settings(locals())
| [] | [] | ["DEBUG"] | [] | ["DEBUG"] | python | 1 | 0 |
tests/main_test.go | package tests
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
"github.com/letsgo-framework/letsgo/database"
letslog "github.com/letsgo-framework/letsgo/log"
"github.com/letsgo-framework/letsgo/routes"
"github.com/letsgo-framework/letsgo/types"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/readpref"
. "gopkg.in/check.v1"
"io/ioutil"
"log"
"net/http"
"os"
"testing"
)
type TestInsert struct {
Name string `form:"name" binding:"required" json:"name" bson:"name"`
}
type TestSuite struct {
srv *gin.Engine
}
var _ = Suite(&TestSuite{})
func TestMain(m *testing.M) {
// Setup log writing
letslog.InitLogFuncs()
	err := godotenv.Load("../.env.testing")
	if err != nil {
		log.Fatal("Error loading .env file")
	}
	database.TestConnect()
	database.DB.Drop(context.Background())
s := TestSuite{
srv: routes.PaveRoutes(),
}
go s.srv.Run(os.Getenv("PORT"))
os.Exit(m.Run())
}
func (s *TestSuite) TestGetEnv(c *C) {
dbPort := os.Getenv("DATABASE_PORT")
fmt.Printf("db port %s", dbPort)
if dbPort == "" {
c.Error()
c.Fail()
}
c.Assert(dbPort, Equals, "27017")
}
func (s *TestSuite) TestHelloWorld(c *C) {
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/"
client := &http.Client{}
req, _ := http.NewRequest("GET", requestURL, nil)
resp, err := client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, 200)
}
func (s *TestSuite) TestCredentials(c *C) {
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/credentials/"
client := &http.Client{}
req, _ := http.NewRequest("GET", requestURL, nil)
resp, err := client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, 200)
}
func (s *TestSuite) TestTokenSuccess(c *C) {
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/credentials/"
client := &http.Client{}
req, _ := http.NewRequest("GET", requestURL, nil)
resp, err := client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
responseData, _ := ioutil.ReadAll(resp.Body)
var credResponse types.CredentialResponse
json.Unmarshal(responseData, &credResponse)
requestURL = "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/login?grant_type=client_credentials&client_id=" + credResponse.CLIENT_ID + "&client_secret=" + credResponse.CLIENT_SECRET + "&scope=read"
req, _ = http.NewRequest("GET", requestURL, nil)
resp, err = client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, 200)
}
func (s *TestSuite) TestTokenFail(c *C) {
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/login"
client := &http.Client{}
req, _ := http.NewRequest("GET", requestURL, nil)
resp, err := client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, 401)
}
func (s *TestSuite) TestAccessTokenSuccess(c *C) {
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/credentials/"
client := &http.Client{}
req, _ := http.NewRequest("GET", requestURL, nil)
resp, err := client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
responseData, _ := ioutil.ReadAll(resp.Body)
var credResponse types.CredentialResponse
json.Unmarshal(responseData, &credResponse)
requestURL = "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/login?grant_type=client_credentials&client_id=" + credResponse.CLIENT_ID + "&client_secret=" + credResponse.CLIENT_SECRET + "&scope=read"
req, _ = http.NewRequest("GET", requestURL, nil)
resp, err = client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
respData, _ := ioutil.ReadAll(resp.Body)
var tokenResponse types.TokenResponse
json.Unmarshal(respData, &tokenResponse)
requestURL = "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/auth?access_token=" + tokenResponse.AccessToken
req, _ = http.NewRequest("GET", requestURL, nil)
resp, err = client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, 200)
}
func (s *TestSuite) TestAccessTokenFail(c *C) {
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/credentials/"
client := &http.Client{}
req, _ := http.NewRequest("GET", requestURL, nil)
resp, err := client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
responseData, _ := ioutil.ReadAll(resp.Body)
var credResponse types.CredentialResponse
json.Unmarshal(responseData, &credResponse)
requestURL = "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/login?grant_type=client_credentials&client_id=" + credResponse.CLIENT_ID + "&client_secret=" + credResponse.CLIENT_SECRET + "&scope=read"
req, _ = http.NewRequest("GET", requestURL, nil)
resp, err = client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
respData, _ := ioutil.ReadAll(resp.Body)
var tokenResponse types.TokenResponse
json.Unmarshal(respData, &tokenResponse)
requestURL = "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/auth?access_token=mywrongaccesstoken"
req, _ = http.NewRequest("GET", requestURL, nil)
resp, err = client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, 401)
}
func (s *TestSuite) TestDatabaseTestConnection(c *C) {
database.TestConnect()
err := database.Client.Ping(context.Background(), readpref.Primary())
c.Assert(err, Equals, nil)
}
func (s *TestSuite) TestDatabaseConnection(c *C) {
database.Connect()
err := database.Client.Ping(context.Background(), readpref.Primary())
c.Assert(err, Equals, nil)
}
func (s *TestSuite) TestDBInsert(c *C) {
database.TestConnect()
input := TestInsert{Name: "testname"}
collection := database.DB.Collection("test_collection")
_, err := collection.InsertOne(context.Background(), input)
if err != nil {
c.Error(err)
}
result := TestInsert{}
err = collection.FindOne(context.Background(), bson.M{"name": "testname"}).Decode(&result)
if err != nil {
c.Error(err)
}
c.Assert(result, Equals, input)
}
// Test user registration
func (s *TestSuite) Test1UserRegistration(c *C) {
data := types.User{
Name: "Letsgo User",
Username: "letsgoUs3r",
Password: "qwerty",
}
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/register"
client := &http.Client{}
b := new(bytes.Buffer)
json.NewEncoder(b).Encode(data)
req, _ := http.NewRequest("POST", requestURL, b)
resp, err := client.Do(req)
if err != nil {
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
respData, _ := ioutil.ReadAll(resp.Body)
var user types.User
json.Unmarshal(respData, &user)
c.Assert(resp.StatusCode, Equals, 200)
}
func (s *TestSuite) Test2UserLoginPasswordGrant(c *C) {
requestURL := "http://127.0.0.1" + os.Getenv("PORT") + "/api/v1/login?grant_type=password&client_id=client@letsgo&client_secret=Va4a8bFFhTJZdybnzyhjHjj6P9UVh7UL&scope=read&username=letsgoUs3r&password=qwerty"
req, _ := http.NewRequest("GET", requestURL, nil)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
letslog.Debug(err.Error())
c.Error(err)
c.Fail()
}
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, 200)
}
func Test(t *testing.T) {
TestingT(t)
}
| [
"\"PORT\"",
"\"DATABASE_PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"PORT\""
] | [] | [
"PORT",
"DATABASE_PORT"
] | [] | ["PORT", "DATABASE_PORT"] | go | 2 | 0 | |
tests/vector_test.py | import os
import pickle
import pytest
from pyrsistent._pvector import python_pvector
@pytest.fixture(scope='session', params=['pyrsistent._pvector', 'pvectorc'])
def pvector(request):
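# Parametrized over both the pure-Python and the C implementation so every
# test in this module runs against each; the C variant is skipped when
# PYRSISTENT_NO_C_EXTENSION is set.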
if request.param == 'pvectorc' and os.environ.get('PYRSISTENT_NO_C_EXTENSION'):
pytest.skip('Configured to not run tests for C extension')
m = pytest.importorskip(request.param)
if request.param == 'pyrsistent._pvector':
return m.python_pvector
return m.pvector
def test_literalish_works():
from pyrsistent import pvector, v
assert v() is pvector()
assert v(1, 2) == pvector([1, 2])
def test_empty_initialization(pvector):
seq = pvector()
assert len(seq) == 0
with pytest.raises(IndexError) as error:
x = seq[0]
assert str(error.value) == 'Index out of range: 0'
def test_initialization_with_one_element(pvector):
seq = pvector([3])
assert len(seq) == 1
assert seq[0] == 3
def test_append_works_and_does_not_affect_original_within_tail(pvector):
seq1 = pvector([3])
seq2 = seq1.append(2)
assert len(seq1) == 1
assert seq1[0] == 3
assert len(seq2) == 2
assert seq2[0] == 3
assert seq2[1] == 2
def test_append_works_and_does_not_affect_original_outside_tail(pvector):
original = pvector([])
seq = original
for x in range(33):
seq = seq.append(x)
assert len(seq) == 33
assert seq[0] == 0
assert seq[31] == 31
assert seq[32] == 32
assert len(original) == 0
def test_append_when_root_overflows(pvector):
seq = pvector([])
for x in range(32 * 33):
seq = seq.append(x)
seq = seq.append(10001)
for i in range(32 * 33):
assert seq[i] == i
assert seq[32 * 33] == 10001
def test_multi_level_sequence(pvector):
seq = pvector(range(8000))
seq2 = seq.append(11)
assert seq[5] == 5
assert seq2[7373] == 7373
assert seq2[8000] == 11
def test_multi_level_sequence_from_iterator(pvector):
seq = pvector(iter(range(8000)))
seq2 = seq.append(11)
assert seq[5] == 5
assert seq2[7373] == 7373
assert seq2[8000] == 11
def test_random_insert_within_tail(pvector):
seq = pvector([1, 2, 3])
seq2 = seq.set(1, 4)
assert seq2[1] == 4
assert seq[1] == 2
def test_random_insert_outside_tail(pvector):
seq = pvector(range(20000))
seq2 = seq.set(19000, 4)
assert seq2[19000] == 4
assert seq[19000] == 19000
def test_insert_beyond_end(pvector):
seq = pvector(range(2))
seq2 = seq.set(2, 50)
assert seq2[2] == 50
with pytest.raises(IndexError) as error:
seq2.set(19, 4)
assert str(error.value) == 'Index out of range: 19'
def test_insert_with_index_from_the_end(pvector):
x = pvector([1, 2, 3, 4])
assert x.set(-2, 5) == pvector([1, 2, 5, 4])
def test_insert_with_too_negative_index(pvector):
x = pvector([1, 2, 3, 4])
with pytest.raises(IndexError):
x.set(-5, 17)
def test_iteration(pvector):
y = 0
seq = pvector(range(2000))
for x in seq:
assert x == y
y += 1
assert y == 2000
def test_zero_extend(pvector):
the_list = []
seq = pvector()
seq2 = seq.extend(the_list)
assert seq == seq2
def test_short_extend(pvector):
# Extend within tail length
the_list = [1, 2]
seq = pvector()
seq2 = seq.extend(the_list)
assert len(seq2) == len(the_list)
assert seq2[0] == the_list[0]
assert seq2[1] == the_list[1]
def test_long_extend(pvector):
# Multi level extend
seq = pvector()
length = 2137
# Extend from scratch
seq2 = seq.extend(range(length))
assert len(seq2) == length
for i in range(length):
assert seq2[i] == i
# Extend already filled vector
seq3 = seq2.extend(range(length, length + 5))
assert len(seq3) == length + 5
for i in range(length + 5):
assert seq3[i] == i
# Check that the original vector is still intact
assert len(seq2) == length
for i in range(length):
assert seq2[i] == i
def test_slicing_zero_length_range(pvector):
seq = pvector(range(10))
seq2 = seq[2:2]
assert len(seq2) == 0
def test_slicing_range(pvector):
seq = pvector(range(10))
seq2 = seq[2:4]
assert list(seq2) == [2, 3]
def test_slice_identity(pvector):
# Pvector is immutable, no need to make a copy!
seq = pvector(range(10))
assert seq is seq[::]
def test_slicing_range_with_step(pvector):
seq = pvector(range(100))
seq2 = seq[2:12:3]
assert list(seq2) == [2, 5, 8, 11]
def test_slicing_no_range_but_step(pvector):
seq = pvector(range(10))
seq2 = seq[::2]
assert list(seq2) == [0, 2, 4, 6, 8]
def test_slicing_reverse(pvector):
seq = pvector(range(10))
seq2 = seq[::-1]
assert seq2[0] == 9
assert seq2[1] == 8
assert len(seq2) == 10
seq3 = seq[-3: -7: -1]
assert seq3[0] == 7
assert seq3[3] == 4
assert len(seq3) == 4
def test_delete_index(pvector):
seq = pvector([1, 2, 3])
assert seq.delete(0) == pvector([2, 3])
assert seq.delete(1) == pvector([1, 3])
assert seq.delete(2) == pvector([1, 2])
assert seq.delete(-1) == pvector([1, 2])
assert seq.delete(-2) == pvector([1, 3])
assert seq.delete(-3) == pvector([2, 3])
def test_delete_index_out_of_bounds(pvector):
with pytest.raises(IndexError):
pvector([]).delete(0)
with pytest.raises(IndexError):
pvector([]).delete(-1)
def test_delete_index_malformed(pvector):
with pytest.raises(TypeError):
pvector([]).delete('a')
def test_delete_slice(pvector):
seq = pvector(range(5))
assert seq.delete(1, 4) == pvector([0, 4])
assert seq.delete(4, 1) == seq
assert seq.delete(0, 1) == pvector([1, 2, 3, 4])
assert seq.delete(6, 8) == seq
assert seq.delete(-1, 1) == seq
assert seq.delete(1, -1) == pvector([0, 4])
def test_remove(pvector):
seq = pvector(range(5))
assert seq.remove(3) == pvector([0, 1, 2, 4])
def test_remove_first_only(pvector):
seq = pvector([1, 2, 3, 2, 1])
assert seq.remove(2) == pvector([1, 3, 2, 1])
def test_remove_index_out_of_bounds(pvector):
seq = pvector(range(5))
with pytest.raises(ValueError) as err:
seq.remove(5)
assert 'not in' in str(err.value)
def test_addition(pvector):
v = pvector([1, 2]) + pvector([3, 4])
assert list(v) == [1, 2, 3, 4]
def test_sorted(pvector):
seq = pvector([5, 2, 3, 1])
assert [1, 2, 3, 5] == sorted(seq)
def test_boolean_conversion(pvector):
assert not bool(pvector())
assert bool(pvector([1]))
def test_access_with_negative_index(pvector):
seq = pvector([1, 2, 3, 4])
assert seq[-1] == 4
assert seq[-4] == 1
def test_index_error_positive(pvector):
with pytest.raises(IndexError):
pvector([1, 2, 3])[3]
def test_index_error_negative(pvector):
with pytest.raises(IndexError):
pvector([1, 2, 3])[-4]
def test_is_sequence(pvector):
from pyrsistent._compat import Sequence
assert isinstance(pvector(), Sequence)
def test_empty_repr(pvector):
assert str(pvector()) == "pvector([])"
def test_non_empty_repr(pvector):
v = pvector([1, 2, 3])
assert str(v) == "pvector([1, 2, 3])"
# There's some state that needs to be reset between calls in the native version,
# test that multiple invocations work.
assert str(v) == "pvector([1, 2, 3])"
def test_repr_when_contained_object_contains_reference_to_self(pvector):
x = [1, 2, 3]
v = pvector([1, 2, x])
x.append(v)
assert str(v) == 'pvector([1, 2, [1, 2, 3, pvector([1, 2, [...]])]])'
# Run a GC to provoke any potential misbehavior
import gc
gc.collect()
def test_is_hashable(pvector):
from pyrsistent._compat import Hashable
v = pvector([1, 2, 3])
v2 = pvector([1, 2, 3])
assert hash(v) == hash(v2)
assert isinstance(pvector(), Hashable)
def test_refuses_to_hash_when_members_are_unhashable(pvector):
v = pvector([1, 2, [1, 2]])
with pytest.raises(TypeError):
hash(v)
def test_compare_same_vectors(pvector):
v = pvector([1, 2])
assert v == v
assert pvector() == pvector()
def test_compare_with_other_type_of_object(pvector):
assert pvector([1, 2]) != 'foo'
def test_compare_equal_vectors(pvector):
v1 = pvector([1, 2])
v2 = pvector([1, 2])
assert v1 == v2
assert v1 >= v2
assert v1 <= v2
def test_compare_different_vectors_same_size(pvector):
v1 = pvector([1, 2])
v2 = pvector([1, 3])
assert v1 != v2
def test_compare_different_vectors_different_sizes(pvector):
v1 = pvector([1, 2])
v2 = pvector([1, 2, 3])
assert v1 != v2
def test_compare_lt_gt(pvector):
v1 = pvector([1, 2])
v2 = pvector([1, 2, 3])
assert v1 < v2
assert v2 > v1
def test_repeat(pvector):
v = pvector([1, 2])
assert 5 * pvector() is pvector()
assert v is 1 * v
assert 0 * v is pvector()
assert 2 * pvector([1, 2]) == pvector([1, 2, 1, 2])
assert -3 * pvector([1, 2]) is pvector()
def test_transform_zero_key_length(pvector):
x = pvector([1, 2])
assert x.transform([], 3) == 3
def test_transform_base_case(pvector):
x = pvector([1, 2])
assert x.transform([1], 3) == pvector([1, 3])
def test_transform_nested_vectors(pvector):
x = pvector([1, 2, pvector([3, 4]), 5])
assert x.transform([2, 0], 999) == pvector([1, 2, pvector([999, 4]), 5])
def test_transform_when_appending(pvector):
from pyrsistent import m
x = pvector([1, 2])
assert x.transform([2, 'd'], 999) == pvector([1, 2, m(d=999)])
def test_transform_index_error_out_range(pvector):
x = pvector([1, 2, pvector([3, 4]), 5])
with pytest.raises(IndexError):
x.transform([2, 10], 999)
def test_transform_index_error_wrong_type(pvector):
x = pvector([1, 2, pvector([3, 4]), 5])
with pytest.raises(TypeError):
x.transform([2, 'foo'], 999)
def test_transform_non_setable_type(pvector):
x = pvector([1, 2, 5])
with pytest.raises(TypeError):
x.transform([2, 3], 999)
def test_reverse(pvector):
x = pvector([1, 2, 5])
assert list(reversed(x)) == [5, 2, 1]
def test_contains(pvector):
x = pvector([1, 2, 5])
assert 2 in x
assert 3 not in x
def test_index(pvector):
x = pvector([1, 2, 5])
assert x.index(5) == 2
def test_index_not_found(pvector):
x = pvector([1, 2, 5])
with pytest.raises(ValueError):
x.index(7)
def test_index_not_found_with_limits(pvector):
x = pvector([1, 2, 5, 1])
with pytest.raises(ValueError):
x.index(1, 1, 3)
def test_count(pvector):
x = pvector([1, 2, 5, 1])
assert x.count(1) == 2
assert x.count(4) == 0
def test_empty_truthiness(pvector):
assert pvector([1])
assert not pvector([])
def test_pickling_empty_vector(pvector):
assert pickle.loads(pickle.dumps(pvector(), -1)) == pvector()
def test_pickling_non_empty_vector(pvector):
assert pickle.loads(pickle.dumps(pvector([1, 'a']), -1)) == pvector([1, 'a'])
def test_mset_basic_assignments(pvector):
v1 = pvector(range(2000))
v2 = v1.mset(1, -1, 505, -505, 1998, -1998)
# Original not changed
assert v1[1] == 1
assert v1[505] == 505
assert v1[1998] == 1998
# Other updated
assert v2[1] == -1
assert v2[505] == -505
assert v2[1998] == -1998
def test_mset_odd_number_of_arguments(pvector):
v = pvector([0, 1])
with pytest.raises(TypeError):
v.mset(0, 10, 1)
def test_mset_index_out_of_range(pvector):
v = pvector([0, 1])
with pytest.raises(IndexError):
v.mset(3, 10)
def test_evolver_no_update(pvector):
# This is mostly a test against memory leaks in the C implementation
v = pvector(range(40))
assert v.evolver().persistent() == v
def test_evolver_deallocate_dirty_evolver(pvector):
# Ref count handling in native implementation
v = pvector(range(3220))
e = v.evolver()
e[10] = -10
e[3220] = -3220
def test_evolver_simple_update_in_tree(pvector):
v = pvector(range(35))
e = v.evolver()
e[10] = -10
assert e[10] == -10
assert e.persistent()[10] == -10
def test_evolver_set_out_of_range(pvector):
v = pvector([0])
e = v.evolver()
with pytest.raises(IndexError) as error:
e[10] = 1
assert str(error.value) == "Index out of range: 10"
def test_evolver_multi_level_multi_update_in_tree(pvector):
# This test is mostly to detect memory/ref count issues in the native implementation
v = pvector(range(3500))
e = v.evolver()
# Update differs between first and second time since the
# corresponding node will be marked as dirty the first time only.
e[10] = -10
e[11] = -11
e[10] = -1000
# Update in neighbour node
e[50] = -50
e[50] = -5000
# Update in node in other half of vector
e[3000] = -3000
e[3000] = -30000
# Before freezing
assert e[10] == -1000
assert e[11] == -11
assert e[50] == -5000
assert e[3000] == -30000
# Run a GC to provoke any potential misbehavior
import gc
gc.collect()
v2 = e.persistent()
assert v2[10] == -1000
assert v2[50] == -5000
assert v2[3000] == -30000
# Run a GC to provoke any potential misbehavior
gc.collect()
# After freezing
assert e[10] == -1000
assert e[11] == -11
assert e[50] == -5000
assert e[3000] == -30000
# Original stays the same
assert v[10] == 10
assert v[50] == 50
assert v[3000] == 3000
def test_evolver_simple_update_in_tail(pvector):
v = pvector(range(35))
e = v.evolver()
e[33] = -33
assert e[33] == -33
assert e.persistent()[33] == -33
assert v[33] == 33
def test_evolver_simple_update_just_outside_vector(pvector):
v = pvector()
e = v.evolver()
e[0] = 1
assert e[0] == 1
assert e.persistent()[0] == 1
assert len(v) == 0
def test_evolver_append(pvector):
v = pvector()
e = v.evolver()
e.append(1000)
assert e[0] == 1000
e[0] = 2000
assert e[0] == 2000
assert list(e.persistent()) == [2000]
assert list(v) == []
def test_evolver_extend(pvector):
v = pvector([1000])
e = v.evolver()
e.extend([2000, 3000])
e[2] = 20000
assert list(e.persistent()) == [1000, 2000, 20000]
assert list(v) == [1000]
def test_evolver_assign_and_read_with_negative_indices(pvector):
v = pvector([1, 2, 3])
e = v.evolver()
e[-1] = 4
e.extend([11, 12, 13])
e[-1] = 33
assert e[-1] == 33
assert list(e.persistent()) == [1, 2, 4, 11, 12, 33]
def test_evolver_non_integral_access(pvector):
e = pvector([1]).evolver()
with pytest.raises(TypeError):
x = e['foo']
def test_evolver_non_integral_assignment(pvector):
e = pvector([1]).evolver()
with pytest.raises(TypeError):
e['foo'] = 1
def test_evolver_out_of_bounds_access(pvector):
e = pvector([1]).evolver()
with pytest.raises(IndexError):
x = e[1]
def test_evolver_out_of_bounds_assignment(pvector):
e = pvector([1]).evolver()
with pytest.raises(IndexError):
e[2] = 1
def test_no_dependencies_between_evolvers_from_the_same_pvector(pvector):
original_list = list(range(40))
v = pvector(original_list)
e1 = v.evolver()
e2 = v.evolver()
e1.extend([1, 2, 3])
e1[2] = 20
e1[35] = 350
e2.extend([-1, -2, -3])
e2[2] = -20
e2[35] = -350
e1_expected = original_list + [1, 2, 3]
e1_expected[2] = 20
e1_expected[35] = 350
assert list(e1.persistent()) == e1_expected
e2_expected = original_list + [-1, -2, -3]
e2_expected[2] = -20
e2_expected[35] = -350
assert list(e2.persistent()) == e2_expected
def test_pvectors_produced_from_the_same_evolver_do_not_interfere(pvector):
original_list = list(range(40))
v = pvector(original_list)
e = v.evolver()
e.extend([1, 2, 3])
e[2] = 20
e[35] = 350
v1 = e.persistent()
v1_expected = original_list + [1, 2, 3]
v1_expected[2] = 20
v1_expected[35] = 350
e.extend([-1, -2, -3])
e[3] = -30
e[36] = -360
v2 = e.persistent()
v2_expected = v1_expected + [-1, -2, -3]
v2_expected[3] = -30
v2_expected[36] = -360
assert list(v1) == v1_expected
assert list(v2) == v2_expected
def test_evolver_len(pvector):
e = pvector([1, 2, 3]).evolver()
e.extend([4, 5])
assert len(e) == 5
def test_evolver_is_dirty(pvector):
e = pvector([1, 2, 3]).evolver()
assert not e.is_dirty()
e.append(4)
assert e.is_dirty()
e.persistent()
assert not e.is_dirty()
e[2] = 2000
assert e.is_dirty()
e.persistent()
assert not e.is_dirty()
def test_vector_insert_one_step_beyond_end(pvector):
# This test exists to get the transform functionality under memory
# leak supervision. Most of the transformation tests are in test_transform.py.
v = pvector([1, 2])
assert v.transform([2], 3) == pvector([1, 2, 3])
def test_evolver_with_no_updates_returns_same_pvector(pvector):
v = pvector([1, 2])
assert v.evolver().persistent() is v
def test_evolver_returns_itself_on_evolving_operations(pvector):
# Returns the evolver itself so that operations can be chained
v = pvector([1, 2])
assert v.evolver().append(3).extend([4, 5]).set(1, 6).persistent() == pvector([1, 6, 3, 4, 5])
def test_evolver_delete_by_index(pvector):
e = pvector([1, 2, 3]).evolver()
del e[0]
assert e.persistent() == python_pvector([2, 3])
assert e.append(4).persistent() == python_pvector([2, 3, 4])
def test_evolver_delete_function_by_index(pvector):
e = pvector([1, 2, 3]).evolver()
assert e.delete(1).persistent() == python_pvector([1, 3])
def test_evolver_delete_function_by_index_multiple_times(pvector):
SIZE = 40
e = pvector(range(SIZE)).evolver()
for i in range(SIZE):
assert e[0] == i
assert list(e.persistent()) == list(range(i, SIZE))
del e[0]
assert e.persistent() == list()
def test_evolver_delete_function_invalid_index(pvector):
e = pvector([1, 2]).evolver()
with pytest.raises(TypeError):
del e["e"]
def test_delete_of_non_existing_element(pvector):
e = pvector([1, 2]).evolver()
with pytest.raises(IndexError):
del e[2]
del e[0]
del e[0]
with pytest.raises(IndexError):
del e[0]
assert e.persistent() == pvector()
def test_append_followed_by_delete(pvector):
e = pvector([1, 2]).evolver()
e.append(3)
del e[2]
def test_evolver_set_followed_by_delete(pvector):
evolver = pvector([1, 2]).evolver()
evolver[1] = 3
assert [evolver[i] for i in range(len(evolver))] == [1, 3]
del evolver[0]
assert evolver.persistent() == pvector([3])
def test_compare_with_list(pvector):
v = pvector([1, 2, 3])
assert v == [1, 2, 3]
assert v != [1, 2]
assert v > [1, 2]
assert v < [2, 2]
assert [1, 2] < v
assert v <= [1, 2, 3]
assert v <= [1, 2, 4]
assert v >= [1, 2, 3]
assert v >= [1, 2]
def test_python_no_c_extension_with_environment_variable():
from six.moves import reload_module
import pyrsistent._pvector
import pyrsistent
import os
os.environ['PYRSISTENT_NO_C_EXTENSION'] = 'TRUE'
reload_module(pyrsistent._pvector)
reload_module(pyrsistent)
assert type(pyrsistent.pvector()) is pyrsistent._pvector.PythonPVector
del os.environ['PYRSISTENT_NO_C_EXTENSION']
reload_module(pyrsistent._pvector)
reload_module(pyrsistent)
def test_supports_weakref(pvector):
import weakref
weakref.ref(pvector())
def test_failing_repr(pvector):
# See https://github.com/tobgu/pyrsistent/issues/84
class A(object):
def __repr__(self):
raise ValueError('oh no!')
with pytest.raises(ValueError):
repr(pvector([A()]))
def test_iterable(pvector):
"""
PVectors can be created from iterables even though they can't be len()
hinted.
"""
assert pvector(iter("a")) == pvector(iter("a"))
| [] | [] | [
"PYRSISTENT_NO_C_EXTENSION"
] | [] | ["PYRSISTENT_NO_C_EXTENSION"] | python | 1 | 0 | |
src/bpm/runc/client/client.go | // Copyright (C) 2017-Present CloudFoundry.org Foundation, Inc. All rights reserved.
//
// This program and the accompanying materials are made available under
// the terms of the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package client
import (
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
type Signal int
const (
Term Signal = iota
Quit
)
func (s Signal) String() string {
switch s {
case Term:
return "TERM"
case Quit:
return "QUIT"
default:
return "unknown"
}
}
// https://github.com/opencontainers/runc/blob/master/list.go#L24-L45
type ContainerState struct {
// ID is the container ID
ID string `json:"id"`
// InitProcessPid is the init process id in the parent namespace
InitProcessPid int `json:"pid"`
// Status is the current status of the container, running, paused, ...
Status string `json:"status"`
}
type RuncClient struct {
runcPath string
runcRoot string
}
func NewRuncClient(runcPath, runcRoot string) *RuncClient {
return &RuncClient{
runcPath: runcPath,
runcRoot: runcRoot,
}
}
func (*RuncClient) CreateBundle(
bundlePath string,
jobSpec specs.Spec,
user specs.User,
) error {
err := os.MkdirAll(bundlePath, 0700)
if err != nil {
return err
}
rootfsPath := filepath.Join(bundlePath, "rootfs")
err = os.MkdirAll(rootfsPath, 0755)
if err != nil {
return err
}
f, err := os.OpenFile(filepath.Join(bundlePath, "config.json"), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
// This is super hard to test as we are root.
return err
}
defer f.Close()
enc := json.NewEncoder(f)
enc.SetIndent("", "\t")
return enc.Encode(&jobSpec)
}
func (c *RuncClient) RunContainer(pidFilePath, bundlePath, containerID string, detach bool, stdout, stderr io.Writer) (int, error) {
args := []string{"--root", c.runcRoot, "run", "--bundle", bundlePath, "--pid-file", pidFilePath}
if detach {
args = append(args, "--detach")
}
args = append(args, containerID)
runcCmd := exec.Command(c.runcPath, args...)
runcCmd.Stdout = stdout
runcCmd.Stderr = stderr
if err := runcCmd.Run(); err != nil {
if status, ok := runcCmd.ProcessState.Sys().(syscall.WaitStatus); ok {
return status.ExitStatus(), err
}
// If we can't get the exit status for some reason then make
// sure to at least return a generic failure.
return 1, err
}
return 0, nil
}
// Exec assumes you are launching an interactive shell.
// We should improve the interface to mirror `runc exec` more generally.
func (c *RuncClient) Exec(containerID, command string, stdin io.Reader, stdout, stderr io.Writer) error {
runcCmd := exec.Command(
c.runcPath,
"--root", c.runcRoot,
"exec",
"--tty",
"--env", fmt.Sprintf("TERM=%s", os.Getenv("TERM")),
containerID,
command,
)
runcCmd.Stdin = stdin
runcCmd.Stdout = stdout
runcCmd.Stderr = stderr
return runcCmd.Run()
}
// ContainerState returns the following:
// - state, nil if the job is running and no errors were encountered.
// - nil, nil if the container state is not running and no other errors were encountered
// - nil, error if there is any other error getting the container state
// (e.g. the container is running but in an unreachable state)
func (c *RuncClient) ContainerState(containerID string) (*specs.State, error) {
runcCmd := exec.Command(
c.runcPath,
"--root", c.runcRoot,
"state",
containerID,
)
var state specs.State
data, err := runcCmd.CombinedOutput()
if err != nil {
return nil, decodeContainerStateErr(data, err)
}
err = json.Unmarshal(data, &state)
if err != nil {
return nil, err
}
return &state, nil
}
func decodeContainerStateErr(b []byte, err error) error {
r := regexp.MustCompile(`^\s*container "[^"]*" does not exist\s*$`)
if r.MatchString(string(b)) {
return nil
}
return err
}
func (c *RuncClient) ListContainers() ([]ContainerState, error) {
runcCmd := exec.Command(
c.runcPath,
"--root", c.runcRoot,
"list",
"--format", "json",
)
data, err := runcCmd.Output()
if err != nil {
return []ContainerState{}, err
}
var containerStates []ContainerState
err = json.Unmarshal(data, &containerStates)
if err != nil {
return []ContainerState{}, err
}
return containerStates, nil
}
func (c *RuncClient) SignalContainer(containerID string, signal Signal) error {
runcCmd := exec.Command(
c.runcPath,
"--root", c.runcRoot,
"kill",
containerID,
signal.String(),
)
return runcCmd.Run()
}
func (c *RuncClient) DeleteContainer(containerID string) error {
runcCmd := exec.Command(
c.runcPath,
"--root", c.runcRoot,
"delete",
"-f",
containerID,
)
return runcCmd.Run()
}
func (*RuncClient) DestroyBundle(bundlePath string) error {
return os.RemoveAll(bundlePath)
}
| [
"\"TERM\""
] | [] | [
"TERM"
] | [] | ["TERM"] | go | 1 | 0 | |
backend/api_app/api_app/wsgi.py | """
WSGI config for api_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_app.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
main.go | package main
import (
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v7"
"github.com/twinj/uuid"
)
// User authentication data type
type User struct {
ID uint64 `json:"id"`
Username string `json:"username"`
Password string `json:"password"`
}
// TokenDetails data structure
type TokenDetails struct {
AccessToken string
RefreshToken string
AccessUUID string
RefreshUUID string
AtExpires int64
RtExpires int64
}
var (
redisClient *redis.Client
router = gin.Default()
user = User{
ID: 1,
Username: "jhondoe",
Password: "foobar",
}
accessSecret = []byte("access_foo_bar")
refreshSecret = []byte("refresh_foo_bar")
)
func initRedis() {
dsn := os.Getenv("REDIS_DSN")
if len(dsn) == 0 {
dsn = "localhost:6379"
}
redisClient = redis.NewClient(&redis.Options{
Addr: dsn,
})
if _, err := redisClient.Ping().Result(); err != nil {
panic(err)
}
}
// CreateToken generates the JWT access and refresh tokens
func CreateToken(userid uint64) (*TokenDetails, error) {
td := &TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * 15).Unix()
td.AccessUUID = uuid.NewV4().String()
td.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()
td.RefreshUUID = uuid.NewV4().String()
var err error
atClaims := jwt.MapClaims{
"authorized": true,
"access_uuid": td.AccessUUID,
"user_id": userid,
"exp": td.AtExpires,
}
at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)
td.AccessToken, err = at.SignedString(accessSecret)
if err != nil {
return nil, err
}
rtClaims := jwt.MapClaims{
"refresh_uuid": td.RefreshUUID,
"user_id": userid,
"exp": td.RtExpires,
}
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)
td.RefreshToken, err = rt.SignedString(refreshSecret)
if err != nil {
return nil, err
}
return td, nil
}
// CreateAuth persists user authentication
func CreateAuth(userid uint64, td *TokenDetails) error {
at := time.Unix(td.AtExpires, 0)
rt := time.Unix(td.RtExpires, 0)
now := time.Now()
err := redisClient.Set(td.AccessUUID, strconv.Itoa(int(userid)), at.Sub(now)).Err()
if err != nil {
return err
}
err = redisClient.Set(td.RefreshUUID, strconv.Itoa(int(userid)), rt.Sub(now)).Err()
if err != nil {
return err
}
return nil
}
// Login controller
func Login(c *gin.Context) {
var u User
if err := c.ShouldBindJSON(&u); err != nil {
c.JSON(http.StatusUnprocessableEntity, "Invalid json provided")
return
}
if user.Username != u.Username || user.Password != u.Password {
c.JSON(http.StatusUnauthorized, "Please provide valid login details")
return
}
td, err := CreateToken(user.ID)
if err != nil {
c.JSON(http.StatusUnprocessableEntity, err.Error())
return
}
err = CreateAuth(user.ID, td)
if err != nil {
c.JSON(http.StatusUnprocessableEntity, err.Error())
return
}
tokens := map[string]string{
"access_token": td.AccessToken,
"refresh_token": td.RefreshToken,
}
c.JSON(http.StatusOK, tokens)
}
func main() {
initRedis()
router.POST("/login", Login)
log.Fatal(router.Run(":8080"))
}
| [
"\"REDIS_DSN\""
] | [] | [
"REDIS_DSN"
] | [] | ["REDIS_DSN"] | go | 1 | 0 | |
channel/two-listeners/main.go | package main
import (
"fmt"
"sync"
)
func main() {
var wg sync.WaitGroup
wg.Add(4)
ch := make(chan string)
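// Several goroutines range over the same unbuffered channel; each value
// sent below is delivered to exactly one of them, so the four sends are
// distributed across the listeners and matched by the wg.Add(4) above.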
go func() {
for v := range ch {
fmt.Println("1:", v)
wg.Done()
}
}()
go func() {
for v := range ch {
fmt.Println("2:", v)
wg.Done()
}
}()
go func() {
for v := range ch {
fmt.Println("3:", v)
wg.Done()
}
}()
go func() {
for v := range ch {
fmt.Println("4:", v)
wg.Done()
}
}()
go func() {
for v := range ch {
fmt.Println("5:", v)
wg.Done()
}
}()
ch <- "hello"
ch <- "world"
ch <- "how"
ch <- "are you"
wg.Wait()
return
}
| [] | [] | [] | [] | [] | go | null | null | null |
tokenbalance_test.go | package tokenbalance
import (
"github.com/stretchr/testify/assert"
"math/big"
"os"
"testing"
)
func TestFailedConnection(t *testing.T) {
c := &Config{
GethLocation: "https://google.com",
Logs: true,
}
err := c.Connect()
assert.Error(t, err)
}
func TestFailingNoConfig(t *testing.T) {
_, err := New("0xd26114cd6EE289AccF82350c8d8487fedB8A0C07", "0x42d4722b804585cdf6406fa7739e794b0aa8b1ff")
assert.Error(t, err)
}
func TestConnection(t *testing.T) {
c := &Config{
GethLocation: os.Getenv("ETH"),
Logs: true,
}
err := c.Connect()
assert.Nil(t, err)
}
func TestZeroDecimal(t *testing.T) {
number := big.NewInt(123456789)
tokenCorrected := bigIntString(number, 0)
assert.Equal(t, "123456789", tokenCorrected)
}
func TestZeroBalance(t *testing.T) {
number := big.NewInt(0)
tokenCorrected := bigIntString(number, 18)
assert.Equal(t, "0.0", tokenCorrected)
}
func TestFormatDecimal(t *testing.T) {
number := big.NewInt(0)
number.SetString("72094368689712", 10)
tokenCorrected := bigIntString(number, 18)
assert.Equal(t, "0.000072094368689712", tokenCorrected)
}
func TestFormatSmallDecimal(t *testing.T) {
number := big.NewInt(0)
number.SetString("123", 10)
tokenCorrected := bigIntString(number, 18)
assert.Equal(t, "0.000000000000000123", tokenCorrected)
}
func TestFormatVerySmallDecimal(t *testing.T) {
number := big.NewInt(0)
number.SetString("1142400000000001", 10)
tokenCorrected := bigIntString(number, 18)
assert.Equal(t, "0.001142400000000001", tokenCorrected)
}
func TestFailedNewTokenBalance(t *testing.T) {
_, err := New("0x42D4722B804585CDf6406fa7739e794b0Aa8b1FF", "0x42d4722b804585cdf6406fa7739e794b0aa8b1ff")
assert.Error(t, err)
}
func TestSymbolFix(t *testing.T) {
symbol := symbolFix("0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0")
assert.Equal(t, "EOS", symbol)
}
func TestTokenBalance_ToJSON(t *testing.T) {
symbol := symbolFix("0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0")
assert.Equal(t, "EOS", symbol)
}
func TestNewTokenBalance(t *testing.T) {
c := &Config{
GethLocation: os.Getenv("ETH"),
Logs: true,
}
err := c.Connect()
assert.Nil(t, err)
tb, err := New("0xd26114cd6EE289AccF82350c8d8487fedB8A0C07", "0x42d4722b804585cdf6406fa7739e794b0aa8b1ff")
assert.Nil(t, err)
assert.Equal(t, "0x42D4722B804585CDf6406fa7739e794b0Aa8b1FF", tb.Wallet.String())
assert.Equal(t, "0xd26114cd6EE289AccF82350c8d8487fedB8A0C07", tb.Contract.String())
assert.Equal(t, "600000.0", tb.BalanceString())
assert.Equal(t, "1.020095885777777767", tb.ETHString())
assert.Equal(t, int64(18), tb.Decimals)
assert.Equal(t, "OMG", tb.Symbol)
}
| [
"\"ETH\"",
"\"ETH\""
] | [] | [
"ETH"
] | [] | ["ETH"] | go | 1 | 0 | |
core/src/main/java/io/questdb/log/LogFactory.java | /*******************************************************************************
* ___ _ ____ ____
* / _ \ _ _ ___ ___| |_| _ \| __ )
* | | | | | | |/ _ \/ __| __| | | | _ \
* | |_| | |_| | __/\__ \ |_| |_| | |_) |
* \__\_\\__,_|\___||___/\__|____/|____/
*
* Copyright (c) 2014-2019 Appsicle
* Copyright (c) 2019-2020 QuestDB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package io.questdb.log;
import io.questdb.mp.*;
import io.questdb.std.*;
import io.questdb.std.datetime.microtime.MicrosecondClock;
import io.questdb.std.datetime.microtime.MicrosecondClockImpl;
import java.io.*;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.util.Comparator;
import java.util.Properties;
public class LogFactory implements Closeable {
public static final LogFactory INSTANCE = new LogFactory();
public static final String DEBUG_TRIGGER = "ebug";
public static final String DEBUG_TRIGGER_ENV = "QDB_DEBUG";
public static final String CONFIG_SYSTEM_PROPERTY = "questdbLog";
private static final int DEFAULT_QUEUE_DEPTH = 1024;
private static final int DEFAULT_MSG_SIZE = 4 * 1024;
private static final String DEFAULT_CONFIG = "/qlog.conf";
private static final String EMPTY_STR = "";
private static final CharSequenceHashSet reserved = new CharSequenceHashSet();
private static final LengthDescendingComparator LDC = new LengthDescendingComparator();
private final CharSequenceObjHashMap<ScopeConfiguration> scopeConfigMap = new CharSequenceObjHashMap<>();
private final ObjList<ScopeConfiguration> scopeConfigs = new ObjList<>();
private final ObjHashSet<LogWriter> jobs = new ObjHashSet<>();
private final MicrosecondClock clock;
private WorkerPool workerPool;
private boolean configured = false;
private int queueDepth = DEFAULT_QUEUE_DEPTH;
private int recordLength = DEFAULT_MSG_SIZE;
static boolean envEnabled = true;
public LogFactory() {
this(MicrosecondClockImpl.INSTANCE);
}
public LogFactory(MicrosecondClock clock) {
this.clock = clock;
}
public static void configureFromProperties(LogFactory factory, Properties properties, WorkerPool workerPool) {
factory.workerPool = workerPool;
String writers = getProperty(properties, "writers");
if (writers == null) {
factory.configured = true;
return;
}
String s;
s = getProperty(properties, "queueDepth");
if (s != null && s.length() > 0) {
try {
factory.setQueueDepth(Numbers.parseInt(s));
} catch (NumericException e) {
throw new LogError("Invalid value for queueDepth");
}
}
s = getProperty(properties, "recordLength");
if (s != null && s.length() > 0) {
try {
factory.setRecordLength(Numbers.parseInt(s));
} catch (NumericException e) {
throw new LogError("Invalid value for recordLength");
}
}
for (String w : writers.split(",")) {
LogWriterConfig conf = createWriter(properties, w.trim());
if (conf != null) {
factory.add(conf);
}
}
factory.bind();
}
public static void configureFromSystemProperties(LogFactory factory) {
configureFromSystemProperties(factory, null);
}
public static void configureFromSystemProperties(LogFactory factory, WorkerPool workerPool) {
String conf = System.getProperty(CONFIG_SYSTEM_PROPERTY);
if (conf == null) {
conf = DEFAULT_CONFIG;
}
try (InputStream is = LogFactory.class.getResourceAsStream(conf)) {
if (is != null) {
Properties properties = new Properties();
properties.load(is);
configureFromProperties(factory, properties, workerPool);
} else {
File f = new File(conf);
if (f.canRead()) {
try (FileInputStream fis = new FileInputStream(f)) {
Properties properties = new Properties();
properties.load(fis);
configureFromProperties(factory, properties, workerPool);
}
} else {
factory.configureDefaultWriter();
}
}
} catch (IOException e) {
if (!DEFAULT_CONFIG.equals(conf)) {
throw new LogError("Cannot read " + conf, e);
} else {
factory.configureDefaultWriter();
}
}
factory.startThread();
}
public static void configureFromSystemProperties(WorkerPool workerPool) {
configureFromSystemProperties(INSTANCE, workerPool);
}
@SuppressWarnings("rawtypes")
public static Log getLog(Class clazz) {
return getLog(clazz.getName());
}
public static Log getLog(CharSequence key) {
if (!INSTANCE.configured) {
configureFromSystemProperties(INSTANCE, null);
}
return INSTANCE.create(key);
}
public void add(final LogWriterConfig config) {
final int index = scopeConfigMap.keyIndex(config.getScope());
ScopeConfiguration scopeConf;
if (index > -1) {
scopeConfigMap.putAt(index, config.getScope(), scopeConf = new ScopeConfiguration(LogLevel.LOG_LEVEL_MAX));
scopeConfigs.add(scopeConf);
} else {
scopeConf = scopeConfigMap.valueAtQuick(index);
}
scopeConf.add(config);
}
public void assign(WorkerPool workerPool) {
for (int i = 0, n = jobs.size(); i < n; i++) {
workerPool.assign(jobs.get(i));
}
if (this.workerPool == null) {
this.workerPool = workerPool;
}
}
public void bind() {
if (configured) {
return;
}
configured = true;
for (int i = 0, n = scopeConfigs.size(); i < n; i++) {
ScopeConfiguration conf = scopeConfigs.get(i);
conf.bind(jobs, queueDepth, recordLength);
}
scopeConfigMap.sortKeys(LDC);
for (int i = 0, n = jobs.size(); i < n; i++) {
jobs.get(i).bindProperties();
}
if (workerPool != null) {
assign(workerPool);
}
}
@Override
public void close() {
haltThread();
for (int i = 0, n = jobs.size(); i < n; i++) {
Misc.free(jobs.get(i));
}
for (int i = 0, n = scopeConfigs.size(); i < n; i++) {
Misc.free(scopeConfigs.getQuick(i));
}
}
public Log create(CharSequence key) {
if (!configured) {
throw new LogError("Not configured");
}
ScopeConfiguration scopeConfiguration = find(key);
if (scopeConfiguration == null) {
return new Logger(
clock,
compressScope(key),
null,
null,
null,
null,
null,
null,
null,
null
);
}
final Holder inf = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_INFO));
final Holder dbg = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_DEBUG));
final Holder err = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_ERROR));
final Holder adv = scopeConfiguration.getHolder(Numbers.msb(LogLevel.LOG_LEVEL_ADVISORY));
return new Logger(
clock,
compressScope(key),
dbg == null ? null : dbg.ring,
dbg == null ? null : dbg.lSeq,
inf == null ? null : inf.ring,
inf == null ? null : inf.lSeq,
err == null ? null : err.ring,
err == null ? null : err.lSeq,
adv == null ? null : adv.ring,
adv == null ? null : adv.lSeq
);
}
public ObjHashSet<LogWriter> getJobs() {
return jobs;
}
public int getQueueDepth() {
return queueDepth;
}
private void setQueueDepth(int queueDepth) {
this.queueDepth = queueDepth;
}
public int getRecordLength() {
return recordLength;
}
private void setRecordLength(int recordLength) {
this.recordLength = recordLength;
}
public void haltThread() {
if (workerPool != null) {
workerPool.halt();
workerPool = null;
}
}
public void startThread() {
if (this.workerPool != null) {
return;
}
this.workerPool = new WorkerPool(new WorkerPoolConfiguration() {
@Override
public int[] getWorkerAffinity() {
return new int[]{-1};
}
@Override
public int getWorkerCount() {
return 1;
}
@Override
public boolean haltOnError() {
return false;
}
@Override
public boolean isDaemonPool() {
return true;
}
});
assign(workerPool);
workerPool.start(null);
}
private static String getProperty(final Properties properties, String key) {
if (envEnabled) {
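// Environment variables override file properties: a property key such as
// "w.<name>.level" maps to the env var QDB_LOG_W_<NAME>_LEVEL.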
final String envValue = System.getenv("QDB_LOG_" + key.replace('.', '_').toUpperCase());
if (envValue == null) {
return properties.getProperty(key);
}
return envValue;
}
return properties.getProperty(key);
}
@SuppressWarnings("rawtypes")
private static LogWriterConfig createWriter(final Properties properties, String w) {
final String writer = "w." + w + '.';
final String clazz = getProperty(properties, writer + "class");
final String levelStr = getProperty(properties, writer + "level");
final String scope = getProperty(properties, writer + "scope");
if (clazz == null) {
return null;
}
final Class<?> cl;
final Constructor constructor;
try {
cl = Class.forName(clazz);
constructor = cl.getDeclaredConstructor(RingQueue.class, SCSequence.class, int.class);
} catch (ClassNotFoundException e) {
throw new LogError("Class not found " + clazz, e);
} catch (NoSuchMethodException e) {
throw new LogError("Constructor(RingQueue, Sequence, int) expected: " + clazz, e);
}
int level = 0;
if (levelStr != null) {
for (String s : levelStr.split(",")) {
switch (s.toUpperCase()) {
case "DEBUG":
level |= LogLevel.LOG_LEVEL_DEBUG;
break;
case "INFO":
level |= LogLevel.LOG_LEVEL_INFO;
break;
case "ERROR":
level |= LogLevel.LOG_LEVEL_ERROR;
break;
case "ADVISORY":
level |= LogLevel.LOG_LEVEL_ADVISORY;
break;
default:
throw new LogError("Unknown level: " + s);
}
}
}
if (isForcedDebug()) {
level = level | LogLevel.LOG_LEVEL_DEBUG;
}
// enable all LOG levels above the minimum set one
// ((-1 >>> (msb-1)) << msb) | level
final int msb = Numbers.msb(level);
level = (((-1 >>> (msb-1)) << msb) | level) & LogLevel.LOG_LEVEL_MASK;
return new LogWriterConfig(scope == null ? EMPTY_STR : scope, level, (ring, seq, level1) -> {
try {
LogWriter w1 = (LogWriter) constructor.newInstance(ring, seq, level1);
for (String n : properties.stringPropertyNames()) {
if (n.startsWith(writer)) {
String p = n.substring(writer.length());
if (reserved.contains(p)) {
continue;
}
try {
Field f = cl.getDeclaredField(p);
if (f.getType() == String.class) {
Unsafe.getUnsafe().putObject(w1, Unsafe.getUnsafe().objectFieldOffset(f), getProperty(properties, n));
}
} catch (Exception e) {
throw new LogError("Unknown property: " + n, e);
}
}
}
return w1;
} catch (Exception e) {
throw new LogError("Error creating log writer", e);
}
});
}
private static boolean isForcedDebug() {
return System.getProperty(DEBUG_TRIGGER) != null || System.getenv().containsKey(DEBUG_TRIGGER_ENV);
}
/**
* Converts fully qualified class name into an abbreviated form:
* com.questdb.mp.Sequence -> c.q.m.Sequence
*
* @param key typically class name
* @return abbreviated form of key
*/
private static CharSequence compressScope(CharSequence key) {
StringBuilder builder = new StringBuilder();
char c = 0;
boolean pick = true;
int z = 0;
for (int i = 0, n = key.length(); i < n; i++) {
char a = key.charAt(i);
if (a == '.') {
if (!pick) {
builder.append(c).append('.');
pick = true;
}
} else if (pick) {
c = a;
z = i;
pick = false;
}
}
for (; z < key.length(); z++) {
builder.append(key.charAt(z));
}
builder.append(' ');
return builder;
}
private void configureDefaultWriter() {
int level = LogLevel.LOG_LEVEL_INFO | LogLevel.LOG_LEVEL_ERROR | LogLevel.LOG_LEVEL_ADVISORY;
if (isForcedDebug()) {
level = level | LogLevel.LOG_LEVEL_DEBUG;
}
add(new LogWriterConfig(level, LogConsoleWriter::new));
bind();
}
private ScopeConfiguration find(CharSequence key) {
ObjList<CharSequence> keys = scopeConfigMap.keys();
CharSequence k = null;
for (int i = 0, n = keys.size(); i < n; i++) {
CharSequence s = keys.getQuick(i);
if (Chars.startsWith(key, s)) {
k = s;
break;
}
}
if (k == null) {
return null;
}
return scopeConfigMap.get(k);
}
private static class ScopeConfiguration implements Closeable {
private final int[] channels;
private final ObjList<LogWriterConfig> writerConfigs = new ObjList<>();
private final IntObjHashMap<Holder> holderMap = new IntObjHashMap<>();
private final ObjList<Holder> holderList = new ObjList<>();
private int ci = 0;
public ScopeConfiguration(int levels) {
this.channels = new int[levels];
}
public void bind(ObjHashSet<LogWriter> jobs, int queueDepth, int recordLength) {
// create queues for processed channels
for (int i = 0, n = channels.length; i < n; i++) {
int index = channels[i];
if (index > 0) {
int keyIndex = holderMap.keyIndex(index);
if (keyIndex > -1) {
Holder h = new Holder(queueDepth, recordLength);
holderMap.putAt(keyIndex, index, h);
holderList.add(h);
}
}
}
for (int i = 0, n = writerConfigs.size(); i < n; i++) {
LogWriterConfig c = writerConfigs.getQuick(i);
// the channels array has a guarantee that
// all bits in level mask will point to the same queue
// so we just get most significant bit number
// and dereference queue on its index
Holder h = holderMap.get(channels[Numbers.msb(c.getLevel())]);
// check if this queue was used by another writer
if (h.wSeq != null) {
// yes, it was
if (h.fanOut == null) {
h.fanOut = FanOut.to(h.wSeq).and(h.wSeq = new SCSequence());
} else {
h.fanOut.and(h.wSeq = new SCSequence());
}
} else {
// we are here first!
h.wSeq = new SCSequence();
}
// now h.wSeq contains our writer's sequence
jobs.add(c.getFactory().createLogWriter(h.ring, h.wSeq, c.getLevel()));
}
// and the last step is to link dependent sequences
for (int i = 0, n = holderList.size(); i < n; i++) {
Holder h = holderList.getQuick(i);
if (h.fanOut != null) {
h.lSeq.then(h.fanOut).then(h.lSeq);
} else {
h.lSeq.then(h.wSeq).then(h.lSeq);
}
}
}
@Override
public void close() {
for (int i = 0, n = holderList.size(); i < n; i++) {
Misc.free(holderList.getQuick(i));
}
}
/**
* Aggregates channels into set of queues. Consumer interest is represented by
* level, where consumer sets bits corresponding to channel indexes is it interested in.
* <p>
* Consumer 1 requires channels D & E. So its interest looks like {1,0,1}
* Consumer 2 requires channel I, so its interest is {0,1,0}
* <p>
* This method combines these interests as follows:
* <p>
* channels = {1,2,1}
* <p>
* which means that there will be need to 2 queues (1 and 2) and that Consumer 1
* will be using queue 1 and consumer 2 will be using queue 2.
* <p>
* More complex scenario where consumer interests overlap, for example:
* <p>
* consumer 1 {1,1,0}
* consumer 2 {0,1,1}
* <p>
* these interests will be combined as follows:
* <p>
* channels = {1,1,1}
* <p>
* which means that both consumers will be sharing same queue and they will have to
* filter applicable messages as they get them.
* <p>
* Algorithm iterates over set of bits in "level" twice. First pass is to establish
* minimum number of channel[] element out of those entries where bit in level is set.
* Additionally this pass will set channel[] elements to current consumer index where
* channel[] element is zero.
* <p>
* Second pass sets channel[] element to min value found on first pass.
*
* @param conf LogWriterConfig
*/
private void add(LogWriterConfig conf) {
int mask = conf.getLevel();
int min = Integer.MAX_VALUE;
int q = ++ci;
for (int i = 0, n = channels.length; i < n; i++) {
if (((mask >> i) & 1) == 1) {
int that = channels[i];
if (that == 0) {
channels[i] = q;
}
if (that > 0 && that < min) {
min = that;
}
}
}
if (mask > 1 && min < Integer.MAX_VALUE) {
for (int i = 0, n = channels.length; i < n; i++) {
if (((mask >> i) & 1) == 1) {
channels[i] = min;
}
}
}
writerConfigs.add(conf);
}
private Holder getHolder(int index) {
return holderMap.get(channels[index]);
}
}
private static class LengthDescendingComparator implements Comparator<CharSequence>, Serializable {
@Override
public int compare(CharSequence o1, CharSequence o2) {
int l1, l2;
if ((l1 = o1.length()) < (l2 = o2.length())) {
return 1;
}
if (l1 > l2) {
return -1;
}
return 0;
}
}
private static class Holder implements Closeable {
private final RingQueue<LogRecordSink> ring;
private final Sequence lSeq;
private SCSequence wSeq;
private FanOut fanOut;
public Holder(int queueDepth, final int recordLength) {
this.ring = new RingQueue<>(() -> new LogRecordSink(recordLength), queueDepth);
this.lSeq = new MPSequence(queueDepth);
}
@Override
public void close() {
for (int i = 0, n = ring.getCapacity(); i < n; i++) {
Misc.free(ring.get(i));
}
}
}
static {
reserved.add("scope");
reserved.add("class");
reserved.add("level");
}
}
| [
"\"QDB_LOG_\" + key.replace('.', '_'"
] | [] | [
"QDB_LOG_\" + key.replace('.', '_"
] | [] | ["QDB_LOG_\" + key.replace('.', '_"] | java | 1 | 0 | |
synchronize_test_cases.py | #!/usr/bin/env python
import base64
import getpass
import hashlib
import json
import os
import requests
import sys
import time
from ConfigParser import ConfigParser
from argparse import ArgumentParser, ArgumentTypeError
from collections import defaultdict
from urlparse import urljoin
class Synchronize(object):
PATHS = {'auth': 'session',
'file_item': 'file/{sha1sum}/_',
'file_item_info': 'file/info/{sha1sum}',
'test_case': 'test_case',
'test_case_item': 'test_case/{test_case_id}',
'testable': 'testable',
'project_info': 'p/{project_id}/info'}
@staticmethod
def _get_config(section):
config = ConfigParser()
if 'APPDATA' in os.environ: # Windows
os_config_path = os.environ['APPDATA']
elif 'XDG_CONFIG_HOME' in os.environ: # Modern Linux
os_config_path = os.environ['XDG_CONFIG_HOME']
elif 'HOME' in os.environ: # Legacy Linux
os_config_path = os.path.join(os.environ['HOME'], '.config')
else:
os_config_path = None
locations = [os.path.join(os.path.dirname(__file__), 'submit.ini'),
'submit.ini']
if os_config_path is not None:
locations.insert(1, os.path.join(os_config_path, 'submit.ini'))
if not config.read(locations):
raise Exception('No submit.ini found.')
if not config.has_section(section) and section != 'DEFAULT':
raise Exception('No section `{0}` found in submit.ini.'
.format(section))
return dict(config.items(section))
def __init__(self, config_section):
config = self._get_config(config_section)
self.debug = config['debug'].lower() in ('1', 'true', 'yes')
self.request_delay = int(config['request_delay'])
self.request_timeout = int(config['request_timeout'])
self._url = config['url']
self.session = requests.session()
self.session.headers['X-Requested-With'] = 'XMLHttpRequest'
self.email = None
def create_testables(self, testables, project_info):
"""Return a mapping of testable to testable infos."""
retval = {}
for testable in testables:
if testable in project_info['testables']:
retval[testable] = project_info['testables'][testable]
else:
url = self.url('testable')
response = self.request(url, 'PUT', name=testable,
project_id=unicode(project_info['id']),
executable='a.out')
self.msg('Creating testable: {0}'.format(response.status_code))
if response.status_code != 201:
raise Exception('Could not create testable {0}'
.format(testable))
retval[testable] = {'id': response.json()['testable_id'],
'name': testable, 'test_cases': []}
return retval
def get_info(self, project_id):
url = self.url('project_info', project_id=project_id)
response = self.request(url, 'GET')
self.msg('Fetching project info: {0}'.format(response.status_code))
if response.status_code != 200:
return None
return response.json()
def get_tests(self, testables):
tests = {}
for path in testables:
testable = os.path.basename(path)
tests[testable] = defaultdict(dict)
for filename in sorted(os.listdir(path)):
filepath = os.path.join(path, filename)
test_name, ext = os.path.splitext(filename)
if not ext:
sys.stderr.write('Ignoring invalid file: {} for {}\n'
.format(filename, testable))
continue
tests[testable][test_name][ext[1:]] = open(filepath).read()
for test_case, info in tests[testable].items():
if 'args' not in info:
info['args'] = 'a.out'
extra = set(info) - set(['args', 'stdin'])
if len(extra) > 1:
print('Too many extensions ({0}) for {1}/{2}. Goodbye!'
.format(list(extra), testable, test_case))
sys.exit(1)
info['source'] = list(extra)[0]
return tests
def login(self, email=None, password=None):
"""Login to establish a valid session."""
auth_url = self.url('auth')
while True:
if not email and not password:
sys.stdout.write('Email: ')
sys.stdout.flush()
email = sys.stdin.readline().strip()
if not email:
print('Goodbye!')
sys.exit(1)
password = getpass.getpass()
response = self.request(auth_url, 'PUT', email=email,
password=password)
if response.status_code == 201:
self.msg('logged in')
self.email = email
break
else:
print(response.json()['messages'])
email = password = None
def msg(self, message):
"""Output a debugging message."""
if self.debug:
print('\t' + message)
def request(self, url, method='get', **data):
time.sleep(self.request_delay)
args = (json.dumps(data),) if data else ()
retval = getattr(self.session, method.lower())(
url, *args, verify=False, timeout=self.request_timeout)
# Handle outage issues
if retval.status_code == 502:
print('The submission site is unexpectedly down. Please email '
'[email protected] with the URL: {0}'.format(url))
sys.exit(1)
elif retval.status_code == 503:
print('The submission site is temporarily down for maintenance. '
'Please try your submission again in a minute.')
sys.exit(1)
return retval
def send_file(self, data):
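# Content-addressed upload: if the server already knows this sha1, reuse
# the existing file id; otherwise upload the base64-encoded contents.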
sha1sum = hashlib.sha1(data).hexdigest()
test_url = self.url('file_item_info', sha1sum=sha1sum)
upload_url = self.url('file_item', sha1sum=sha1sum)
# Have we already uploaded the file?
response = self.request(test_url, 'GET')
self.msg('Test file: {0}'.format(response.status_code))
if response.status_code == 200:
return response.json()['file_id']
# Upload the file
response = self.request(upload_url, 'PUT',
b64data=base64.b64encode(data).decode('ascii'))
self.msg('Send file: {0}'.format(response.status_code))
if response.status_code == 200:
return response.json()['file_id']
else:
return None
def synchronize(self, project, testables):
tests = self.get_tests(testables)
self.login()
project_info = self.get_info(project)
if not project_info:
print('You cannot edit `{0}`'.format(project))
return 1
mapping = self.create_testables(tests.keys(), project_info)
for testable, info in mapping.items():
self.synchronize_test_cases(info['id'], tests[testable],
info['test_cases'])
return 0
def synchronize_test_cases(self, testable_id, available, existing,
points=1, output_type='diff',
hide_expected=False):
for name in available:
info = available[name]
stdin_id, expected_id = self.upload_files(
info.get('stdin'), info[info['source']])
kwargs = {'name': name, 'args': info['args'],
'points': unicode(points),
'expected_id': unicode(expected_id),
'output_type': output_type,
'hide_expected': '1' if hide_expected else '0'}
if stdin_id:
kwargs['stdin_id'] = unicode(stdin_id)
if info['source'] in ('stdout', 'stderr'):
kwargs['output_source'] = info['source']
else:
kwargs['output_source'] = 'file'
kwargs['output_filename'] = info['source']
if name not in existing:
kwargs['testable_id'] = unicode(testable_id)
url = self.url('test_case')
method = 'PUT'
else:
url = self.url('test_case_item',
test_case_id=existing[name]['id'])
method = 'POST'
response = self.request(url, method, **kwargs)
if response.status_code not in (200, 201):
print('Error uploading {0}: {1}'
.format(name, response.json()['messages']))
return 1
return 0
def upload_files(self, stdin, expected):
stdin_id = self.send_file(stdin) if stdin else None
expected_id = self.send_file(expected)
return stdin_id, expected_id
def url(self, resource, **kwargs):
return urljoin(self._url, self.PATHS[resource]).format(**kwargs)
def readable_dir(path):
"""Test for a readable directory. """
# Verify the folder exists and is readable
if not os.path.isdir(path):
raise ArgumentTypeError('readable_dir: {0} is not a valid path'
.format(path))
if not os.access(path, os.R_OK):
raise ArgumentTypeError('readable_dir: {0} is not a readable '
'directory'.format(path))
return os.path.abspath(path)
def main():
parser = ArgumentParser()
parser.add_argument('-c', '--config', default='DEFAULT')
parser.add_argument('-p', '--project_id', required=True)
parser.add_argument('testables', nargs='+', type=readable_dir)
args = parser.parse_args()
client = Synchronize(args.config)
client.debug = True
return client.synchronize(args.project_id, args.testables)
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| [] | [] | [
"APPDATA",
"HOME",
"XDG_CONFIG_HOME"
] | [] | ["APPDATA", "HOME", "XDG_CONFIG_HOME"] | python | 3 | 0 | |
src/specs/vendor/github.com/cloudfoundry-incubator/cf-test-helpers/workflowhelpers/test_suite_setup_test.go | package workflowhelpers_test
import (
"os"
"time"
"github.com/cloudfoundry-incubator/cf-test-helpers/config"
starterFakes "github.com/cloudfoundry-incubator/cf-test-helpers/internal/fakes"
. "github.com/cloudfoundry-incubator/cf-test-helpers/workflowhelpers"
"github.com/cloudfoundry-incubator/cf-test-helpers/workflowhelpers/internal"
"github.com/cloudfoundry-incubator/cf-test-helpers/workflowhelpers/internal/fakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("ReproducibleTestSuiteSetup", func() {
Describe("NewBaseTestSuiteSetup", func() {
var cfg config.Config
var apiEndpoint string
var skipSSLValidation bool
var shortTimeout time.Duration
var testUser *internal.TestUser
var testSpace *internal.TestSpace
var regularUserContext, adminUserContext UserContext
BeforeEach(func() {
apiEndpoint = "api.my-cf.com"
skipSSLValidation = false
})
JustBeforeEach(func() {
cfg = config.Config{
TimeoutScale: 2.0,
NamePrefix: "UNIT-TESTS",
SkipSSLValidation: skipSSLValidation,
ApiEndpoint: apiEndpoint,
AdminUser: "admin",
AdminPassword: "admin-password",
}
testSpace = internal.NewRegularTestSpace(&cfg, "10G")
testUser = internal.NewTestUser(&cfg, starterFakes.NewFakeCmdStarter())
shortTimeout = cfg.GetScaledTimeout(1 * time.Minute)
regularUserContext = NewUserContext(apiEndpoint, testUser, testSpace, skipSSLValidation, shortTimeout)
adminUserContext = NewUserContext(apiEndpoint, internal.NewAdminUser(&cfg, starterFakes.NewFakeCmdStarter()), nil, skipSSLValidation, shortTimeout)
})
It("sets ShortTimeout to 1 Minute, scaled by the config", func() {
setup := NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
Expect(setup.ShortTimeout()).To(Equal(time.Duration(cfg.TimeoutScale * float64(1*time.Minute))))
})
It("sets LongTimeout to 5 Minutes, scaled by the config", func() {
setup := NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
Expect(setup.LongTimeout()).To(Equal(time.Duration(cfg.TimeoutScale * float64(5*time.Minute))))
})
It("sets the regularUserContext", func() {
setup := NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
Expect(setup.RegularUserContext()).To(Equal(regularUserContext))
})
It("sets the adminUserContext", func() {
setup := NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
Expect(setup.AdminUserContext()).To(Equal(adminUserContext))
})
It("sets the TestUser", func() {
setup := NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
Expect(setup.TestUser).To(Equal(testUser))
})
It("sets the TestSpace", func() {
setup := NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
Expect(setup.TestSpace).To(Equal(testSpace))
})
It("sets the OrganizationName to the testSpace's organiation name", func() {
setup := NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
Expect(setup.GetOrganizationName()).To(Equal(testSpace.OrganizationName()))
})
})
Describe("NewTestContextSuiteSetup", func() {
var cfg config.Config
var existingUserCfg config.Config
BeforeEach(func() {
cfg = config.Config{
TimeoutScale: 2.0,
NamePrefix: "UNIT-TESTS",
ApiEndpoint: "api.my-cf.com",
AdminUser: "admin",
AdminPassword: "admin-password",
}
existingUserCfg = config.Config{
UseExistingUser: true,
ExistingUser: "existing-user",
ExistingUserPassword: "existing-user-password",
}
})
Describe("regular user", func() {
It("has a regular TestSpace", func() {
setup := NewTestSuiteSetup(&cfg)
testSpace, ok := setup.TestSpace.(*internal.TestSpace)
Expect(ok).To(BeTrue())
Expect(testSpace.OrganizationName()).To(MatchRegexp("UNIT-TESTS-[0-9]+-ORG-.*"))
Expect(testSpace.SpaceName()).To(MatchRegexp("UNIT-TESTS-[0-9]+-SPACE-.*"))
})
It("has a regular TestUser", func() {
setup := NewTestSuiteSetup(&cfg)
Expect(setup.RegularUserContext().TestUser.Username()).To(MatchRegexp("UNIT-TESTS-[0-9]+-USER-.*"))
Expect(len(setup.RegularUserContext().TestUser.Password())).To(Equal(20))
Expect(setup.RegularUserContext().UseClientCredentials).To(BeFalse())
})
It("uses the api endpoint and SkipSSLValidation from the config", func() {
setup := NewTestSuiteSetup(&cfg)
Expect(setup.RegularUserContext().ApiUrl).To(Equal("api.my-cf.com"))
Expect(setup.RegularUserContext().SkipSSLValidation).To(Equal(false))
cfg.ApiEndpoint = "api.other-cf.com"
cfg.SkipSSLValidation = true
setup = NewTestSuiteSetup(&cfg)
Expect(setup.RegularUserContext().ApiUrl).To(Equal("api.other-cf.com"))
Expect(setup.RegularUserContext().SkipSSLValidation).To(BeTrue())
})
It("uses the short timeout", func() {
setup := NewTestSuiteSetup(&cfg)
Expect(setup.RegularUserContext().Timeout).To(Equal(setup.ShortTimeout()))
})
})
Context("admin user", func() {
It("creates an AdminUserContext from the config", func() {
setup := NewTestSuiteSetup(&cfg)
adminUserContext := setup.AdminUserContext()
Expect(adminUserContext.ApiUrl).To(Equal(cfg.ApiEndpoint))
Expect(adminUserContext.Username).To(Equal(cfg.AdminUser))
Expect(adminUserContext.Password).To(Equal(cfg.AdminPassword))
Expect(adminUserContext.TestSpace).To(BeNil())
Expect(adminUserContext.SkipSSLValidation).To(Equal(cfg.SkipSSLValidation))
Expect(adminUserContext.Timeout).To(Equal(cfg.GetScaledTimeout(1 * time.Minute)))
Expect(adminUserContext.UseClientCredentials).To(BeFalse())
})
})
It("uses the existing user", func() {
setup := NewTestSuiteSetup(&existingUserCfg)
regularUserContext := setup.RegularUserContext()
Expect(setup.SkipUserCreation).To(Equal(existingUserCfg.UseExistingUser))
Expect(regularUserContext.TestUser.Username()).To(Equal(existingUserCfg.ExistingUser))
Expect(regularUserContext.TestUser.Password()).To(Equal(existingUserCfg.ExistingUserPassword))
})
Context("Admin Client", func() {
BeforeEach(func() {
cfg = config.Config{
AdminClient: "admin-client",
AdminClientSecret: "admin-client-secret",
}
})
It("can create admin user context given client credentials", func() {
setup := NewTestSuiteSetup(&cfg)
adminUserContext := setup.AdminUserContext()
Expect(adminUserContext.Username).To(Equal(cfg.AdminClient))
Expect(adminUserContext.Password).To(Equal(cfg.AdminClientSecret))
Expect(adminUserContext.UseClientCredentials).To(BeTrue())
})
Context("when admin user is provided", func() {
BeforeEach(func() {
cfg.AdminUser = "should-not-be-used"
cfg.AdminPassword = "should-not-be-used-password"
})
It("doesn't create admin user context with client credentials if only client name is provided", func() {
cfg.AdminClientSecret = ""
setup := NewTestSuiteSetup(&cfg)
adminUserContext := setup.AdminUserContext()
Expect(adminUserContext.Username).To(Equal(cfg.AdminUser))
Expect(adminUserContext.Password).To(Equal(cfg.AdminPassword))
})
It("doesn't create admin user context with client credentials if only client secret is provided", func() {
cfg.AdminClient = ""
setup := NewTestSuiteSetup(&cfg)
adminUserContext := setup.AdminUserContext()
Expect(adminUserContext.Username).To(Equal(cfg.AdminUser))
Expect(adminUserContext.Password).To(Equal(cfg.AdminPassword))
})
It("prefers client credentials when both are provided", func() {
setup := NewTestSuiteSetup(&cfg)
adminUserContext := setup.AdminUserContext()
Expect(adminUserContext.Username).To(Equal(cfg.AdminClient))
Expect(adminUserContext.Password).To(Equal(cfg.AdminClientSecret))
})
})
})
Context("Existing (regular) Client", func() {
BeforeEach(func() {
cfg = config.Config{
NamePrefix: "CF_SMOKE_TESTS",
ExistingClient: "client",
ExistingClientSecret: "client-secret",
}
})
It("can create regular user context given client credentials", func() {
setup := NewTestSuiteSetup(&cfg)
regularUserContext := setup.RegularUserContext()
Expect(regularUserContext.Username).To(Equal(cfg.ExistingClient))
Expect(regularUserContext.Password).To(Equal(cfg.ExistingClientSecret))
Expect(regularUserContext.UseClientCredentials).To(BeTrue())
Expect(setup.SkipUserCreation).To(BeTrue())
})
It("doesn't create regular user context with client credentials if only client name is provided", func() {
cfg.ExistingClientSecret = ""
setup := NewTestSuiteSetup(&cfg)
regularUserContext := setup.RegularUserContext()
Expect(regularUserContext.Username).To(ContainSubstring(cfg.NamePrefix))
})
It("doesn't create regular user context with client credentials if only client secret is provided", func() {
cfg.ExistingClient = ""
setup := NewTestSuiteSetup(&cfg)
regularUserContext := setup.RegularUserContext()
Expect(regularUserContext.Username).To(ContainSubstring(cfg.NamePrefix))
})
It("prefers client credentials when both are provided", func() {
cfg.UseExistingUser = true
cfg.ExistingUser = "user"
cfg.ExistingUserPassword = "password"
setup := NewTestSuiteSetup(&cfg)
regularUserContext := setup.RegularUserContext()
Expect(regularUserContext.Username).To(Equal(cfg.ExistingClient))
Expect(regularUserContext.Password).To(Equal(cfg.ExistingClientSecret))
Expect(setup.SkipUserCreation).To(BeTrue())
})
})
})
Describe("NewSmokeTestSuiteSetup", func() {
It("always skips user creation", func() {
cfg := config.Config{
UseExistingUser: false,
}
setup := NewSmokeTestSuiteSetup(&cfg)
Expect(setup.SkipUserCreation).To(BeTrue())
})
})
Describe("Setup", func() {
var testSpace *fakes.FakeSpace
var testUser *fakes.FakeRemoteResource
var fakeRegularUserValues, fakeAdminUserValues *fakes.FakeUserValues
var fakeSpaceValues *fakes.FakeSpaceValues
var regularUserCmdStarter, adminUserCmdStarter *starterFakes.FakeCmdStarter
var regularUserContext, adminUserContext UserContext
var cfg config.Config
var apiUrl string
var testSetup *ReproducibleTestSuiteSetup
BeforeEach(func() {
apiUrl = "api-url.com"
testSpace = &fakes.FakeSpace{}
testUser = &fakes.FakeRemoteResource{}
regularUserCmdStarter = starterFakes.NewFakeCmdStarter()
adminUserCmdStarter = starterFakes.NewFakeCmdStarter()
fakeRegularUserValues = fakes.NewFakeUserValues("username", "password")
fakeAdminUserValues = fakes.NewFakeUserValues("admin", "admin")
fakeSpaceValues = fakes.NewFakeSpaceValues("org", "space")
regularUserContext = UserContext{
ApiUrl: apiUrl,
CommandStarter: regularUserCmdStarter,
TestUser: fakeRegularUserValues,
Timeout: 2 * time.Second,
TestSpace: fakeSpaceValues,
}
adminUserContext = UserContext{
ApiUrl: apiUrl,
CommandStarter: adminUserCmdStarter,
TestUser: fakeAdminUserValues,
Timeout: 2 * time.Second,
}
cfg = config.Config{}
})
JustBeforeEach(func() {
testSetup = NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
})
It("logs in as the admin", func() {
testSetup.Setup()
Expect(adminUserCmdStarter.TotalCallsToStart).To(BeNumerically(">=", 2))
Expect(adminUserCmdStarter.CalledWith[0].Executable).To(Equal("cf"))
Expect(adminUserCmdStarter.CalledWith[0].Args).To(Equal([]string{"api", apiUrl}))
Expect(adminUserCmdStarter.CalledWith[1].Executable).To(Equal("cf"))
Expect(adminUserCmdStarter.CalledWith[1].Args).To(Equal([]string{"auth", "admin", "admin"}))
})
It("creates the user on the remote CF Api", func() {
testSetup.Setup()
Expect(testUser.CreateCallCount()).To(Equal(1))
Expect(adminUserCmdStarter.TotalCallsToStart).To(Equal(3))
})
It("creates the space on the remote CF api", func() {
testSetup.Setup()
Expect(testSpace.CreateCallCount()).To(Equal(1))
})
It("adds the user to the space", func() {
testSetup.Setup()
Expect(regularUserCmdStarter.TotalCallsToStart).To(BeNumerically(">=", 3))
Expect(regularUserCmdStarter.CalledWith[0].Executable).To(Equal("cf"))
Expect(regularUserCmdStarter.CalledWith[0].Args).To(Equal([]string{"set-space-role", fakeRegularUserValues.Username(), fakeSpaceValues.OrganizationName(), fakeSpaceValues.SpaceName(), "SpaceManager"}))
Expect(regularUserCmdStarter.CalledWith[1].Executable).To(Equal("cf"))
Expect(regularUserCmdStarter.CalledWith[1].Args).To(Equal([]string{"set-space-role", fakeRegularUserValues.Username(), fakeSpaceValues.OrganizationName(), fakeSpaceValues.SpaceName(), "SpaceDeveloper"}))
Expect(regularUserCmdStarter.CalledWith[2].Executable).To(Equal("cf"))
Expect(regularUserCmdStarter.CalledWith[2].Args).To(Equal([]string{"set-space-role", fakeRegularUserValues.Username(), fakeSpaceValues.OrganizationName(), fakeSpaceValues.SpaceName(), "SpaceAuditor"}))
})
It("logs in as the regular user in a unique CF_HOME and targets the correct space", func() {
originalCfHomeDir := "originl-cf-home-dir"
os.Setenv("CF_HOME", originalCfHomeDir)
testSetup.Setup()
Expect(os.Getenv("CF_HOME")).To(MatchRegexp("cf_home_.*"))
Expect(os.Getenv("CF_HOME")).NotTo(Equal(originalCfHomeDir))
Expect(regularUserCmdStarter.TotalCallsToStart).To(BeNumerically(">=", 6))
Expect(regularUserCmdStarter.CalledWith[3].Executable).To(Equal("cf"))
Expect(regularUserCmdStarter.CalledWith[3].Args).To(Equal([]string{"api", apiUrl}))
Expect(regularUserCmdStarter.CalledWith[4].Executable).To(Equal("cf"))
Expect(regularUserCmdStarter.CalledWith[4].Args).To(Equal([]string{"auth", fakeRegularUserValues.Username(), fakeRegularUserValues.Password()}))
Expect(regularUserCmdStarter.CalledWith[5].Executable).To(Equal("cf"))
Expect(regularUserCmdStarter.CalledWith[5].Args).To(Equal([]string{"target", "-o", fakeSpaceValues.OrganizationName(), "-s", fakeSpaceValues.SpaceName()}))
})
It("skips creating the user when called with skipUserCreation on", func() {
testSetup = NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, true)
testSetup.Setup()
Expect(testUser.CreateCallCount()).To(Equal(0))
})
})
Describe("TearDown", func() {
var testSpace *fakes.FakeSpace
var testUser *fakes.FakeRemoteResource
var fakeRegularUserValues, fakeAdminUserValues *fakes.FakeUserValues
var fakeSpaceValues *fakes.FakeSpaceValues
var regularUserCmdStarter, adminUserCmdStarter *starterFakes.FakeCmdStarter
var regularUserContext, adminUserContext UserContext
var cfg config.Config
var apiUrl string
var testSetup *ReproducibleTestSuiteSetup
BeforeEach(func() {
apiUrl = "api-url.com"
testSpace = &fakes.FakeSpace{}
testUser = &fakes.FakeRemoteResource{}
regularUserCmdStarter = starterFakes.NewFakeCmdStarter()
adminUserCmdStarter = starterFakes.NewFakeCmdStarter()
fakeRegularUserValues = fakes.NewFakeUserValues("username", "password")
fakeAdminUserValues = fakes.NewFakeUserValues("admin", "admin")
fakeSpaceValues = fakes.NewFakeSpaceValues("org", "space")
regularUserContext = UserContext{
ApiUrl: apiUrl,
CommandStarter: regularUserCmdStarter,
TestUser: fakeRegularUserValues,
Timeout: 2 * time.Second,
TestSpace: fakeSpaceValues,
}
adminUserContext = UserContext{
ApiUrl: apiUrl,
CommandStarter: adminUserCmdStarter,
TestUser: fakeAdminUserValues,
Timeout: 2 * time.Second,
}
cfg = config.Config{}
})
JustBeforeEach(func() {
testSetup = NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, false)
})
It("logs out the regular user", func() {
testSetup.Teardown()
Expect(regularUserCmdStarter.TotalCallsToStart).To(BeNumerically(">=", 1))
Expect(regularUserCmdStarter.CalledWith[0].Executable).To(Equal("cf"))
Expect(regularUserCmdStarter.CalledWith[0].Args).To(Equal([]string{"logout"}))
})
It("restores cf home directory", func() {
originalCfHomeDir := "originl-cf-home-dir"
os.Setenv("CF_HOME", originalCfHomeDir)
testSetup.Setup()
Expect(os.Getenv("CF_HOME")).NotTo(Equal(originalCfHomeDir))
testSetup.Teardown()
Expect(os.Getenv("CF_HOME")).To(Equal(originalCfHomeDir))
})
It("logs in as an admin", func() {
testSetup.Teardown()
Expect(adminUserCmdStarter.TotalCallsToStart).To(BeNumerically(">=", 2))
Expect(adminUserCmdStarter.CalledWith[0].Executable).To(Equal("cf"))
Expect(adminUserCmdStarter.CalledWith[0].Args).To(Equal([]string{"api", apiUrl}))
Expect(adminUserCmdStarter.CalledWith[1].Executable).To(Equal("cf"))
Expect(adminUserCmdStarter.CalledWith[1].Args).To(Equal([]string{"auth", "admin", "admin"}))
})
It("destroys the user", func() {
testSetup.Teardown()
Expect(testUser.DestroyCallCount()).To(Equal(1))
})
Context("when the user should remain", func() {
BeforeEach(func() {
testUser.ShouldRemainReturns = true
})
It("does not destroy the user", func() {
testSetup.Teardown()
Expect(testUser.DestroyCallCount()).To(Equal(0))
})
})
Context("when the user was not created", func() {
JustBeforeEach(func() {
testSetup = NewBaseTestSuiteSetup(&cfg, testSpace, testUser, regularUserContext, adminUserContext, true)
})
It("does not destroy the user", func() {
testSetup.Teardown()
Expect(testUser.DestroyCallCount()).To(Equal(0))
})
})
It("destroys the space", func() {
testSetup.Teardown()
Expect(testSpace.DestroyCallCount()).To(Equal(1))
})
})
})
| ["\"CF_HOME\"", "\"CF_HOME\"", "\"CF_HOME\"", "\"CF_HOME\""] | [] | ["CF_HOME"] | [] | ["CF_HOME"] | go | 1 | 0 | |
installforlogin.go | // Copyright 2021 John Slee. Released under the terms of the MIT License
// (see LICENSE.md in this directory)
package main
import (
"fmt"
"os"
"os/exec"
"path"
"strings"
"howett.net/plist"
)
type installForLoginPlist struct {
UserName string `plist:"UserName"`
GroupName string `plist:"GroupName"`
RunAtLoad bool `plist:"RunAtLoad"`
Label string `plist:"Label"`
ProgramArguments []string `plist:"ProgramArguments"`
StandardOutPath string `plist:"StandardOutPath"`
StandardErrorPath string `plist:"StandardErrorPath"`
}
// userName gets the current user *name* in a way that doesn't require cgo
// *and* (unlike os/user in Go standard library, at this time of writing) will
// still work when the user isn't listed in /etc/passwd, eg. when directory
// services are involved. Ugly, yes, but should be very reliable
func userName() (string, error) {
cmd := exec.Command("/usr/bin/id", "-u", "-n")
b, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("user: %w")
}
return strings.TrimSpace(string(b)), nil
}
// groupName gets the current primary group --- same thinking as userName
func groupName() (string, error) {
cmd := exec.Command("/usr/bin/id", "-g", "-n")
b, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("group: %w")
}
return strings.TrimSpace(string(b)), nil
}
// InstallForLogin installs a plist file to run wfhlogger at user login
func InstallForLogin(workdir string) error {
const Label = "io.jslee.wfhlogger"
ipl := installForLoginPlist{
Label: Label,
ProgramArguments: []string{"wfhlogger"},
RunAtLoad: true,
StandardOutPath: path.Join(workdir, "wfhlogger.log"),
StandardErrorPath: path.Join(workdir, "wfhlogger.log"),
}
var err error
var bin string
if bin, err = os.Executable(); err != nil {
return fmt.Errorf("InstallForLogin: get executable path: %w", err)
}
ipl.ProgramArguments = []string{bin}
if ipl.UserName, err = userName(); err != nil {
return fmt.Errorf("InstallForLogin: get current user: %w", err)
}
if ipl.GroupName, err = groupName(); err != nil {
return fmt.Errorf("InstallForLogin: get current primary group: %w", err)
}
home := os.Getenv("HOME")
if home == "" {
return fmt.Errorf("InstallForLogin: help me out here, why isn't $HOME set?")
}
iplDir := path.Join(home, "Library", "LaunchAgents")
if err := os.MkdirAll(iplDir, 0700); err != nil {
return fmt.Errorf("InstallForLogin: attempting to mkdir %q: %w", iplDir, err)
}
iplPath := path.Join(iplDir, Label+".plist")
iplFile, err := os.OpenFile(iplPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return fmt.Errorf("InstallForLogin: attempting to open %q: %w", iplPath, err)
}
defer iplFile.Close()
encoder := plist.NewEncoder(iplFile)
encoder.Indent(" ")
if err := encoder.Encode(ipl); err != nil {
return fmt.Errorf("InstallForLogin: attempting to write plist %q: %w", iplPath, err)
}
fmt.Printf("Created launchd service: %s\n", iplPath)
return nil
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
src/testcases/CWE369_Divide_by_Zero/s02/CWE369_Divide_by_Zero__int_Environment_divide_72a.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE369_Divide_by_Zero__int_Environment_divide_72a.java
Label Definition File: CWE369_Divide_by_Zero__int.label.xml
Template File: sources-sinks-72a.tmpl.java
*/
/*
* @description
* CWE: 369 Divide by zero
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: divide
* GoodSink: Check for zero before dividing
* BadSink : Dividing by a value that may be zero
* Flow Variant: 72 Data flow: data passed in a Vector from one method to another in different source files in the same package
*
* */
package testcases.CWE369_Divide_by_Zero.s02;
import testcasesupport.*;
import java.util.Vector;
import javax.servlet.http.*;
import java.util.logging.Level;
public class CWE369_Divide_by_Zero__int_Environment_divide_72a extends AbstractTestCase
{
public void bad() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
Vector<Integer> dataVector = new Vector<Integer>(5);
dataVector.add(0, data);
dataVector.add(1, data);
dataVector.add(2, data);
(new CWE369_Divide_by_Zero__int_Environment_divide_72b()).badSink(dataVector );
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* goodG2B() - use GoodSource and BadSink */
private void goodG2B() throws Throwable
{
int data;
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
Vector<Integer> dataVector = new Vector<Integer>(5);
dataVector.add(0, data);
dataVector.add(1, data);
dataVector.add(2, data);
(new CWE369_Divide_by_Zero__int_Environment_divide_72b()).goodG2BSink(dataVector );
}
/* goodB2G() - use BadSource and GoodSink */
private void goodB2G() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
Vector<Integer> dataVector = new Vector<Integer>(5);
dataVector.add(0, data);
dataVector.add(1, data);
dataVector.add(2, data);
(new CWE369_Divide_by_Zero__int_Environment_divide_72b()).goodB2GSink(dataVector );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| ["\"ADD\"", "\"ADD\""] | [] | ["ADD"] | [] | ["ADD"] | java | 1 | 0 | |
src/cmd/compile/internal/gc/ssa.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"bytes"
"encoding/binary"
"fmt"
"html"
"os"
"sort"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache
func initssaconfig() {
types_ := ssa.Types{
Bool: types.Types[TBOOL],
Int8: types.Types[TINT8],
Int16: types.Types[TINT16],
Int32: types.Types[TINT32],
Int64: types.Types[TINT64],
UInt8: types.Types[TUINT8],
UInt16: types.Types[TUINT16],
UInt32: types.Types[TUINT32],
UInt64: types.Types[TUINT64],
Float32: types.Types[TFLOAT32],
Float64: types.Types[TFLOAT64],
Int: types.Types[TINT],
UInt: types.Types[TUINT],
Uintptr: types.Types[TUINTPTR],
String: types.Types[TSTRING],
BytePtr: types.NewPtr(types.Types[TUINT8]),
Int32Ptr: types.NewPtr(types.Types[TINT32]),
UInt32Ptr: types.NewPtr(types.Types[TUINT32]),
IntPtr: types.NewPtr(types.Types[TINT]),
UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
}
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
_ = types.NewPtr(types.NewPtr(types.Idealstring)) // **string
_ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
_ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
_ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
_ = types.NewPtr(types.NewSlice(types.Idealstring)) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[TINT16]) // *int16
_ = types.NewPtr(types.Types[TINT64]) // *int64
_ = types.NewPtr(types.Errortype) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
if thearch.LinkArch.Name == "386" {
ssaConfig.Set387(thearch.Use387)
}
ssaCaches = make([]ssa.Cache, nBackendWorkers)
// Set up some runtime functions we'll need to call.
Newproc = sysfunc("newproc")
Deferproc = sysfunc("deferproc")
Deferreturn = sysfunc("deferreturn")
Duffcopy = sysfunc("duffcopy")
Duffzero = sysfunc("duffzero")
panicindex = sysfunc("panicindex")
panicslice = sysfunc("panicslice")
panicdivide = sysfunc("panicdivide")
growslice = sysfunc("growslice")
panicdottypeE = sysfunc("panicdottypeE")
panicdottypeI = sysfunc("panicdottypeI")
panicnildottype = sysfunc("panicnildottype")
assertE2I = sysfunc("assertE2I")
assertE2I2 = sysfunc("assertE2I2")
assertI2I = sysfunc("assertI2I")
assertI2I2 = sysfunc("assertI2I2")
goschedguarded = sysfunc("goschedguarded")
writeBarrier = sysfunc("writeBarrier")
writebarrierptr = sysfunc("writebarrierptr")
typedmemmove = sysfunc("typedmemmove")
typedmemclr = sysfunc("typedmemclr")
Udiv = sysfunc("udiv")
// GO386=387 runtime functions
ControlWord64trunc = sysfunc("controlWord64trunc")
ControlWord32 = sysfunc("controlWord32")
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
name := fn.funcname()
printssa := name == os.Getenv("GOSSAFUNC")
if printssa {
fmt.Println("generating SSA for", name)
dumplist("buildssa-enter", fn.Func.Enter)
dumplist("buildssa-body", fn.Nbody)
dumplist("buildssa-exit", fn.Func.Exit)
}
var s state
s.pushLine(fn.Pos)
defer s.popLine()
s.hasdefer = fn.Func.HasDefer()
if fn.Func.Pragma&CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
fe := ssafn{
curfn: fn,
log: printssa,
}
s.curfn = fn
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
s.f.Name = name
if fn.Func.Pragma&Nosplit != 0 {
s.f.NoSplit = true
}
defer func() {
if s.f.WBPos.IsKnown() {
fn.Func.WBPos = s.f.WBPos
}
}()
s.exitCode = fn.Func.Exit
s.panics = map[funcLine]*ssa.Block{}
if name == os.Getenv("GOSSAFUNC") {
s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
// TODO: generate and print a mapping from nodes to values and blocks
}
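	// Editorial note: the two environment hooks read above are typically driven
	// from the shell, e.g. GOSSAFUNC=main go build main.go, which prints main's
	// SSA phases and writes ssa.html where the compile runs; GOSSAHASH is
	// consumed by DebugHashMatch above for bisect-style debugging.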
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
// Allocate starting values
s.labels = map[string]*ssaLabel{}
s.labeledNodes = map[*Node]*ssaLabel{}
s.fwdVars = map[*Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
s.startBlock(s.f.Entry)
s.vars[&memVar] = s.startmem
s.varsyms = map[*Node]interface{}{}
// Generate addresses of local declarations
s.decladdrs = map[*Node]*ssa.Value{}
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT:
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp)
if n.Class() == PPARAMOUT && s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
s.returns = append(s.returns, n)
}
case PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
case PAUTOHEAP:
// moved to heap - already handled by frontend
case PFUNC:
// local function - already handled by frontend
default:
s.Fatalf("local variable with class %s unimplemented", classnames[n.Class()])
}
}
// Populate SSAable arguments.
for _, n := range fn.Func.Dcl {
if n.Class() == PPARAM && s.canSSA(n) {
s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Func.Enter)
s.stmtList(fn.Nbody)
// fallthrough to exit
if s.curBlock != nil {
s.pushLine(fn.Func.Endlineno)
s.exit()
s.popLine()
}
s.insertPhis()
// Don't carry reference this around longer than necessary
s.exitCode = Nodes{}
// Main call to ssa package to compile function
ssa.Compile(s.f)
return s.f
}
type state struct {
// configuration (arch) information
config *ssa.Config
// function we're building
f *ssa.Func
// Node for function
curfn *Node
// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
labels map[string]*ssaLabel
labeledNodes map[*Node]*ssaLabel
// Code that must precede any return
// (e.g., copying value of heap-escaped paramout back to true paramout)
exitCode Nodes
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
continueTo *ssa.Block // current target for plain continue statement
// current location where we're interpreting the AST
curBlock *ssa.Block
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
vars map[*Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
fwdVars map[*Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[*Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
decladdrs map[*Node]*ssa.Value
// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
varsyms map[*Node]interface{}
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
sb *ssa.Value
// line number stack. The current line number is top of stack
line []src.XPos
// list of panic calls by function name and line number.
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
returns []*Node
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
}
type funcLine struct {
f *obj.LSym
file string
line uint
}
type ssaLabel struct {
target *ssa.Block // block identified by this label
breakTarget *ssa.Block // block to break to in control flow node identified by this label
continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
lab := s.labels[sym.Name]
if lab == nil {
lab = new(ssaLabel)
s.labels[sym.Name] = lab
}
return lab
}
func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
var (
// dummy node for the memory variable
memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
// dummy nodes for temporary variables
ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
if s.curBlock != nil {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
s.vars = map[*Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
b := s.curBlock
if b == nil {
return nil
}
for len(s.defvars) <= int(b.ID) {
s.defvars = append(s.defvars, nil)
}
s.defvars[b.ID] = s.vars
s.curBlock = nil
s.vars = nil
b.Pos = s.peekPos()
return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
if !line.IsKnown() {
// the frontend may emit node with line number missing,
// use the parent line number in this case.
line = s.peekPos()
if Debug['K'] != 0 {
Warn("buildssa: unknown position (line 0)")
}
}
s.line = append(s.line, line)
}
// popLine pops the top of the line number stack.
func (s *state) popLine() {
s.line = s.line[:len(s.line)-1]
}
// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
return s.line[len(s.line)-1]
}
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.curBlock.NewValue0(s.peekPos(), op, t)
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}
// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}
// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}
// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}
// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}
// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.f.Entry.NewValue0(src.NoXPos, op, t)
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
return s.f.ConstSlice(s.peekPos(), t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
return s.f.ConstInterface(s.peekPos(), t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
return s.f.ConstEmptyString(s.peekPos(), t)
}
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
if s.config.PtrSize == 8 {
return s.constInt64(t, c)
}
if int64(int32(c)) != c {
s.Fatalf("integer constant too big %d", c)
}
return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
for _, n := range l.Slice() {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
s.pushLine(n.Pos)
defer s.popLine()
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
if s.curBlock == nil && n.Op != OLABEL {
return
}
s.stmtList(n.Ninit)
switch n.Op {
case OBLOCK:
s.stmtList(n.List)
// No-ops
case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
// Expression statements
case OCALLFUNC:
if isIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
case OCALLMETH, OCALLINTER:
s.call(n, callNormal)
if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
// TODO: never rewrite OPANIC to OCALLFUNC in the
// first place. Need to wait until all backends
// go through SSA.
}
}
case ODEFER:
s.call(n.Left, callDefer)
case OPROC:
s.call(n.Left, callGo)
case OAS2DOTTYPE:
res, resok := s.dottype(n.Rlist.First(), true)
deref := false
if !canSSAType(n.Rlist.First().Type) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
mem := s.mem()
if mem.Op == ssa.OpVarKill {
mem = mem.Args[0]
}
if res.Args[1] != mem {
s.Fatalf("memory no longer live from 2-result dottype load")
}
deref = true
res = res.Args[0]
}
s.assign(n.List.First(), res, deref, 0)
s.assign(n.List.Second(), resok, false, 0)
return
case OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
if !isIntrinsicCall(n.Rlist.First()) {
s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
}
v := s.intrinsicCall(n.Rlist.First())
v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
s.assign(n.List.First(), v1, false, 0)
s.assign(n.List.Second(), v2, false, 0)
return
case ODCL:
if n.Left.Class() == PAUTOHEAP {
Fatalf("DCL %v", n)
}
case OLABEL:
sym := n.Left.Sym
lab := s.label(sym)
// Associate label with its control flow node, if any
if ctl := n.labeledControl(); ctl != nil {
s.labeledNodes[ctl] = lab
}
// The label might already have a target block via a goto.
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
// Go to that label.
// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
if s.curBlock != nil {
b := s.endBlock()
b.AddEdgeTo(lab.target)
}
s.startBlock(lab.target)
case OGOTO:
sym := n.Left.Sym
lab := s.label(sym)
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
b := s.endBlock()
b.AddEdgeTo(lab.target)
case OAS:
if n.Left == n.Right && n.Left.Op == ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
// VARDEF x
// COPY x -> x
// which is bad because x is incorrectly considered
// dead before the vardef. See issue #14904.
return
}
// Evaluate RHS.
rhs := n.Right
if rhs != nil {
switch rhs.Op {
case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !iszero(rhs) {
Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
case OAPPEND:
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
if !samesafeexpr(n.Left, rhs.List.First()) {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
if s.canSSA(n.Left) {
if Debug_append > 0 { // replicating old diagnostic message
Warnl(n.Pos, "append: len-only update (in local slice)")
}
break
}
if Debug_append > 0 {
Warnl(n.Pos, "append: len-only update")
}
s.append(rhs, true)
return
}
}
if isblank(n.Left) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
s.expr(rhs)
}
return
}
var t *types.Type
if n.Right != nil {
t = n.Right.Type
} else {
t = n.Left.Type
}
var r *ssa.Value
deref := !canSSAType(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
} else {
r = s.addr(rhs, false)
}
} else {
if rhs == nil {
r = s.zeroVal(t)
} else {
r = s.expr(rhs)
}
}
var skip skipMask
if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
// [0:...] is the same as [:...]
i = nil
}
// TODO: detect defaults for len/cap also.
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
// j = nil
//}
//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
skip |= skipPtr
if j == nil {
skip |= skipLen
}
if k == nil {
skip |= skipCap
}
}
}
s.assign(n.Left, r, deref, skip)
case OIF:
bThen := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
var bElse *ssa.Block
var likely int8
if n.Likely() {
likely = 1
}
if n.Rlist.Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
s.condBranch(n.Left, bThen, bElse, likely)
} else {
s.condBranch(n.Left, bThen, bEnd, likely)
}
s.startBlock(bThen)
s.stmtList(n.Nbody)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
if n.Rlist.Len() != 0 {
s.startBlock(bElse)
s.stmtList(n.Rlist)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
case ORETURN:
s.stmtList(n.List)
s.exit()
case ORETJMP:
s.stmtList(n.List)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
b.Aux = n.Left.Sym.Linksym()
case OCONTINUE, OBREAK:
var to *ssa.Block
if n.Left == nil {
// plain break/continue
switch n.Op {
case OCONTINUE:
to = s.continueTo
case OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
sym := n.Left.Sym
lab := s.label(sym)
switch n.Op {
case OCONTINUE:
to = lab.continueTarget
case OBREAK:
to = lab.breakTarget
}
}
b := s.endBlock()
b.AddEdgeTo(to)
case OFOR, OFORUNTIL:
// OFOR: for Ninit; Left; Right { Nbody }
// For = cond; body; incr
// Foruntil = body; incr; cond
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bIncr := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
if n.Op == OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
if n.Left != nil {
s.condBranch(n.Left, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
} else {
b.AddEdgeTo(bBody)
}
// set up for continue/break in body
prevContinue := s.continueTo
prevBreak := s.breakTo
s.continueTo = bIncr
s.breakTo = bEnd
lab := s.labeledNodes[n]
if lab != nil {
// labeled for loop
lab.continueTarget = bIncr
lab.breakTarget = bEnd
}
// generate body
s.startBlock(bBody)
s.stmtList(n.Nbody)
// tear down continue/break
s.continueTo = prevContinue
s.breakTo = prevBreak
if lab != nil {
lab.continueTarget = nil
lab.breakTarget = nil
}
// done with body, goto incr
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bIncr)
}
// generate incr
s.startBlock(bIncr)
if n.Right != nil {
s.stmt(n.Right)
}
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
}
if n.Op == OFORUNTIL {
// generate code to test condition
s.startBlock(bCond)
if n.Left != nil {
s.condBranch(n.Left, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
}
s.startBlock(bEnd)
case OSWITCH, OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
prevBreak := s.breakTo
s.breakTo = bEnd
lab := s.labeledNodes[n]
if lab != nil {
// labeled
lab.breakTarget = bEnd
}
// generate body code
s.stmtList(n.Nbody)
s.breakTo = prevBreak
if lab != nil {
lab.breakTarget = nil
}
// walk adds explicit OBREAK nodes to the end of all reachable code paths.
// If we still have a current block here, then mark it unreachable.
if s.curBlock != nil {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
}
s.startBlock(bEnd)
case OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
if !s.canSSA(n.Left) {
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem())
}
case OVARLIVE:
// Insert a varlive op to record that a variable is still live.
if !n.Left.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
}
var aux interface{}
switch n.Left.Class() {
case PAUTO:
aux = s.lookupSymbol(n.Left, &ssa.AutoSymbol{Node: n.Left})
case PPARAM, PPARAMOUT:
aux = s.lookupSymbol(n.Left, &ssa.ArgSymbol{Node: n.Left})
default:
s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
}
s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, aux, s.mem())
case OCHECKNIL:
p := s.expr(n.Left)
s.nilCheck(p)
default:
s.Fatalf("unhandled stmt %v", n.Op)
}
}
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
if s.hasdefer {
s.rtcall(Deferreturn, true, nil)
}
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
s.stmtList(s.exitCode)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
val := s.variable(n, n.Type)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem())
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
}
// Do actual return.
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
return b
}
type opAndType struct {
op Op
etype types.EType
}
var opToSSA = map[opAndType]ssa.Op{
opAndType{OADD, TINT8}: ssa.OpAdd8,
opAndType{OADD, TUINT8}: ssa.OpAdd8,
opAndType{OADD, TINT16}: ssa.OpAdd16,
opAndType{OADD, TUINT16}: ssa.OpAdd16,
opAndType{OADD, TINT32}: ssa.OpAdd32,
opAndType{OADD, TUINT32}: ssa.OpAdd32,
opAndType{OADD, TPTR32}: ssa.OpAdd32,
opAndType{OADD, TINT64}: ssa.OpAdd64,
opAndType{OADD, TUINT64}: ssa.OpAdd64,
opAndType{OADD, TPTR64}: ssa.OpAdd64,
opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
opAndType{OSUB, TINT8}: ssa.OpSub8,
opAndType{OSUB, TUINT8}: ssa.OpSub8,
opAndType{OSUB, TINT16}: ssa.OpSub16,
opAndType{OSUB, TUINT16}: ssa.OpSub16,
opAndType{OSUB, TINT32}: ssa.OpSub32,
opAndType{OSUB, TUINT32}: ssa.OpSub32,
opAndType{OSUB, TINT64}: ssa.OpSub64,
opAndType{OSUB, TUINT64}: ssa.OpSub64,
opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
opAndType{ONOT, TBOOL}: ssa.OpNot,
opAndType{OMINUS, TINT8}: ssa.OpNeg8,
opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
opAndType{OMINUS, TINT16}: ssa.OpNeg16,
opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
opAndType{OMINUS, TINT32}: ssa.OpNeg32,
opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
opAndType{OMINUS, TINT64}: ssa.OpNeg64,
opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
opAndType{OCOM, TINT8}: ssa.OpCom8,
opAndType{OCOM, TUINT8}: ssa.OpCom8,
opAndType{OCOM, TINT16}: ssa.OpCom16,
opAndType{OCOM, TUINT16}: ssa.OpCom16,
opAndType{OCOM, TINT32}: ssa.OpCom32,
opAndType{OCOM, TUINT32}: ssa.OpCom32,
opAndType{OCOM, TINT64}: ssa.OpCom64,
opAndType{OCOM, TUINT64}: ssa.OpCom64,
opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
opAndType{OMUL, TINT8}: ssa.OpMul8,
opAndType{OMUL, TUINT8}: ssa.OpMul8,
opAndType{OMUL, TINT16}: ssa.OpMul16,
opAndType{OMUL, TUINT16}: ssa.OpMul16,
opAndType{OMUL, TINT32}: ssa.OpMul32,
opAndType{OMUL, TUINT32}: ssa.OpMul32,
opAndType{OMUL, TINT64}: ssa.OpMul64,
opAndType{OMUL, TUINT64}: ssa.OpMul64,
opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
opAndType{ODIV, TINT8}: ssa.OpDiv8,
opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
opAndType{ODIV, TINT16}: ssa.OpDiv16,
opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
opAndType{ODIV, TINT32}: ssa.OpDiv32,
opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
opAndType{ODIV, TINT64}: ssa.OpDiv64,
opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
opAndType{OMOD, TINT8}: ssa.OpMod8,
opAndType{OMOD, TUINT8}: ssa.OpMod8u,
opAndType{OMOD, TINT16}: ssa.OpMod16,
opAndType{OMOD, TUINT16}: ssa.OpMod16u,
opAndType{OMOD, TINT32}: ssa.OpMod32,
opAndType{OMOD, TUINT32}: ssa.OpMod32u,
opAndType{OMOD, TINT64}: ssa.OpMod64,
opAndType{OMOD, TUINT64}: ssa.OpMod64u,
opAndType{OAND, TINT8}: ssa.OpAnd8,
opAndType{OAND, TUINT8}: ssa.OpAnd8,
opAndType{OAND, TINT16}: ssa.OpAnd16,
opAndType{OAND, TUINT16}: ssa.OpAnd16,
opAndType{OAND, TINT32}: ssa.OpAnd32,
opAndType{OAND, TUINT32}: ssa.OpAnd32,
opAndType{OAND, TINT64}: ssa.OpAnd64,
opAndType{OAND, TUINT64}: ssa.OpAnd64,
opAndType{OOR, TINT8}: ssa.OpOr8,
opAndType{OOR, TUINT8}: ssa.OpOr8,
opAndType{OOR, TINT16}: ssa.OpOr16,
opAndType{OOR, TUINT16}: ssa.OpOr16,
opAndType{OOR, TINT32}: ssa.OpOr32,
opAndType{OOR, TUINT32}: ssa.OpOr32,
opAndType{OOR, TINT64}: ssa.OpOr64,
opAndType{OOR, TUINT64}: ssa.OpOr64,
opAndType{OXOR, TINT8}: ssa.OpXor8,
opAndType{OXOR, TUINT8}: ssa.OpXor8,
opAndType{OXOR, TINT16}: ssa.OpXor16,
opAndType{OXOR, TUINT16}: ssa.OpXor16,
opAndType{OXOR, TINT32}: ssa.OpXor32,
opAndType{OXOR, TUINT32}: ssa.OpXor32,
opAndType{OXOR, TINT64}: ssa.OpXor64,
opAndType{OXOR, TUINT64}: ssa.OpXor64,
opAndType{OEQ, TBOOL}: ssa.OpEqB,
opAndType{OEQ, TINT8}: ssa.OpEq8,
opAndType{OEQ, TUINT8}: ssa.OpEq8,
opAndType{OEQ, TINT16}: ssa.OpEq16,
opAndType{OEQ, TUINT16}: ssa.OpEq16,
opAndType{OEQ, TINT32}: ssa.OpEq32,
opAndType{OEQ, TUINT32}: ssa.OpEq32,
opAndType{OEQ, TINT64}: ssa.OpEq64,
opAndType{OEQ, TUINT64}: ssa.OpEq64,
opAndType{OEQ, TINTER}: ssa.OpEqInter,
opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
opAndType{OEQ, TMAP}: ssa.OpEqPtr,
opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
opAndType{OEQ, TPTR32}: ssa.OpEqPtr,
opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
opAndType{ONE, TBOOL}: ssa.OpNeqB,
opAndType{ONE, TINT8}: ssa.OpNeq8,
opAndType{ONE, TUINT8}: ssa.OpNeq8,
opAndType{ONE, TINT16}: ssa.OpNeq16,
opAndType{ONE, TUINT16}: ssa.OpNeq16,
opAndType{ONE, TINT32}: ssa.OpNeq32,
opAndType{ONE, TUINT32}: ssa.OpNeq32,
opAndType{ONE, TINT64}: ssa.OpNeq64,
opAndType{ONE, TUINT64}: ssa.OpNeq64,
opAndType{ONE, TINTER}: ssa.OpNeqInter,
opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
opAndType{ONE, TMAP}: ssa.OpNeqPtr,
opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
opAndType{ONE, TPTR32}: ssa.OpNeqPtr,
opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
opAndType{OLT, TINT8}: ssa.OpLess8,
opAndType{OLT, TUINT8}: ssa.OpLess8U,
opAndType{OLT, TINT16}: ssa.OpLess16,
opAndType{OLT, TUINT16}: ssa.OpLess16U,
opAndType{OLT, TINT32}: ssa.OpLess32,
opAndType{OLT, TUINT32}: ssa.OpLess32U,
opAndType{OLT, TINT64}: ssa.OpLess64,
opAndType{OLT, TUINT64}: ssa.OpLess64U,
opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
opAndType{OGT, TINT8}: ssa.OpGreater8,
opAndType{OGT, TUINT8}: ssa.OpGreater8U,
opAndType{OGT, TINT16}: ssa.OpGreater16,
opAndType{OGT, TUINT16}: ssa.OpGreater16U,
opAndType{OGT, TINT32}: ssa.OpGreater32,
opAndType{OGT, TUINT32}: ssa.OpGreater32U,
opAndType{OGT, TINT64}: ssa.OpGreater64,
opAndType{OGT, TUINT64}: ssa.OpGreater64U,
opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
opAndType{OLE, TINT8}: ssa.OpLeq8,
opAndType{OLE, TUINT8}: ssa.OpLeq8U,
opAndType{OLE, TINT16}: ssa.OpLeq16,
opAndType{OLE, TUINT16}: ssa.OpLeq16U,
opAndType{OLE, TINT32}: ssa.OpLeq32,
opAndType{OLE, TUINT32}: ssa.OpLeq32U,
opAndType{OLE, TINT64}: ssa.OpLeq64,
opAndType{OLE, TUINT64}: ssa.OpLeq64U,
opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
opAndType{OGE, TINT8}: ssa.OpGeq8,
opAndType{OGE, TUINT8}: ssa.OpGeq8U,
opAndType{OGE, TINT16}: ssa.OpGeq16,
opAndType{OGE, TUINT16}: ssa.OpGeq16U,
opAndType{OGE, TINT32}: ssa.OpGeq32,
opAndType{OGE, TUINT32}: ssa.OpGeq32U,
opAndType{OGE, TINT64}: ssa.OpGeq64,
opAndType{OGE, TUINT64}: ssa.OpGeq64U,
opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}
func (s *state) concreteEtype(t *types.Type) types.EType {
e := t.Etype
switch e {
default:
return e
case TINT:
if s.config.PtrSize == 8 {
return TINT64
}
return TINT32
case TUINT:
if s.config.PtrSize == 8 {
return TUINT64
}
return TUINT32
case TUINTPTR:
if s.config.PtrSize == 8 {
return TUINT64
}
return TUINT32
}
}
func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Fatalf("unhandled binary op %v %s", op, etype)
}
return x
}
func floatForComplex(t *types.Type) *types.Type {
if t.Size() == 8 {
return types.Types[TFLOAT32]
} else {
return types.Types[TFLOAT64]
}
}
type opAndTwoTypes struct {
op Op
etype1 types.EType
etype2 types.EType
}
type twoTypes struct {
etype1 types.EType
etype2 types.EType
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
intermediateType types.EType
}
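// fpConvOpToSSA maps a (from, to) pair of concrete etypes to the two SSA ops
// (plus an intermediate type) needed to convert between integer and
// floating-point values. For example, int16 -> float64 sign-extends to int32
// (OpSignExt16to32) and then converts with OpCvt32to64F. An OpInvalid entry
// marks a case with no direct instruction; it is handled by branchy expansion
// in the OCONV code below.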
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
// unsigned
twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead
twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead
twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead
twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
// fpConvOpToSSA32 is used only on 32-bit architectures and contains only the
// entries that differ from fpConvOpToSSA: on a 32-bit arch, don't use
// int64<->float conversions for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
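// Illustrative example: a left shift of an int32 by a uint8 shift count looks
// up opAndTwoTypes{OLSH, TINT32, TUINT8} and yields ssa.OpLsh32x8; the signed
// and unsigned right-shift variants differ (OpRsh32x8 vs. OpRsh32Ux8).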
func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
if !ok {
s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
}
return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
s.pushLine(n.Pos)
defer s.popLine()
}
s.stmtList(n.Ninit)
switch n.Op {
case OARRAYBYTESTRTMP:
slice := s.expr(n.Left)
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
case OSTRARRAYBYTETMP:
str := s.expr(n.Left)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
case OCFUNC:
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Left.Sym.Linksym()})
return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
case ONAME:
if n.Class() == PFUNC {
// "value" of a function is the address of the function's closure
sym := funcsym(n.Sym).Linksym()
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: sym})
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type)
}
addr := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OCLOSUREVAR:
addr := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OLITERAL:
switch u := n.Val().U.(type) {
case *Mpint:
i := u.Int64()
switch n.Type.Size() {
case 1:
return s.constInt8(n.Type, int8(i))
case 2:
return s.constInt16(n.Type, int16(i))
case 4:
return s.constInt32(n.Type, int32(i))
case 8:
return s.constInt64(n.Type, i)
default:
s.Fatalf("bad integer size %d", n.Type.Size())
return nil
}
case string:
if u == "" {
return s.constEmptyString(n.Type)
}
return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
case bool:
return s.constBool(u)
case *NilVal:
t := n.Type
switch {
case t.IsSlice():
return s.constSlice(t)
case t.IsInterface():
return s.constInterface(t)
default:
return s.constNil(t)
}
case *Mpflt:
switch n.Type.Size() {
case 4:
return s.constFloat32(n.Type, u.Float32())
case 8:
return s.constFloat64(n.Type, u.Float64())
default:
s.Fatalf("bad float size %d", n.Type.Size())
return nil
}
case *Mpcplx:
r := &u.Real
i := &u.Imag
switch n.Type.Size() {
case 8:
pt := types.Types[TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat32(pt, r.Float32()),
s.constFloat32(pt, i.Float32()))
case 16:
pt := types.Types[TFLOAT64]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat64(pt, r.Float64()),
s.constFloat64(pt, i.Float64()))
default:
s.Fatalf("bad float size %d", n.Type.Size())
return nil
}
default:
s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
return nil
}
case OCONVNOP:
to := n.Type
from := n.Left.Type
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here on is a fatal error.
x := s.expr(n.Left)
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
// as not-pointers or vice-versa because of copy
// elision.
if to.IsPtrShaped() != from.IsPtrShaped() {
return s.newValue2(ssa.OpConvert, to, x, s.mem())
}
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
if to.Etype == TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
if from.Etype == to.Etype {
return v
}
// unsafe.Pointer <--> *T
if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
return v
}
dowidth(from)
dowidth(to)
if from.Width != to.Width {
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
return nil
}
if etypesign(from.Etype) != etypesign(to.Etype) {
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
return nil
}
if instrumenting {
// These appear to be fine, but they fail the
// integer constraint below, so okay them here.
// Sample non-integer conversion: map[string]string -> *uint8
return v
}
if etypesign(from.Etype) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
// integer, same width, same sign
return v
case OCONV:
x := s.expr(n.Left)
ft := n.Left.Type // from type
tt := n.Type // to type
if ft.IsBoolean() && tt.IsKind(TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
return s.newValue1(ssa.OpCopy, n.Type, x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
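// The switches below key on 10*fromSize + toSize. For example,
// int32 -> int8 gives 41 and selects OpTrunc32to8, while int8 -> int64
// gives 18 and selects OpSignExt8to64 (or OpZeroExt8to64 for unsigned
// sources).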
if tt.Size() == ft.Size() {
op = ssa.OpCopy
} else if tt.Size() < ft.Size() {
// truncation
switch 10*ft.Size() + tt.Size() {
case 21:
op = ssa.OpTrunc16to8
case 41:
op = ssa.OpTrunc32to8
case 42:
op = ssa.OpTrunc32to16
case 81:
op = ssa.OpTrunc64to8
case 82:
op = ssa.OpTrunc64to16
case 84:
op = ssa.OpTrunc64to32
default:
s.Fatalf("weird integer truncation %v -> %v", ft, tt)
}
} else if ft.IsSigned() {
// sign extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpSignExt8to16
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
}
} else {
// zero extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpZeroExt8to16
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
return s.newValue1(op, n.Type, x)
}
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.ARM64 {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.MIPS {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// ft is a 32-bit unsigned integer and tt is float32 or float64
if tt.Size() == 4 {
return s.uint32Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint32Tofloat64(n, x, ft, tt)
}
} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
// ft is float32 or float64 and tt is a 32-bit unsigned integer
if ft.Size() == 4 {
return s.float32ToUint32(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint32(n, x, ft, tt)
}
}
}
if !ok {
s.Fatalf("weird float conversion %v -> %v", ft, tt)
}
op1, op2, it := conv.op1, conv.op2, conv.intermediateType
if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
// normal case, not tripping over unsigned 64
if op1 == ssa.OpCopy {
if op2 == ssa.OpCopy {
return x
}
return s.newValue1(op2, n.Type, x)
}
if op2 == ssa.OpCopy {
return s.newValue1(op1, n.Type, x)
}
return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
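// An OpInvalid entry in fpConvOpToSSA means the target has no direct
// uint64<->float instruction, so the conversion is expanded into the
// branchy helpers below (uint64Tofloat32/64, float32/64ToUint64).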
if ft.IsInteger() {
// ft is a 64-bit unsigned integer and tt is float32 or float64
if tt.Size() == 4 {
return s.uint64Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint64Tofloat64(n, x, ft, tt)
}
s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
}
// ft is float32 or float64 and tt is a 64-bit unsigned integer
if ft.Size() == 4 {
return s.float32ToUint64(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint64(n, x, ft, tt)
}
s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
return nil
}
if ft.IsComplex() && tt.IsComplex() {
var op ssa.Op
if ft.Size() == tt.Size() {
switch ft.Size() {
case 8:
op = ssa.OpRound32F
case 16:
op = ssa.OpRound64F
default:
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
} else if ft.Size() == 8 && tt.Size() == 16 {
op = ssa.OpCvt32Fto64F
} else if ft.Size() == 16 && tt.Size() == 8 {
op = ssa.OpCvt64Fto32F
} else {
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
ftp := floatForComplex(ft)
ttp := floatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
return nil
case ODOTTYPE:
res, _ := s.dottype(n, false)
return res
// binary ops
case OLT, OEQ, ONE, OLE, OGE, OGT:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Left.Type.IsComplex() {
pt := floatForComplex(n.Left.Type)
op := s.ssaOp(OEQ, pt)
r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
switch n.Op {
case OEQ:
return c
case ONE:
return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
default:
s.Fatalf("ordered complex compare %v", n.Op)
}
}
return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
case OMUL:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
}
xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
if pt != wt { // Narrow to store back
xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case ODIV:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
}
denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
// TODO not sure if this is best done in wide precision or narrow
// Double-rounding might be an issue.
// Note that the pre-SSA implementation does the entire calculation
// in wide format, so wide is compatible.
xreal = s.newValue2(divop, wt, xreal, denom)
ximag = s.newValue2(divop, wt, ximag, denom)
if pt != wt { // Narrow to store back
xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
if n.Type.IsFloat() {
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
return s.intDivide(n, a, b)
case OMOD:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.intDivide(n, a, b)
case OADD, OSUB:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
pt := floatForComplex(n.Type)
op := s.ssaOp(n.Op, pt)
return s.newValue2(ssa.OpComplexMake, n.Type,
s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OAND, OOR, OXOR:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
case OANDAND, OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
// s.vars table (normally variables are only
// associated with ONAME nodes). We convert
// A && B
// to
// var = A
// if var {
// var = B
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
el := s.expr(n.Left)
s.vars[n] = el
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(el)
// In theory, we should set b.Likely here based on context.
// However, gc only gives us likeliness hints
// in a single place, for plain OIF statements,
// and passing around context is finicky, so don't bother for now.
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
if n.Op == OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
} else if n.Op == OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
er := s.expr(n.Right)
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
return s.variable(n, types.Types[TBOOL])
case OCOMPLEX:
r := s.expr(n.Left)
i := s.expr(n.Right)
return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
// unary ops
case OMINUS:
a := s.expr(n.Left)
if n.Type.IsComplex() {
tp := floatForComplex(n.Type)
negop := s.ssaOp(n.Op, tp)
return s.newValue2(ssa.OpComplexMake, n.Type,
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
case ONOT, OCOM:
a := s.expr(n.Left)
return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
case OIMAG, OREAL:
a := s.expr(n.Left)
return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
case OPLUS:
return s.expr(n.Left)
case OADDR:
return s.addr(n.Left, n.Bounded())
case OINDREGSP:
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OIND:
p := s.exprPtr(n.Left, false, n.Pos)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case ODOT:
t := n.Left.Type
if canSSAType(t) {
v := s.expr(n.Left)
return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
}
if n.Left.Op == OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !iszero(n.Left) {
Fatalf("literal with nonzero value in SSA: %v", n.Left)
}
return s.zeroVal(n.Type)
}
p := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case ODOTPTR:
p := s.exprPtr(n.Left, false, n.Pos)
p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case OINDEX:
switch {
case n.Left.Type.IsString():
if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
}
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
if !n.Bounded() {
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
s.boundsCheck(i, len)
}
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if Isconst(n.Right, CTINT) {
ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem())
case n.Left.Type.IsSlice():
p := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
case n.Left.Type.IsArray():
if bound := n.Left.Type.NumElem(); bound <= 1 {
// SSA can handle arrays of length at most 1.
a := s.expr(n.Left)
i := s.expr(n.Right)
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
z := s.constInt(types.Types[TINT], 0)
s.boundsCheck(z, z)
// The return value won't be live, return junk.
return s.newValue0(ssa.OpUnknown, n.Type)
}
i = s.extendIndex(i, panicindex)
if !n.Bounded() {
s.boundsCheck(i, s.constInt(types.Types[TINT], bound))
}
return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
}
p := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
default:
s.Fatalf("bad type for index %v", n.Left.Type)
return nil
}
case OLEN, OCAP:
switch {
case n.Left.Type.IsSlice():
op := ssa.OpSliceLen
if n.Op == OCAP {
op = ssa.OpSliceCap
}
return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
case n.Left.Type.IsString(): // string; not reachable for OCAP
return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
case n.Left.Type.IsMap(), n.Left.Type.IsChan():
return s.referenceTypeBuiltin(n, s.expr(n.Left))
default: // array
return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
}
case OSPTR:
a := s.expr(n.Left)
if n.Left.Type.IsSlice() {
return s.newValue1(ssa.OpSlicePtr, n.Type, a)
} else {
return s.newValue1(ssa.OpStringPtr, n.Type, a)
}
case OITAB:
a := s.expr(n.Left)
return s.newValue1(ssa.OpITab, n.Type, a)
case OIDATA:
a := s.expr(n.Left)
return s.newValue1(ssa.OpIData, n.Type, a)
case OEFACE:
tab := s.expr(n.Left)
data := s.expr(n.Right)
return s.newValue2(ssa.OpIMake, n.Type, tab, data)
case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
v := s.expr(n.Left)
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
j = s.extendIndex(s.expr(high), panicslice)
}
if max != nil {
k = s.extendIndex(s.expr(max), panicslice)
}
p, l, c := s.slice(n.Left.Type, v, i, j, k)
return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
case OSLICESTR:
v := s.expr(n.Left)
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
j = s.extendIndex(s.expr(high), panicslice)
}
p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
return s.newValue2(ssa.OpStringMake, n.Type, p, l)
case OCALLFUNC:
if isIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
case OCALLINTER, OCALLMETH:
a := s.call(n, callNormal)
return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
case OGETG:
return s.newValue1(ssa.OpGetG, n.Type, s.mem())
case OAPPEND:
return s.append(n, false)
case OSTRUCTLIT, OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !iszero(n) {
Fatalf("literal with nonzero value in SSA: %v", n)
}
return s.zeroVal(n.Type)
default:
s.Fatalf("unhandled expr %v", n.Op)
return nil
}
}
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *Node, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// ptr, len, cap = growslice(s, newlen)
// newlen = len + 3 // recalculate to avoid a spill
// }
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
// return makeslice(ptr, newlen, cap)
//
//
// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
//
// a := &s
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// newptr, len, newcap = growslice(ptr, len, cap, newlen)
// vardef(a) // if necessary, advise liveness we are writing a new a
// *a.cap = newcap // write before ptr to avoid a spill
// *a.ptr = newptr // with write barrier
// }
// newlen = len + 3 // recalculate to avoid a spill
// *a.len = newlen
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
et := n.Type.Elem()
pt := types.NewPtr(et)
// Evaluate slice
sn := n.List.First() // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn, false)
slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
} else {
slice = s.expr(sn)
}
// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
nargs := int64(n.List.Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c)
s.vars[&ptrVar] = p
if !inplace {
s.vars[&newlenVar] = nl
s.vars[&capVar] = c
} else {
s.vars[&lenVar] = l
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
b.AddEdgeTo(grow)
b.AddEdgeTo(assign)
// Call growslice
s.startBlock(grow)
taddr := s.expr(n.Left)
r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
if inplace {
if sn.Op == ONAME {
// Tell liveness we're about to build a new slice
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem())
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem())
// load the value we just stored to avoid having to spill it
s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
s.vars[&lenVar] = r[1] // avoid a spill in the fast path
} else {
s.vars[&ptrVar] = r[0]
s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
s.vars[&capVar] = r[2]
}
b = s.endBlock()
b.AddEdgeTo(assign)
// assign new elements to slots
s.startBlock(assign)
if inplace {
l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem())
}
// Evaluate args
type argRec struct {
// if store is true, we're appending the value v. If false, we're appending the
// value at *v.
v *ssa.Value
store bool
}
args := make([]argRec, 0, nargs)
for _, n := range n.List.Slice()[1:] {
if canSSAType(n.Type) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n, false)
args = append(args, argRec{v: v})
}
}
p = s.variable(&ptrVar, pt) // generates phi for ptr
if !inplace {
nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0)
} else {
store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem())
store.Aux = et
s.vars[&memVar] = store
}
}
delete(s.vars, &ptrVar)
if inplace {
delete(s.vars, &lenVar)
return nil
}
delete(s.vars, &newlenVar)
delete(s.vars, &capVar)
// make result
return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
if cond.Op == OANDAND {
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, mid, no, max8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Right, yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
}
if cond.Op == OOROR {
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, yes, mid, min8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Right, yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
}
if cond.Op == ONOT {
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, no, yes, -likely)
return
}
c := s.expr(cond)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(c)
b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
b.AddEdgeTo(yes)
b.AddEdgeTo(no)
}
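// skipMask is a bitmask indicating which top-level components of an
// assignment (pointer, length, capacity) can be skipped; see assign below.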
type skipMask uint8
const (
skipPtr skipMask = 1 << iota
skipLen
skipCap
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
if left.Op == ONAME && isblank(left) {
return
}
t := left.Type
dowidth(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
if left.Op == ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For instance:
// type T struct {a, b, c int}
// var T x
// x.b = 5
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
t := left.Left.Type
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
old := s.expr(left.Left)
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
// Add fields as args.
for i := 0; i < nf; i++ {
if i == idx {
new.AddArg(right)
} else {
new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
}
}
// Recursively assign the new value we've made to the base of the dot op.
s.assign(left.Left, new, false, 0)
// TODO: do we need to update named values here?
return
}
if left.Op == OINDEX && left.Left.Type.IsArray() {
// We're assigning to an element of an ssa-able array.
// a[i] = v
t := left.Left.Type
n := t.NumElem()
i := s.expr(left.Right) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
z := s.constInt(types.Types[TINT], 0)
s.boundsCheck(z, z)
return
}
if n != 1 {
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
i = s.extendIndex(i, panicindex)
s.boundsCheck(i, s.constInt(types.Types[TINT], 1))
v := s.newValue1(ssa.OpArrayMake1, t, right)
s.assign(left.Left, v, false, 0)
return
}
// Update variable assignment.
s.vars[left] = right
s.addNamedValue(left, right)
return
}
// Left is not ssa-able. Compute its address.
addr := s.addr(left, false)
if left.Op == ONAME && skip == 0 {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem())
}
if isReflectHeaderDataField(left) {
// Package unsafe's documentation says storing pointers into
// reflect.SliceHeader and reflect.StringHeader's Data fields
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
t = types.Types[TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
var store *ssa.Value
if right == nil {
store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem())
} else {
store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem())
}
store.Aux = t
s.vars[&memVar] = store
return
}
// Treat as a store.
s.storeType(t, addr, right, skip)
}
// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *types.Type) *ssa.Value {
switch {
case t.IsInteger():
switch t.Size() {
case 1:
return s.constInt8(t, 0)
case 2:
return s.constInt16(t, 0)
case 4:
return s.constInt32(t, 0)
case 8:
return s.constInt64(t, 0)
default:
s.Fatalf("bad sized integer type %v", t)
}
case t.IsFloat():
switch t.Size() {
case 4:
return s.constFloat32(t, 0)
case 8:
return s.constFloat64(t, 0)
default:
s.Fatalf("bad sized float type %v", t)
}
case t.IsComplex():
switch t.Size() {
case 8:
z := s.constFloat32(types.Types[TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
z := s.constFloat64(types.Types[TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
}
case t.IsString():
return s.constEmptyString(t)
case t.IsPtrShaped():
return s.constNil(t)
case t.IsBoolean():
return s.constBool(false)
case t.IsInterface():
return s.constInterface(t)
case t.IsSlice():
return s.constSlice(t)
case t.IsStruct():
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
v.AddArg(s.zeroVal(t.FieldType(i)))
}
return v
case t.IsArray():
switch t.NumElem() {
case 0:
return s.entryNewValue0(ssa.OpArrayMake0, t)
case 1:
return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
}
}
s.Fatalf("zero for type %v not implemented", t)
return nil
}
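// callKind distinguishes how a call is generated: a normal call, a deferred
// call, or a call started as a new goroutine.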
type callKind int8
const (
callNormal callKind = iota
callDefer
callGo
)
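// intrinsics maps (architecture, package path, function name) to a builder
// that expands a call to that function directly into SSA, avoiding a normal
// call; it is populated in init below and consulted by findIntrinsic.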
var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
pkg string
fn string
}
func init() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
var p4 []*sys.Arch
var p8 []*sys.Arch
for _, a := range sys.Archs {
all = append(all, a)
if a.PtrSize == 4 {
p4 = append(p4, a)
} else {
p8 = append(p8, a)
}
}
// add adds the intrinsic b for pkg.fn for the given list of architectures.
add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
for _, a := range archs {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
// addF does the same as add but operates on architecture families.
addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
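// Build a bitmask of the requested architecture families, then register
// b for every architecture whose family bit is set.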
m := 0
for _, f := range archFamilies {
if f >= 32 {
panic("too many architecture families")
}
m |= 1 << uint(f)
}
for _, a := range all {
if m>>uint(a.Family)&1 != 0 {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
}
// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
for _, a := range archs {
if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
}
/******** runtime ********/
if !instrumenting {
add("runtime", "slicebytetostringtmp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
slice := args[0]
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
},
all...)
}
add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("runtime/internal/sys", "Ctz64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("runtime/internal/sys", "Bswap32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Store",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
/******** math ********/
addF("math", "Sqrt",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("math", "Trunc",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
},
sys.PPC64)
addF("math", "Ceil",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
},
sys.PPC64)
addF("math", "Floor",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
},
sys.PPC64)
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "TrailingZeros32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "TrailingZeros16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
c := s.constInt32(types.Types[TUINT32], 1<<16)
y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
},
sys.ARM, sys.MIPS)
addF("math/bits", "TrailingZeros16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
c := s.constInt64(types.Types[TUINT64], 1<<16)
y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
},
sys.AMD64, sys.ARM64, sys.S390X)
addF("math/bits", "TrailingZeros8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
c := s.constInt32(types.Types[TUINT32], 1<<8)
y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
},
sys.ARM, sys.MIPS)
addF("math/bits", "TrailingZeros8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
c := s.constInt64(types.Types[TUINT64], 1<<8)
y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "Len32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
}
x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "Len16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
}
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
// Note: disabled on AMD64 because the Go code is faster!
addF("math/bits", "Len8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
}
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "Len",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
}
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
}
return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
},
sys.ARM64)
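// makeOnesCountAMD64 returns an intrinsic builder that tests the
// support_popcnt runtime flag at run time: if POPCNT is available it emits
// the hardware population-count op, otherwise it falls back to calling the
// pure Go implementation, and merges the two results with a phi.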
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: syslook("support_popcnt").Sym.Linksym()})
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
op := op64
if s.config.PtrSize == 4 {
op = op32
}
s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
a := s.call(n, callNormal)
s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem())
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[TINT])
}
}
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
},
sys.PPC64)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
},
sys.PPC64)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
// Note: no OnesCount8, the Go implementation is faster - just a table load.
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
/******** sync/atomic ********/
// Note: these are disabled by flag_race in findIntrinsic below.
alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
// Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
/******** math/big ********/
add("math/big", "mulWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
},
sys.ArchAMD64)
add("math/big", "divWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
},
sys.ArchAMD64)
}
// findIntrinsic returns a function which builds the SSA equivalent of the
// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
func findIntrinsic(sym *types.Sym) intrinsicBuilder {
if ssa.IntrinsicsDisable {
return nil
}
if sym == nil || sym.Pkg == nil {
return nil
}
pkg := sym.Pkg.Path
if sym.Pkg == localpkg {
pkg = myimportpath
}
if flag_race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
}
fn := sym.Name
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
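// isIntrinsicCall reports whether n is a call whose target findIntrinsic
// recognizes as an intrinsic.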
func isIntrinsicCall(n *Node) bool {
if n == nil || n.Left == nil {
return false
}
return findIntrinsic(n.Left.Sym) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *Node) *ssa.Value {
v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
x = s.mem()
}
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
}
return v
}
type callArg struct {
offset int64
v *ssa.Value
}
type byOffset []callArg
func (x byOffset) Len() int { return len(x) }
func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byOffset) Less(i, j int) bool {
return x[i].offset < x[j].offset
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
// This code is complicated because of how walk transforms calls. For a call node,
// each entry in n.List is either an assignment to OINDREGSP which actually
// stores an arg, or an assignment to a temporary which computes an arg
// which is later assigned.
// The args can also be out of order.
// TODO: when walk goes away someday, this code can go away also.
var args []callArg
temps := map[*Node]*ssa.Value{}
for _, a := range n.List.Slice() {
if a.Op != OAS {
s.Fatalf("non-assignment as a function argument %s", opnames[a.Op])
}
l, r := a.Left, a.Right
switch l.Op {
case ONAME:
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
case OINDREGSP:
// Store a value to an argument slot.
var v *ssa.Value
if x, ok := temps[r]; ok {
// This is a previously computed temporary.
v = x
} else {
// This is an explicit value; evaluate it.
v = s.expr(r)
}
args = append(args, callArg{l.Xoffset, v})
default:
s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op])
}
}
sort.Sort(byOffset(args))
res := make([]*ssa.Value, len(args))
for i, a := range args {
res[i] = a.v
}
return res
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *Node, k callKind) *ssa.Value {
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
fn := n.Left
switch n.Op {
case OCALLFUNC:
if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
sym = fn.Sym
break
}
closure = s.expr(fn)
case OCALLMETH:
if fn.Op != ODOTMETH {
Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
if k == callNormal {
sym = fn.Sym
break
}
// Make a name n2 for the function.
// fn.Sym might be sync.(*Mutex).Unlock.
// Make a PFUNC node out of that, then evaluate it.
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
n2 := newnamel(fn.Pos, fn.Sym)
n2.Name.Curfn = s.curfn
n2.SetClass(PFUNC)
n2.Pos = fn.Pos
n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
closure = s.expr(n2)
// Note: receiver is already assigned in n.List, so we don't
// want to set it here.
case OCALLINTER:
if fn.Op != ODOTINTER {
Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
}
i := s.expr(fn.Left)
itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
if k != callNormal {
s.nilCheck(itab)
}
itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
if k == callNormal {
codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem())
} else {
closure = itab
}
rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
}
dowidth(fn.Type)
stksize := fn.Type.ArgWidth() // includes receiver
// Run all argument assignments. The arg slots have already
// been offset by the appropriate amount (+2*widthptr for go/defer,
// +widthptr for interface calls).
// For OCALLMETH, the receiver is set in these statements.
s.stmtList(n.List)
// Set receiver (for interface calls)
if rcvr != nil {
argStart := Ctxt.FixedFrameSize()
if k != callNormal {
argStart += int64(2 * Widthptr)
}
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem())
}
// Defer/go args
if k != callNormal {
// Write argsize and closure (args to Newproc/Deferproc).
argStart := Ctxt.FixedFrameSize()
argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem())
addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem())
stksize += 2 * int64(Widthptr)
}
// call target
var call *ssa.Value
switch {
case k == callDefer:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem())
case k == callGo:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem())
case closure != nil:
codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem())
call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
case codeptr != nil:
call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
case sym != nil:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
default:
Fatalf("bad call type %v %v", n.Op, n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
s.vars[&memVar] = call
// Finish block for defers
if k == callDefer {
b := s.endBlock()
b.Kind = ssa.BlockDefer
b.SetControl(call)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
// Add recover edge to exit code.
r := s.f.NewBlock(ssa.BlockPlain)
s.startBlock(r)
s.exit()
b.AddEdgeTo(r)
b.Likely = ssa.BranchLikely
s.startBlock(bNext)
}
res := n.Left.Type.Results()
if res.NumFields() == 0 || k != callNormal {
// The call has no return value, or this is a defer/go call. Continue with the next statement.
return nil
}
fp := res.Field(0)
return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e types.EType) int8 {
switch e {
case TINT8, TINT16, TINT32, TINT64, TINT:
return -1
case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
return +1
}
return 0
}
// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
// This improves the effectiveness of cse by using the same Aux values for the
// same symbols.
func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
switch sym.(type) {
default:
s.Fatalf("sym %v is of unknown type %T", sym, sym)
case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
// these are the only valid types
}
if lsym, ok := s.varsyms[n]; ok {
return lsym
}
s.varsyms[n] = sym
return sym
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
// If bounded is true then this address does not require a nil check for its operand
// even if that would otherwise be implied.
func (s *state) addr(n *Node, bounded bool) *ssa.Value {
t := types.NewPtr(n.Type)
switch n.Op {
case ONAME:
switch n.Class() {
case PEXTERN:
// global variable
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Sym.Linksym()})
v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if n.Xoffset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
}
return v
case PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
return v
}
if n == nodfp {
// Special arg that points to the frame pointer (Used by ORECOVER).
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
case PAUTO:
aux := s.lookupSymbol(n, &ssa.AutoSymbol{Node: n})
return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
default:
s.Fatalf("variable address class %v not implemented", classnames[n.Class()])
return nil
}
case OINDREGSP:
// indirect off REGSP
// used for storing/loading arguments/returns to/from callees
return s.constOffPtrSP(t, n.Xoffset)
case OINDEX:
if n.Left.Type.IsSlice() {
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
if !n.Bounded() {
s.boundsCheck(i, len)
}
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
a := s.addr(n.Left, bounded)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
if !n.Bounded() {
s.boundsCheck(i, len)
}
return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
}
case OIND:
return s.exprPtr(n.Left, bounded, n.Pos)
case ODOT:
p := s.addr(n.Left, bounded)
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
case ODOTPTR:
p := s.exprPtr(n.Left, bounded, n.Pos)
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
case OCLOSUREVAR:
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
case OCONVNOP:
addr := s.addr(n.Left, bounded)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case OCALLFUNC, OCALLINTER, OCALLMETH:
return s.call(n, callNormal)
case ODOTTYPE:
v, _ := s.dottype(n, false)
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
if v.Args[1] != s.mem() {
s.Fatalf("memory no longer live from dottype load")
}
return v.Args[0]
default:
s.Fatalf("unhandled addr %v", n.Op)
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT/array-OINDEX sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
if Debug['N'] != 0 {
return false
}
for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
n = n.Left
}
if n.Op != ONAME {
return false
}
if n.Addrtaken() {
return false
}
if n.isParamHeapCopy() {
return false
}
if n.Class() == PAUTOHEAP {
Fatalf("canSSA of PAUTOHEAP %v", n)
}
switch n.Class() {
case PEXTERN:
return false
case PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
// Or maybe not, see issue 18860. Even unnamed return values
// must be written back so if a defer recovers, the caller can see them.
return false
}
if s.cgoUnsafeArgs {
// Cgo effectively takes the address of all result args,
// but the compiler can't see that.
return false
}
}
if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
return canSSAType(n.Type)
// TODO: try to make more variables SSAable?
}
// canSSAType reports whether variables of type t are SSA-able.
func canSSAType(t *types.Type) bool {
dowidth(t)
if t.Width > int64(4*Widthptr) {
// 4*Widthptr is an arbitrary constant. We want it
// to be at least 3*Widthptr so slices can be registerized.
// Too big and we'll introduce too much register pressure.
return false
}
switch t.Etype {
case TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
if t.NumElem() <= 1 {
return canSSAType(t.Elem())
}
return false
case TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
for _, t1 := range t.Fields().Slice() {
if !canSSAType(t1.Type) {
return false
}
}
return true
default:
return true
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
s.f.Warnl(lineno, "removed nil check")
}
return p
}
s.nilCheck(p)
return p
}
// nilCheck generates nil pointer checking code.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return.
// idx is already converted to full int width.
func (s *state) boundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
// bounds check
cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
s.check(cmp, panicindex)
}
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
// idx and len are already converted to full int width.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
// bounds check
cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
s.check(cmp, panicslice)
}
// If cmp (a bool) is false, panic using the given function.
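// Panic blocks are cached in s.panics keyed by (panic function, source file, line),
// so repeated checks at the same source position share a single call site.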
func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
pos := Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, file: pos.Filename(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
bPanic = s.f.NewBlock(ssa.BlockPlain)
s.panics[fl] = bPanic
s.startBlock(bPanic)
// The panic call takes/returns memory to ensure that the right
// memory state is observed if the panic happens.
s.rtcall(fn, false, nil)
}
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bNext)
}
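// intDivide generates code for the integer division or modulus n (a op b),
// inserting an explicit divide-by-zero check unless b is a nonzero constant.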
func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
if b.AuxInt != 0 {
needcheck = false
}
}
if needcheck {
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
s.check(cmp, panicdivide)
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack
off := Ctxt.FixedFrameSize()
for _, arg := range args {
t := arg.Type
off = Rnd(off, t.Alignment())
ptr := s.constOffPtrSP(t.PtrTo(), off)
size := t.Size()
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem())
off += size
}
off = Rnd(off, int64(Widthreg))
// Issue call
call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem())
s.vars[&memVar] = call
if !returns {
// Finish block
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
call.AuxInt = off - Ctxt.FixedFrameSize()
if len(results) > 0 {
Fatalf("panic call can't have results")
}
return nil
}
// Load results
res := make([]*ssa.Value, len(results))
for i, t := range results {
off = Rnd(off, t.Alignment())
ptr := s.constOffPtrSP(types.NewPtr(t), off)
res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
off += t.Size()
}
off = Rnd(off, int64(Widthptr))
// Remember how much callee stack space we needed.
call.AuxInt = off
return res
}
// do *left = right for type t.
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) {
if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
return
}
// store scalar fields first, so write barrier stores for
// pointer fields can be grouped together, and scalar values
// don't need to be live across the write barrier call.
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip)
if skip&skipPtr == 0 && types.Haspointers(t) {
s.storeTypePtrs(t, left, right)
}
}
// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
case t.IsPtrShaped():
// no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
}
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypeScalars(ft, addr, val, 0)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsSlice():
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !types.Haspointers(ft) {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypePtrs(ft, addr, val)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
var elemtype *types.Type
var ptrtype *types.Type
var ptr *ssa.Value
var len *ssa.Value
var cap *ssa.Value
zero := s.constInt(types.Types[TINT], 0)
switch {
case t.IsSlice():
elemtype = t.Elem()
ptrtype = types.NewPtr(elemtype)
ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
case t.IsString():
elemtype = types.Types[TUINT8]
ptrtype = types.NewPtr(elemtype)
ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
elemtype = t.Elem().Elem()
ptrtype = types.NewPtr(elemtype)
s.nilCheck(v)
ptr = v
len = s.constInt(types.Types[TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
}
// Set default values
if i == nil {
i = zero
}
if j == nil {
j = len
}
if k == nil {
k = cap
}
// Panic if slice indices are not in bounds.
s.sliceBoundsCheck(i, j)
if j != k {
s.sliceBoundsCheck(j, k)
}
if k != cap {
s.sliceBoundsCheck(k, cap)
}
// Generate the following code assuming that indexes are in bounds.
// The masking is to make sure that we don't generate a slice
// that points to the next object in memory.
// rlen = j - i
// rcap = k - i
// delta = i * elemsize
// rptr = p + delta&mask(rcap)
// result = (SliceMake rptr rlen rcap)
// where mask(x) is 0 if x==0 and -1 if x>0.
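// For example, if len(a) == cap(a) == 10, then for a[10:] we get rcap == 0,
// mask(rcap) == 0, and rptr == ptr, rather than ptr+10*elemsize, which would
// point one past the end of the backing array.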
subOp := s.ssaOp(OSUB, types.Types[TINT])
mulOp := s.ssaOp(OMUL, types.Types[TINT])
andOp := s.ssaOp(OAND, types.Types[TINT])
rlen := s.newValue2(subOp, types.Types[TINT], j, i)
var rcap *ssa.Value
switch {
case t.IsString():
// Capacity of the result is unimportant. However, we use
// rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
rcap = rlen
case j == k:
rcap = rlen
default:
rcap = s.newValue2(subOp, types.Types[TINT], k, i)
}
var rptr *ssa.Value
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
// No pointer arithmetic necessary.
rptr = ptr
} else {
// delta = # of bytes to offset pointer by.
delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width))
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
// Compute rptr = ptr + delta
rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
}
return rptr, rlen, rcap
}
type u642fcvtTab struct {
geq, cvt2F, and, rsh, or, add ssa.Op
one func(*state, *types.Type, int64) *ssa.Value
}
var u64_f64 u642fcvtTab = u642fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to64F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd64F,
one: (*state).constInt64,
}
var u64_f32 u642fcvtTab = u642fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to32F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd32F,
one: (*state).constInt64,
}
func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// y = uintX(x) ; y = x & 1
// z = uintX(x) ; z = z >> 1
// z = z | y
// result = floatY(z)
// result = result + result
// }
//
// Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float
// conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
one := cvttab.one(s, ft, 1)
y := s.newValue2(cvttab.and, ft, x, one)
z := s.newValue2(cvttab.rsh, ft, x, one)
z = s.newValue2(cvttab.or, ft, z, y)
a := s.newValue1(cvttab.cvt2F, tt, z)
a1 := s.newValue2(cvttab.add, tt, a, a)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
type u322fcvtTab struct {
cvtI2F, cvtF2F ssa.Op
}
var u32_f64 u322fcvtTab = u322fcvtTab{
cvtI2F: ssa.OpCvt32to64F,
cvtF2F: ssa.OpCopy,
}
var u32_f32 u322fcvtTab = u322fcvtTab{
cvtI2F: ssa.OpCvt32to32F,
cvtF2F: ssa.OpCvt64Fto32F,
}
func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvtI2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// return 0
// } else {
// // len
// return *((*int)n)
// // cap
// return *(((*int)n)+1)
// }
lenType := n.Type
nilValue := s.constNil(types.Types[TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchUnlikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
// length/capacity of a nil map/chan is zero
b.AddEdgeTo(bThen)
s.startBlock(bThen)
s.vars[n] = s.zeroVal(lenType)
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
if n.Op == OLEN {
// length is stored in the first word for map/chan
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
} else if n.Op == OCAP {
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
} else {
s.Fatalf("op must be OLEN or OCAP")
}
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, lenType)
}
type f2uCvtTab struct {
ltf, cvt2U, subf, or ssa.Op
floatValue func(*state, *types.Type, float64) *ssa.Value
intValue func(*state, *types.Type, int64) *ssa.Value
cutoff uint64
}
var f32_u64 f2uCvtTab = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto64,
subf: ssa.OpSub32F,
or: ssa.OpOr64,
floatValue: (*state).constFloat32,
intValue: (*state).constInt64,
cutoff: 9223372036854775808,
}
var f64_u64 f2uCvtTab = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto64,
subf: ssa.OpSub64F,
or: ssa.OpOr64,
floatValue: (*state).constFloat64,
intValue: (*state).constInt64,
cutoff: 9223372036854775808,
}
var f32_u32 f2uCvtTab = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto32,
subf: ssa.OpSub32F,
or: ssa.OpOr32,
floatValue: (*state).constFloat32,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 2147483648,
}
var f64_u32 f2uCvtTab = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto32,
subf: ssa.OpSub64F,
or: ssa.OpOr32,
floatValue: (*state).constFloat64,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 2147483648,
}
func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// } else {
// y = x - floatX(cutoff)
// z = uintY(y)
// result = z | -(cutoff)
// }
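// Note that -(cutoff), reinterpreted as an unsigned intY, is exactly the sign
// bit (1<<63 or 1<<31), so the final OR adds back the cutoff subtracted above.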
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2U, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
y := s.newValue2(cvttab.subf, ft, x, cutoff)
y = s.newValue1(cvttab.cvt2U, tt, y)
z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
a1 := s.newValue2(cvttab.or, tt, y, z)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.Left) // input interface
target := s.expr(n.Right) // target type
byteptr := s.f.Config.Types.BytePtr
if n.Type.IsInterface() {
if n.Type.IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if Debug_typeassert > 0 {
Warnl(n.Pos, "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
if n.Left.Type.IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
// Branch on nilness.
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// On failure, panic by calling panicnildottype.
s.startBlock(bFail)
s.rtcall(panicnildottype, false, nil, target)
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
if n.Left.Type.IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
idata := s.newValue1(ssa.OpIData, n.Type, iface)
res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
return
}
s.startBlock(bOk)
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
s.vars[&typVar] = itab
s.endBlock()
// Merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, n.Type, iface)
res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
resok = cond
delete(s.vars, &typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if Debug_typeassert > 0 {
Warnl(n.Pos, "type assertion not inlined")
}
if n.Left.Type.IsEmptyInterface() {
if commaok {
call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
return call[0], call[1]
}
return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
}
if commaok {
call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
return call[0], call[1]
}
return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
}
if Debug_typeassert > 0 {
Warnl(n.Pos, "type assertion inlined")
}
// Converting to a concrete type.
direct := isdirectiface(n.Type)
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
var targetITab *ssa.Value
if n.Left.Type.IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
targetITab = s.expr(n.List.First())
}
var tmp *Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !canSSAType(n.Type) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp = tempAt(n.Pos, s.curfn, n.Type)
addr = s.addr(tmp, false)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
}
cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.expr(n.Right.Right)
if n.Left.Type.IsEmptyInterface() {
s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
}
// on success, return data from interface
s.startBlock(bOk)
if direct {
return s.newValue1(ssa.OpIData, n.Type, iface), nil
}
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
}
// commaok is the more complicated case because we have
// a control flow merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
s.vars[&okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
s.vars[valVar] = s.zeroVal(n.Type)
} else {
store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
s.vars[&okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
res = s.variable(valVar, n.Type)
delete(s.vars, valVar)
} else {
res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
}
resok = s.variable(&okVar, types.Types[TBOOL])
delete(s.vars, &okVar)
return res, resok
}
// variable returns the value of a variable at the current location.
func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
v := s.vars[name]
if v != nil {
return v
}
v = s.fwdVars[name]
if v != nil {
return v
}
if s.curBlock == s.f.Entry {
// No variable should be live at entry.
s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
v = s.newValue0A(ssa.OpFwdRef, t, name)
s.fwdVars[name] = v
s.addNamedValue(name, v)
return v
}
func (s *state) mem() *ssa.Value {
return s.variable(&memVar, types.TypeMem)
}
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
if n.Class() == Pxxx {
// Don't track our dummy nodes (&memVar etc.).
return
}
if n.IsAutoTmp() {
// Don't track temporary variables.
return
}
if n.Class() == PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
if n.Class() == PAUTO && n.Xoffset != 0 {
s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
}
loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, loc)
}
s.f.NamedValues[loc] = append(values, v)
}
// Branch is an unresolved branch.
type Branch struct {
P *obj.Prog // branch instruction
B *ssa.Block // target
}
// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
pp *Progs
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
Branches []Branch
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
SSEto387 map[int16]int16
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
ScratchFpMem *Node
maxarg int64 // largest frame size for arguments to calls made by the function
// Map from GC safe points to stack map index, generated by
// liveness analysis.
stackMapIndex map[*ssa.Value]int
}
// Prog appends a new Prog.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
return s.pp.Prog(as)
}
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
return s.pp.next
}
// SetPos sets the current source position.
func (s *SSAGenState) SetPos(pos src.XPos) {
s.pp.pos = pos
}
// DebugFriendlySetPos sets the position subject to heuristics
// that reduce "jumpy" line number churn when debugging.
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
// The two choices here are either to leave lineno unchanged,
// or to explicitly set it to src.NoXPos. Leaving it unchanged
// (reusing the preceding line number) produces slightly better-
// looking assembly language output from the compiler, and is
// expected by some already-existing tests.
// The debug information appears to be the same in either case.
switch v.Op {
case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
// leave the position unchanged from beginning of block
// or previous line number.
default:
if v.Pos != src.NoXPos {
s.SetPos(v.Pos)
}
}
}
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *Progs) {
var s SSAGenState
e := f.Frontend().(*ssafn)
// Generate GC bitmaps, except if the stack is too large,
// in which case compilation will fail later anyway (issue 20529).
if e.stksize < maxStackSize {
s.stackMapIndex = liveness(e, f)
}
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
s.pp = pp
var progToValue map[*obj.Prog]*ssa.Value
var progToBlock map[*obj.Prog]*ssa.Block
var valueToProg []*obj.Prog
var logProgs = e.log
if logProgs {
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
progToBlock[s.pp.next] = f.Blocks[0]
}
if thearch.Use387 {
s.SSEto387 = map[int16]int16{}
}
s.ScratchFpMem = e.scratchFpMem
logLocationLists := Debug_locationlist != 0
if Ctxt.Flag_locationlists {
e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(f, logLocationLists)
valueToProg = make([]*obj.Prog, f.NumValues())
}
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.next
// Emit values in block
thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := s.pp.next
s.DebugFriendlySetPosFrom(v)
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpSelect0, ssa.OpSelect1:
// nothing to do
case ssa.OpGetG:
// nothing to do when there's a g register,
// and checkLower complains if there's not
case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive:
// nothing to do; already used by liveness
case ssa.OpVarKill:
// Zero variable if it is ambiguously live.
// After the VARKILL anything this variable references
// might be collected. If it were to become live again later,
// the GC will see references to already-collected objects.
// See issue 20029.
n := v.Aux.(*Node)
if n.Name.Needzero() {
if n.Class() != PAUTO {
v.Fatalf("zero of variable which isn't PAUTO %v", n)
}
if n.Type.Size()%int64(Widthptr) != 0 {
v.Fatalf("zero of variable not a multiple of ptr size %v", n)
}
thearch.ZeroAuto(s.pp, n)
}
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpRegKill:
// nothing to do
default:
// let the backend handle it
thearch.SSAGenValue(&s, v)
}
if Ctxt.Flag_locationlists {
valueToProg[v.ID] = x
}
if logProgs {
for ; x != s.pp.next; x = x.Link {
progToValue[x] = v
}
}
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && Debug['N'] == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
x := s.pp.next
s.SetPos(b.Pos)
thearch.SSAGenBlock(&s, b, next)
if logProgs {
for ; x != s.pp.next; x = x.Link {
progToBlock[x] = b
}
}
}
if Ctxt.Flag_locationlists {
for _, locList := range e.curfn.Func.DebugInfo.Variables {
for _, loc := range locList.Locations {
loc.StartProg = valueToProg[loc.Start.ID]
if loc.End == nil {
Fatalf("empty loc %v compiling %v", loc, f.Name)
}
loc.EndProg = valueToProg[loc.End.ID]
if !logLocationLists {
loc.Start = nil
loc.End = nil
}
}
}
}
// Resolve branches
for _, br := range s.Branches {
br.P.To.Val = s.bstart[br.B.ID]
}
if logProgs {
for p := pp.Text; p != nil; p = p.Link {
var s string
if v, ok := progToValue[p]; ok {
s = v.String()
} else if b, ok := progToBlock[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
f.Logf("%s\t%s\n", s, p)
}
if f.HTMLWriter != nil {
// LineHist is defunct now - this code won't do
// anything.
// TODO: fix this (ideally without a global variable)
// saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly
// pp.Text.Ctxt.LineHist.PrintFilenameOnly = true
var buf bytes.Buffer
buf.WriteString("<code>")
buf.WriteString("<dl class=\"ssa-gen\">")
for p := pp.Text; p != nil; p = p.Link {
buf.WriteString("<dt class=\"ssa-prog-src\">")
if v, ok := progToValue[p]; ok {
buf.WriteString(v.HTML())
} else if b, ok := progToBlock[p]; ok {
buf.WriteString(b.HTML())
}
buf.WriteString("</dt>")
buf.WriteString("<dd class=\"ssa-prog\">")
buf.WriteString(html.EscapeString(p.String()))
buf.WriteString("</dd>")
buf.WriteString("</li>")
}
buf.WriteString("</dl>")
buf.WriteString("</code>")
f.HTMLWriter.WriteColumn("genssa", buf.String())
// pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved
}
}
defframe(&s, e)
if Debug['f'] != 0 {
frame(0)
}
f.HTMLWriter.Close()
f.HTMLWriter = nil
}
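// defframe fills in the final argument and frame sizes on the function's
// TEXT Prog and emits code to zero any ambiguously live (Needzero) locals.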
func defframe(s *SSAGenState, e *ssafn) {
pp := s.pp
frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
if thearch.PadFrame != nil {
frame = thearch.PadFrame(frame)
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
pp.Text.To.Offset = frame
// Insert code to zero ambiguously live variables so that the
// garbage collector only sees initialized values when it
// looks for pointers.
p := pp.Text
var lo, hi int64
// Opaque state for backend to use. Current backends use it to
// keep track of which helper registers have been zeroed.
var state uint32
// Iterate through declarations. They are sorted in decreasing Xoffset order.
for _, n := range e.curfn.Func.Dcl {
if !n.Name.Needzero() {
continue
}
if n.Class() != PAUTO {
Fatalf("needzero class %d", n.Class())
}
if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
}
if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
// Merge with range we already have.
lo = n.Xoffset
continue
}
// Zero old range
p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
lo = n.Xoffset
hi = lo + n.Type.Size()
}
// Zero final range.
thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
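// FloatingEQNEJump describes one of the conditional jumps in the two-jump
// sequences that FPJump emits; floating-point equality and inequality need a
// pair of branches on architectures where a single flag test cannot express
// the unordered (NaN) case.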
type FloatingEQNEJump struct {
Jump obj.As
Index int
}
func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
p := s.Prog(jumps.Jump)
p.To.Type = obj.TYPE_BRANCH
to := jumps.Index
s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
}
func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
switch next {
case b.Succs[0].Block():
s.oneFPJump(b, &jumps[0][0])
s.oneFPJump(b, &jumps[0][1])
case b.Succs[1].Block():
s.oneFPJump(b, &jumps[1][0])
s.oneFPJump(b, &jumps[1][1])
default:
s.oneFPJump(b, &jumps[1][0])
s.oneFPJump(b, &jumps[1][1])
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
}
}
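// AuxOffset returns the stack offset recorded in v's AutoSymbol aux,
// or 0 if v has no such symbol.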
func AuxOffset(v *ssa.Value) (offset int64) {
if v.Aux == nil {
return 0
}
switch sym := v.Aux.(type) {
case *ssa.AutoSymbol:
n := sym.Node.(*Node)
return n.Xoffset
}
return 0
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
v.Fatalf("bad AddAux addr %v", a)
}
// add integer offset
a.Offset += offset
// If no additional symbol offset, we're done.
if v.Aux == nil {
return
}
// Add symbol's offset from its base register.
switch sym := v.Aux.(type) {
case *ssa.ExternSymbol:
a.Name = obj.NAME_EXTERN
a.Sym = sym.Sym
case *ssa.ArgSymbol:
n := sym.Node.(*Node)
a.Name = obj.NAME_PARAM
a.Sym = n.Orig.Sym.Linksym()
a.Offset += n.Xoffset
case *ssa.AutoSymbol:
n := sym.Node.(*Node)
a.Name = obj.NAME_AUTO
a.Sym = n.Sym.Linksym()
a.Offset += n.Xoffset
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
}
// extendIndex extends v to a full int width.
// panic using the given function if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
size := v.Type.Size()
if size == s.config.PtrSize {
return v
}
if size > s.config.PtrSize {
// truncate 64-bit indexes on 32-bit pointer archs. Test the
// high word and branch to out-of-bounds failure if it is not 0.
if Debug['B'] == 0 {
hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
s.check(cmp, panicfn)
}
return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
}
// Extend value to the required size
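// The switch key encodes (index size, pointer size) as 10*size+PtrSize:
// 14 and 18 are 1-byte indexes on 32- and 64-bit targets, 24 and 28 are
// 2-byte indexes, and 48 is a 4-byte index on a 64-bit target.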
var op ssa.Op
if v.Type.IsSigned() {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad signed index extension %s", v.Type)
}
} else {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("bad unsigned index extension %s", v.Type)
}
}
return s.newValue1(op, types.Types[TINT], v)
}
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
if v.Op != ssa.OpPhi {
v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
}
if v.Type.IsMemory() {
return
}
f := v.Block.Func
loc := f.RegAlloc[v.ID]
for _, a := range v.Args {
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
}
}
}
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block || entry.Values[0] != v {
Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
return loc.N.(*Node), loc.Off
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := AutoVar(v)
a.Type = obj.TYPE_MEM
a.Sym = n.Sym.Linksym()
a.Reg = int16(thearch.REGSP)
a.Offset = n.Xoffset + off
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
}
}
func (s *SSAGenState) AddrScratch(a *obj.Addr) {
if s.ScratchFpMem == nil {
panic("no scratch memory available; forgot to declare usesScratch for Op?")
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
a.Sym = s.ScratchFpMem.Sym.Linksym()
a.Reg = int16(thearch.REGSP)
a.Offset = s.ScratchFpMem.Xoffset
}
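// Call emits the PCDATA stack map annotation and the CALL instruction for v,
// and records the callee's argument size in maxarg.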
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
idx, ok := s.stackMapIndex[v]
if !ok {
Fatalf("missing stack map index for %v", v.LongString())
}
p := s.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
thearch.Ginsnop(s.pp)
}
p = s.Prog(obj.ACALL)
if sym, ok := v.Aux.(*obj.LSym); ok {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = sym
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch thearch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
if s.maxarg < v.AuxInt {
s.maxarg = v.AuxInt
}
return p
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *Node) int {
t := n.Left.Type
f := n.Sym
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
var i int
for _, t1 := range t.Fields().Slice() {
if t1.Sym != f {
i++
continue
}
if t1.Offset != n.Xoffset {
panic("field offset doesn't match")
}
return i
}
panic(fmt.Sprintf("can't find field in expr %v\n", n))
// TODO: keep the result of this function somewhere in the ODOT Node
// so we don't have to recompute it each time we need it.
}
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
curfn *Node
strings map[string]interface{} // map from constant string to data symbols
scratchFpMem *Node // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool
}
// StringData returns a symbol (a *types.Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) interface{} {
if aux, ok := e.strings[s]; ok {
return aux
}
if e.strings == nil {
e.strings = make(map[string]interface{})
}
data := stringsym(s)
aux := &ssa.ExternSymbol{Sym: data}
e.strings[s] = aux
return aux
}
func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
return n
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(types.Types[TUINT8])
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this string up into two separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
return p, l
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
t := types.NewPtr(types.Types[TUINT8])
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this interface up into two separate variables.
f := ".itab"
if n.Type.IsEmptyInterface() {
f = ".type"
}
c := e.splitSlot(&name, f, 0, t)
d := e.splitSlot(&name, ".data", t.Size(), t)
return c, d
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(name.Type.ElemType())
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this slice up into three separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
return p, l, c
}
// Return the three parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
t = types.Types[TFLOAT64]
} else {
t = types.Types[TFLOAT32]
}
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this complex up into two separate variables.
r := e.splitSlot(&name, ".real", 0, t)
i := e.splitSlot(&name, ".imag", t.Size(), t)
return r, i
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
var t *types.Type
if name.Type.IsSigned() {
t = types.Types[TINT32]
} else {
t = types.Types[TUINT32]
}
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this int64 up into two separate variables.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
}
return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
// Return the two parts of the larger variable.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
}
return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
n := name.N.(*Node)
st := name.Type
ft := st.FieldType(i)
var offset int64
for f := 0; f < i; f++ {
offset += st.FieldType(f).Size()
}
if n.Class() == PAUTO && !n.Addrtaken() {
// Note: the _ field may appear several times. But
// have no fear, identically-named but distinct Autos are
// ok, albeit maybe confusing for a debugger.
return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
}
return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
n := name.N.(*Node)
at := name.Type
if at.NumElem() != 1 {
Fatalf("bad array size")
}
et := at.ElemType()
if n.Class() == PAUTO && !n.Addrtaken() {
return e.splitSlot(&name, "[0]", 0, et)
}
return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return itabsym(it, offset)
}
// splitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
n := new(Node)
n.Name = new(Name)
n.Op = ONAME
n.Pos = parent.N.(*Node).Pos
n.Orig = n
s.Def = asTypesNode(n)
asNode(s.Def).Name.SetUsed(true)
n.Sym = s
n.Type = t
n.SetClass(PAUTO)
n.SetAddable(true)
n.Esc = EscNever
n.Name.Curfn = e.curfn
e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
dowidth(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
func (e *ssafn) CanSSA(t *types.Type) bool {
return canSSAType(t)
}
func (e *ssafn) Line(pos src.XPos) string {
return linestr(pos)
}
// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
if e.log {
fmt.Printf(msg, args...)
}
}
func (e *ssafn) Log() bool {
return e.log
}
// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
lineno = pos
Fatalf(msg, args...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
Warnl(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
return Debug_checknil != 0
}
func (e *ssafn) Debug_wb() bool {
return Debug_wb != 0
}
func (e *ssafn) UseWriteBarrier() bool {
return use_writebarrier
}
func (e *ssafn) Syslook(name string) *obj.LSym {
switch name {
case "goschedguarded":
return goschedguarded
case "writeBarrier":
return writeBarrier
case "writebarrierptr":
return writebarrierptr
case "typedmemmove":
return typedmemmove
case "typedmemclr":
return typedmemclr
}
Fatalf("unknown Syslook func %v", name)
return nil
}
func (n *Node) Typ() *types.Type {
return n.Type
}
| [
"\"GOSSAFUNC\"",
"\"GOSSAFUNC\""
] | [] | [
"GOSSAFUNC"
] | [] | ["GOSSAFUNC"] | go | 1 | 0 | |
acquisition/hydrate_users.py | """Hydration from users ids to csv file
MIT License (MIT)
Copyright (c) 2015 Julien BLEGEAN <[email protected]>
"""
from datetime import datetime
import json
import math
import sys
import csv
import os
import time
import requests
from requests_oauthlib import OAuth1
from urlparse import parse_qs
# taken from
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize?oauth_token="
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
CONSUMER_KEY = os.environ.get('CONSUMER_KEY')
CONSUMER_SECRET = os.environ.get('CONSUMER_SECRET')
OAUTH_TOKEN = os.environ.get('ACCESS_TOKEN')
OAUTH_TOKEN_SECRET = os.environ.get('ACCESS_TOKEN_SECRET')
def setup_oauth():
"""Authorize your app via identifier."""
# Request token
oauth = OAuth1(CONSUMER_KEY, client_secret=CONSUMER_SECRET)
r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)
credentials = parse_qs(r.content)
resource_owner_key = credentials.get('oauth_token')[0]
resource_owner_secret = credentials.get('oauth_token_secret')[0]
# Authorize
authorize_url = AUTHORIZE_URL + resource_owner_key
print 'Please go here and authorize: ' + authorize_url
verifier = raw_input('Please input the verifier: ')
oauth = OAuth1(CONSUMER_KEY,
client_secret=CONSUMER_SECRET,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
# Finally, Obtain the Access Token
r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)
credentials = parse_qs(r.content)
token = credentials.get('oauth_token')[0]
secret = credentials.get('oauth_token_secret')[0]
return token, secret
def get_oauth():
oauth = OAuth1(CONSUMER_KEY,
client_secret=CONSUMER_SECRET,
resource_owner_key=OAUTH_TOKEN,
resource_owner_secret=OAUTH_TOKEN_SECRET)
return oauth
def addUser(user) :
""" Process user id and add its content to csv file
:param user: the user id
"""
    global i
i = i + 1
if math.fmod(i,100) == 0 :
print(i)
# FIELDS
# user fields
u_id = '"%s"' % user["id_str"].encode("ASCII","ignore")
# user name
u_name = user["name"].encode("ASCII", 'ignore')
u_name = json.dumps(u_name)
# screen name
u_screen_name = '"%s"' % user["screen_name"].encode("ASCII", 'ignore')
# sign up date
u_created_at = user["created_at"].encode("ASCII", 'ignore')
u_created_at = '"%s"' % datetime.strptime(u_created_at, '%a %b %d %H:%M:%S +0000 %Y').strftime('%Y-%m-%d %H:%M:%S')
# location
u_location = user["location"].encode("ASCII", 'ignore')
u_location = json.dumps(u_location)
# number of tweets
u_tweets_count = '"%s"' % str(user["statuses_count"])
# number of followers
u_followers_count = '"%s"' % str(user["followers_count"])
# number of friends
u_friends_count = '"%s"' % str(user["friends_count"])
# number of favourites
u_favourites_count = '"%s"' % str(user["favourites_count"])
# number of listed
u_listed_count = '"%s"' % str(user["listed_count"])
# timezone
if user["time_zone"] == None :
u_time_zone = 'NULL'
else :
u_time_zone = user["time_zone"].encode("ASCII","ignore")
u_time_zone = json.dumps(u_time_zone)
# group all the fields
usr = [u_id,u_name,u_screen_name,u_created_at,u_location,u_tweets_count,u_followers_count,u_friends_count,u_favourites_count,u_listed_count,u_time_zone]
usrs = '%s\n' % ','.join(usr)
# write to file
with open(outpath, "a") as file:
file.write(usrs)
# counter
i = 0
# user ids input file
filepath = str(sys.argv[1])
# open an clean ids
file = open(filepath)
all_ids = file.readlines()
all_ids = [id.strip() for id in all_ids]
# output filename
outpath = "%s_h" % filepath
if not OAUTH_TOKEN:
token, secret = setup_oauth()
print "OAUTH_TOKEN: " + token
print "OAUTH_TOKEN_SECRET: " + secret
print
else:
oauth = get_oauth()
# iterate over the user ids and send them to the lookup endpoint in chunks of 100
for k in range(0,len(all_ids),100) :
idstr = ','.join(all_ids[k:k+100])
r = requests.get("https://api.twitter.com/1.1/users/lookup.json?user_id=%s" % idstr, auth=oauth)
tlist = r.json()
#print(tlist)
while not isinstance(tlist, list) and tlist.has_key('errors') :
print(tlist['errors'][0]['message'])
print("Sleep for 60s..")
time.sleep(60)
r = requests.get("https://api.twitter.com/1.1/users/lookup.json?user_id=%s" % idstr, auth=oauth)
tlist = r.json()
for user in tlist :
addUser(user)
| [] | [] | [
"CONSUMER_KEY",
"CONSUMER_SECRET",
"ACCESS_TOKEN_SECRET",
"ACCESS_TOKEN"
] | [] | ["CONSUMER_KEY", "CONSUMER_SECRET", "ACCESS_TOKEN_SECRET", "ACCESS_TOKEN"] | python | 4 | 0 | |
rlog/log.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rlog
import (
"os"
"strings"
"github.com/sirupsen/logrus"
)
const (
LogKeyConsumerGroup = "consumerGroup"
LogKeyTopic = "topic"
LogKeyMessageQueue = "MessageQueue"
LogKeyUnderlayError = "underlayError"
LogKeyBroker = "broker"
LogKeyValueChangedFrom = "changedFrom"
LogKeyValueChangedTo = "changeTo"
LogKeyPullRequest = "PullRequest"
LogKeyTimeStamp = "timestamp"
)
type Logger interface {
Debug(msg string, fields map[string]interface{})
Info(msg string, fields map[string]interface{})
Warning(msg string, fields map[string]interface{})
Error(msg string, fields map[string]interface{})
Fatal(msg string, fields map[string]interface{})
Level(level string)
OutputPath(path string) (err error)
}
func init() {
r := &defaultLogger{
logger: logrus.New(),
}
level := os.Getenv("ROCKETMQ_GO_LOG_LEVEL")
switch strings.ToLower(level) {
case "debug":
r.logger.SetLevel(logrus.DebugLevel)
case "warn":
r.logger.SetLevel(logrus.WarnLevel)
case "error":
r.logger.SetLevel(logrus.ErrorLevel)
case "fatal":
r.logger.SetLevel(logrus.FatalLevel)
default:
r.logger.SetLevel(logrus.InfoLevel)
}
rLog = r
}
var rLog Logger
type defaultLogger struct {
logger *logrus.Logger
}
func (l *defaultLogger) Debug(msg string, fields map[string]interface{}) {
if msg == "" && len(fields) == 0 {
return
}
l.logger.WithFields(fields).Debug(msg)
}
func (l *defaultLogger) Info(msg string, fields map[string]interface{}) {
if msg == "" && len(fields) == 0 {
return
}
l.logger.WithFields(fields).Info(msg)
}
func (l *defaultLogger) Warning(msg string, fields map[string]interface{}) {
if msg == "" && len(fields) == 0 {
return
}
l.logger.WithFields(fields).Warning(msg)
}
func (l *defaultLogger) Error(msg string, fields map[string]interface{}) {
if msg == "" && len(fields) == 0 {
return
}
	l.logger.WithFields(fields).Error(msg)
}
func (l *defaultLogger) Fatal(msg string, fields map[string]interface{}) {
if msg == "" && len(fields) == 0 {
return
}
l.logger.WithFields(fields).Fatal(msg)
}
func (l *defaultLogger) Level(level string) {
switch strings.ToLower(level) {
case "debug":
l.logger.SetLevel(logrus.DebugLevel)
case "warn":
l.logger.SetLevel(logrus.WarnLevel)
case "error":
l.logger.SetLevel(logrus.ErrorLevel)
case "fatal":
l.logger.SetLevel(logrus.FatalLevel)
default:
l.logger.SetLevel(logrus.InfoLevel)
}
}
func (l *defaultLogger) OutputPath(path string) (err error) {
var file *os.File
file, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
return
}
l.logger.Out = file
return
}
// SetLogger replaces the default logger with a user-provided Logger implementation.
func SetLogger(logger Logger) {
rLog = logger
}
func SetLogLevel(level string) {
if level == "" {
return
}
rLog.Level(level)
}
func SetOutputPath(path string) (err error) {
	if path == "" {
return
}
return rLog.OutputPath(path)
}
func Debug(msg string, fields map[string]interface{}) {
rLog.Debug(msg, fields)
}
func Info(msg string, fields map[string]interface{}) {
if msg == "" && len(fields) == 0 {
return
}
rLog.Info(msg, fields)
}
func Warning(msg string, fields map[string]interface{}) {
if msg == "" && len(fields) == 0 {
return
}
rLog.Warning(msg, fields)
}
func Error(msg string, fields map[string]interface{}) {
rLog.Error(msg, fields)
}
func Fatal(msg string, fields map[string]interface{}) {
rLog.Fatal(msg, fields)
}
| [
"\"ROCKETMQ_GO_LOG_LEVEL\""
] | [] | [
"ROCKETMQ_GO_LOG_LEVEL"
] | [] | ["ROCKETMQ_GO_LOG_LEVEL"] | go | 1 | 0 | |
main.go | package main
import (
"log"
"net/http"
"os"
"golang.org/x/net/webdav"
)
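// methodMux dispatches each request to the handler registered for its HTTP
// method, answering 405 Method Not Allowed otherwise.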
type methodMux map[string]http.Handler
func (m *methodMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if h, ok := (*m)[r.Method]; ok {
h.ServeHTTP(w, r)
} else {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
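// main reads LISTEN, ROOT and PREFIX from the environment and serves ROOT with
// plain file serving for GET and WebDAV handling for the remaining methods.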
func main() {
listen := os.Getenv("LISTEN")
root := os.Getenv("ROOT")
prefix := os.Getenv("PREFIX")
files := http.StripPrefix(prefix, http.FileServer(http.Dir(root)))
	dav := &webdav.Handler{
		Prefix:     prefix,
		FileSystem: webdav.Dir(root),
		LockSystem: webdav.NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("r=%v err=%v", r, err)
			}
		},
	}
	mux := methodMux(map[string]http.Handler{
		"GET":       files,
		"OPTIONS":   dav,
		"PROPFIND":  dav,
		"PROPPATCH": dav,
		"MKCOL":     dav,
		"COPY":      dav,
		"MOVE":      dav,
		"LOCK":      dav,
		"UNLOCK":    dav,
		"DELETE":    dav,
		"PUT":       dav,
	})
if err := http.ListenAndServe(listen, &mux); err != nil {
log.Fatal(err)
}
}
| [
"\"LISTEN\"",
"\"ROOT\"",
"\"PREFIX\""
] | [] | [
"PREFIX",
"ROOT",
"LISTEN"
] | [] | ["PREFIX", "ROOT", "LISTEN"] | go | 3 | 0 | |
cmd/rhoas/main.go | package main
import (
"encoding/json"
"fmt"
"github.com/markbates/pkger"
"github.com/redhat-developer/app-services-cli/pkg/api/kas"
"github.com/redhat-developer/app-services-cli/pkg/doc"
"github.com/redhat-developer/app-services-cli/pkg/dump"
"os"
"github.com/redhat-developer/app-services-cli/pkg/cmdutil"
"github.com/redhat-developer/app-services-cli/internal/build"
"github.com/redhat-developer/app-services-cli/internal/localizer"
"github.com/redhat-developer/app-services-cli/internal/config"
"github.com/redhat-developer/app-services-cli/pkg/cmd/debug"
"github.com/redhat-developer/app-services-cli/pkg/cmd/factory"
"github.com/redhat-developer/app-services-cli/pkg/cmd/root"
"github.com/spf13/cobra"
)
var (
generateDocs = os.Getenv("GENERATE_DOCS") == "true"
)
// load all locale files
func loadStaticFiles() error {
err := localizer.IncludeAssetsAndLoadMessageFiles()
if err != nil {
return err
}
return pkger.Walk("/static", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
return nil
})
}
func main() {
err := loadStaticFiles()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
buildVersion := build.Version
cmdFactory := factory.New(build.Version)
logger, err := cmdFactory.Logger()
if err != nil {
		fmt.Fprintln(cmdFactory.IOStreams.ErrOut, err)
os.Exit(1)
}
initConfig(cmdFactory)
rootCmd := root.NewRootCommand(cmdFactory, buildVersion)
rootCmd.InitDefaultHelpCmd()
if generateDocs {
generateDocumentation(rootCmd)
os.Exit(0)
}
if err = rootCmd.Execute(); err == nil {
return
}
if e, ok := kas.GetAPIError(err); ok {
logger.Error("Error:", e.GetReason())
if debug.Enabled() {
errJSON, _ := json.Marshal(e)
_ = dump.JSON(cmdFactory.IOStreams.ErrOut, errJSON)
}
os.Exit(1)
}
if err = cmdutil.CheckSurveyError(err); err != nil {
logger.Error("Error:", err)
os.Exit(1)
}
}
// generateDocumentation generates the asciidoc documentation files for the command tree.
func generateDocumentation(rootCommand *cobra.Command) {
fmt.Fprint(os.Stderr, "Generating docs.\n\n")
filePrepender := func(filename string) string {
return ""
}
rootCommand.DisableAutoGenTag = true
linkHandler := func(s string) string { return s }
err := doc.GenAsciidocTreeCustom(rootCommand, "./docs/commands", filePrepender, linkHandler)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func initConfig(f *factory.Factory) {
cfgFile, err := f.Config.Load()
if cfgFile != nil {
return
}
if !os.IsNotExist(err) {
fmt.Fprintln(f.IOStreams.ErrOut, err)
os.Exit(1)
}
cfgFile = &config.Config{}
if err := f.Config.Save(cfgFile); err != nil {
fmt.Fprintln(f.IOStreams.ErrOut, err)
os.Exit(1)
}
}
| [
"\"GENERATE_DOCS\""
] | [] | [
"GENERATE_DOCS"
] | [] | ["GENERATE_DOCS"] | go | 1 | 0 | |
client/common.go | package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"time"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
)
const (
SELF = "self"
COLLECTION = "collection"
)
var (
debug = false
dialer = &websocket.Dialer{}
privateFieldRegex = regexp.MustCompile("^[[:lower:]]")
)
type ClientOpts struct {
Url string
AccessKey string
SecretKey string
Timeout time.Duration
}
type ApiError struct {
StatusCode int
Url string
Msg string
Status string
Body string
}
func (e *ApiError) Error() string {
return e.Msg
}
func IsNotFound(err error) bool {
apiError, ok := err.(*ApiError)
if !ok {
return false
}
return apiError.StatusCode == http.StatusNotFound
}
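// newApiError builds an *ApiError from a failed HTTP response, flattening any
// JSON error body into a readable message.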
func newApiError(resp *http.Response, url string) *ApiError {
contents, err := ioutil.ReadAll(resp.Body)
var body string
if err != nil {
body = "Unreadable body."
} else {
body = string(contents)
}
data := map[string]interface{}{}
if json.Unmarshal(contents, &data) == nil {
delete(data, "id")
delete(data, "links")
delete(data, "actions")
delete(data, "type")
delete(data, "status")
buf := &bytes.Buffer{}
for k, v := range data {
if v == nil {
continue
}
if buf.Len() > 0 {
buf.WriteString(", ")
}
fmt.Fprintf(buf, "%s=%v", k, v)
}
body = buf.String()
}
formattedMsg := fmt.Sprintf("Bad response statusCode [%d]. Status [%s]. Body: [%s] from [%s]",
resp.StatusCode, resp.Status, body, url)
return &ApiError{
Url: url,
Msg: formattedMsg,
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
func contains(array []string, item string) bool {
for _, check := range array {
if check == item {
return true
}
}
return false
}
func appendFilters(urlString string, filters map[string]interface{}) (string, error) {
if len(filters) == 0 {
return urlString, nil
}
u, err := url.Parse(urlString)
if err != nil {
return "", err
}
q := u.Query()
for k, v := range filters {
if l, ok := v.([]string); ok {
for _, v := range l {
q.Add(k, v)
}
} else {
q.Add(k, fmt.Sprintf("%v", v))
}
}
u.RawQuery = q.Encode()
return u.String(), nil
}
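// setupKuladoBaseClient authenticates against the API endpoint, follows the
// X-API-Schemas header and caches every schema by type on the client.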
func setupKuladoBaseClient(kuladoClient *KuladoBaseClientImpl, opts *ClientOpts) error {
if opts.Timeout == 0 {
opts.Timeout = time.Second * 10
}
client := &http.Client{Timeout: opts.Timeout}
req, err := http.NewRequest("GET", opts.Url, nil)
if err != nil {
return err
}
req.SetBasicAuth(opts.AccessKey, opts.SecretKey)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, opts.Url)
}
schemasUrls := resp.Header.Get("X-API-Schemas")
if len(schemasUrls) == 0 {
return errors.New("Failed to find schema at [" + opts.Url + "]")
}
if schemasUrls != opts.Url {
		req, err = http.NewRequest("GET", schemasUrls, nil)
		if err != nil {
			return err
		}
		req.SetBasicAuth(opts.AccessKey, opts.SecretKey)
resp, err = client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, opts.Url)
}
}
var schemas Schemas
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(bytes, &schemas)
if err != nil {
return err
}
kuladoClient.Opts = opts
kuladoClient.Schemas = &schemas
for _, schema := range schemas.Data {
kuladoClient.Types[schema.Id] = schema
}
return nil
}
func NewListOpts() *ListOpts {
return &ListOpts{
Filters: map[string]interface{}{},
}
}
func (kuladoClient *KuladoBaseClientImpl) setupRequest(req *http.Request) {
req.SetBasicAuth(kuladoClient.Opts.AccessKey, kuladoClient.Opts.SecretKey)
}
func (kuladoClient *KuladoBaseClientImpl) newHttpClient() *http.Client {
if kuladoClient.Opts.Timeout == 0 {
kuladoClient.Opts.Timeout = time.Second * 10
}
return &http.Client{Timeout: kuladoClient.Opts.Timeout}
}
func (kuladoClient *KuladoBaseClientImpl) doDelete(url string) error {
client := kuladoClient.newHttpClient()
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return err
}
kuladoClient.setupRequest(req)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
if resp.StatusCode >= 300 {
return newApiError(resp, url)
}
return nil
}
func (kuladoClient *KuladoBaseClientImpl) Websocket(url string, headers map[string][]string) (*websocket.Conn, *http.Response, error) {
return dialer.Dial(url, http.Header(headers))
}
func (kuladoClient *KuladoBaseClientImpl) doGet(url string, opts *ListOpts, respObject interface{}) error {
if opts == nil {
opts = NewListOpts()
}
url, err := appendFilters(url, opts.Filters)
if err != nil {
return err
}
if debug {
fmt.Println("GET " + url)
}
client := kuladoClient.newHttpClient()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
kuladoClient.setupRequest(req)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, url)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if debug {
fmt.Println("Response <= " + string(byteContent))
}
if err := json.Unmarshal(byteContent, respObject); err != nil {
		return errors.Wrapf(err, "Failed to parse: %s", byteContent)
}
return nil
}
func (kuladoClient *KuladoBaseClientImpl) List(schemaType string, opts *ListOpts, respObject interface{}) error {
return kuladoClient.doList(schemaType, opts, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) doList(schemaType string, opts *ListOpts, respObject interface{}) error {
schema, ok := kuladoClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.CollectionMethods, "GET") {
return errors.New("Resource type [" + schemaType + "] is not listable")
}
collectionUrl, ok := schema.Links[COLLECTION]
if !ok {
return errors.New("Failed to find collection URL for [" + schemaType + "]")
}
return kuladoClient.doGet(collectionUrl, opts, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) doNext(nextUrl string, respObject interface{}) error {
return kuladoClient.doGet(nextUrl, nil, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) Post(url string, createObj interface{}, respObject interface{}) error {
return kuladoClient.doModify("POST", url, createObj, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) GetLink(resource Resource, link string, respObject interface{}) error {
url := resource.Links[link]
if url == "" {
return fmt.Errorf("Failed to find link: %s", link)
}
return kuladoClient.doGet(url, &ListOpts{}, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) doModify(method string, url string, createObj interface{}, respObject interface{}) error {
bodyContent, err := json.Marshal(createObj)
if err != nil {
return err
}
if debug {
fmt.Println(method + " " + url)
fmt.Println("Request => " + string(bodyContent))
}
client := kuladoClient.newHttpClient()
req, err := http.NewRequest(method, url, bytes.NewBuffer(bodyContent))
if err != nil {
return err
}
kuladoClient.setupRequest(req)
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return newApiError(resp, url)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(byteContent) > 0 {
if debug {
fmt.Println("Response <= " + string(byteContent))
}
return json.Unmarshal(byteContent, respObject)
}
return nil
}
func (kuladoClient *KuladoBaseClientImpl) Create(schemaType string, createObj interface{}, respObject interface{}) error {
return kuladoClient.doCreate(schemaType, createObj, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) doCreate(schemaType string, createObj interface{}, respObject interface{}) error {
if createObj == nil {
createObj = map[string]string{}
}
if respObject == nil {
respObject = &map[string]interface{}{}
}
schema, ok := kuladoClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.CollectionMethods, "POST") {
return errors.New("Resource type [" + schemaType + "] is not creatable")
}
var collectionUrl string
collectionUrl, ok = schema.Links[COLLECTION]
if !ok {
// return errors.New("Failed to find collection URL for [" + schemaType + "]")
// This is a hack to address https://github.com/kulado/cattle/issues/254
re := regexp.MustCompile("schemas.*")
collectionUrl = re.ReplaceAllString(schema.Links[SELF], schema.PluralName)
}
return kuladoClient.doModify("POST", collectionUrl, createObj, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) Update(schemaType string, existing *Resource, updates interface{}, respObject interface{}) error {
return kuladoClient.doUpdate(schemaType, existing, updates, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) doUpdate(schemaType string, existing *Resource, updates interface{}, respObject interface{}) error {
if existing == nil {
return errors.New("Existing object is nil")
}
selfUrl, ok := existing.Links[SELF]
if !ok {
		return errors.Errorf("Failed to find self URL of [%v]", existing)
}
if updates == nil {
updates = map[string]string{}
}
if respObject == nil {
respObject = &map[string]interface{}{}
}
schema, ok := kuladoClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "PUT") {
return errors.New("Resource type [" + schemaType + "] is not updatable")
}
return kuladoClient.doModify("PUT", selfUrl, updates, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) ById(schemaType string, id string, respObject interface{}) error {
return kuladoClient.doById(schemaType, id, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) doById(schemaType string, id string, respObject interface{}) error {
schema, ok := kuladoClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "GET") {
return errors.New("Resource type [" + schemaType + "] can not be looked up by ID")
}
collectionUrl, ok := schema.Links[COLLECTION]
if !ok {
return errors.New("Failed to find collection URL for [" + schemaType + "]")
}
err := kuladoClient.doGet(collectionUrl+"/"+id, nil, respObject)
//TODO check for 404 and return nil, nil
return err
}
func (kuladoClient *KuladoBaseClientImpl) Delete(existing *Resource) error {
if existing == nil {
return nil
}
return kuladoClient.doResourceDelete(existing.Type, existing)
}
func (kuladoClient *KuladoBaseClientImpl) doResourceDelete(schemaType string, existing *Resource) error {
schema, ok := kuladoClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "DELETE") {
return errors.New("Resource type [" + schemaType + "] can not be deleted")
}
selfUrl, ok := existing.Links[SELF]
if !ok {
		return errors.Errorf("Failed to find self URL of [%v]", existing)
}
return kuladoClient.doDelete(selfUrl)
}
func (kuladoClient *KuladoBaseClientImpl) Reload(existing *Resource, output interface{}) error {
selfUrl, ok := existing.Links[SELF]
if !ok {
		return errors.Errorf("Failed to find self URL of [%v]", existing)
}
return kuladoClient.doGet(selfUrl, NewListOpts(), output)
}
func (kuladoClient *KuladoBaseClientImpl) Action(schemaType string, action string,
existing *Resource, inputObject, respObject interface{}) error {
return kuladoClient.doAction(schemaType, action, existing, inputObject, respObject)
}
func (kuladoClient *KuladoBaseClientImpl) doAction(schemaType string, action string,
existing *Resource, inputObject, respObject interface{}) error {
if existing == nil {
return errors.New("Existing object is nil")
}
actionUrl, ok := existing.Actions[action]
if !ok {
		return errors.Errorf("Action [%v] not available on [%v]", action, existing)
}
_, ok = kuladoClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
var input io.Reader
if inputObject != nil {
bodyContent, err := json.Marshal(inputObject)
if err != nil {
return err
}
if debug {
fmt.Println("Request => " + string(bodyContent))
}
input = bytes.NewBuffer(bodyContent)
}
client := kuladoClient.newHttpClient()
req, err := http.NewRequest("POST", actionUrl, input)
if err != nil {
return err
}
kuladoClient.setupRequest(req)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Content-Length", "0")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return newApiError(resp, actionUrl)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if debug {
fmt.Println("Response <= " + string(byteContent))
}
return json.Unmarshal(byteContent, respObject)
}
func init() {
debug = os.Getenv("RANCHER_CLIENT_DEBUG") == "true"
if debug {
fmt.Println("Kulado client debug on")
}
}
| [
"\"RANCHER_CLIENT_DEBUG\""
] | [] | [
"RANCHER_CLIENT_DEBUG"
] | [] | ["RANCHER_CLIENT_DEBUG"] | go | 1 | 0 | |
internal/operations/client_test.go | package operations
import (
"fmt"
"os"
"testing"
"github.com/Azure/terraform-provider-acsengine/internal/tester"
"github.com/stretchr/testify/assert"
)
func TestSetACSEngineClient(t *testing.T) {
resourceGroup := "clusterResourceGroup"
masterDNSPrefix := "masterDNSPrefix"
cluster := tester.MockContainerService("clusterName", "southcentralus", masterDNSPrefix)
id := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Resources/deployments/%s", os.Getenv("ARM_SUBSCRIPTION_ID"), resourceGroup, "clusterName")
c := NewACSEngineClient(os.Getenv("ARM_CLIENT_SECRET"))
if err := c.SetACSEngineClient(cluster, id); err != nil {
t.Fatalf("initializeScaleClient failed: %+v", err)
}
assert.Equal(t, c.ResourceGroupName, resourceGroup, "Resource group is not named correctly")
assert.Equal(t, c.SubscriptionID.String(), os.Getenv("ARM_SUBSCRIPTION_ID"), "Subscription ID is not set correctly")
}
func TestSetACSEngineClientBadID(t *testing.T) {
masterDNSPrefix := "masterDNSPrefix"
cluster := tester.MockContainerService("clusterName", "southcentralus", masterDNSPrefix)
c := NewACSEngineClient(os.Getenv("ARM_CLIENT_SECRET"))
if err := c.SetACSEngineClient(cluster, ""); err == nil {
t.Fatalf("initializeScaleClient should have failed")
}
}
// func TestsetACSEngineClientInvalidAuthArgs(t *testing.T) {
// resourceGroup := "clusterResourceGroup"
// masterDNSPrefix := "masterDNSPrefix"
// d := mockClusterResourceData("clusterName", "southcentralus", resourceGroup, masterDNSPrefix)
// id := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Resources/deployments/%s", os.Getenv("ARM_SUBSCRIPTION_ID"), resourceGroup, masterDNSPrefix)
// d.SetId(id)
// if err := d.Set("service_principal.0.client_secret", ""); err != nil {
// t.Fatalf("setting service principal failed")
// }
// c := client.ACSEngineClient{}
// err := setACSEngineClient(d, &c)
// if err == nil {
// t.Fatalf("initializeScaleClient should have failed")
// }
// }
func TestValidate(t *testing.T) {
cases := []struct {
Client ACSEngineClient
ExpectError bool
}{
{
Client: ACSEngineClient{},
ExpectError: true,
},
{
Client: ACSEngineClient{
ResourceGroupName: "rg",
},
ExpectError: true,
},
{
Client: ACSEngineClient{
ResourceGroupName: "rg",
Location: "westus",
},
ExpectError: true,
},
{
Client: ACSEngineClient{
ResourceGroupName: "rg",
Location: "westus",
DeploymentDirectory: "directory",
},
ExpectError: false,
},
}
for _, tc := range cases {
err := tc.Client.Validate()
if err == nil && tc.ExpectError {
t.Fatalf("expected error")
}
if err != nil && !tc.ExpectError {
t.Fatalf("error: %+v", err)
}
}
}
| [
"\"ARM_SUBSCRIPTION_ID\"",
"\"ARM_CLIENT_SECRET\"",
"\"ARM_SUBSCRIPTION_ID\"",
"\"ARM_CLIENT_SECRET\"",
"\"ARM_SUBSCRIPTION_ID\""
] | [] | [
"ARM_SUBSCRIPTION_ID",
"ARM_CLIENT_SECRET"
] | [] | ["ARM_SUBSCRIPTION_ID", "ARM_CLIENT_SECRET"] | go | 2 | 0 | |
lib/cgroup/cpu.go | package cgroup
import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"github.com/VictoriaMetrics/metrics"
)
// AvailableCPUs returns the number of available CPU cores for the app.
//
// The number is rounded to the next integer value if fractional number of CPU cores are available.
func AvailableCPUs() int {
return runtime.GOMAXPROCS(-1)
}
func init() {
cpuQuota := getCPUQuota()
if cpuQuota > 0 {
updateGOMAXPROCSToCPUQuota(cpuQuota)
}
cpuCoresAvailable := cpuQuota
if cpuCoresAvailable <= 0 {
cpuCoresAvailable = float64(runtime.NumCPU())
}
metrics.NewGauge(`process_cpu_cores_available`, func() float64 {
return cpuCoresAvailable
})
}
// updateGOMAXPROCSToCPUQuota updates GOMAXPROCS to cpuQuota if GOMAXPROCS isn't set in environment var.
func updateGOMAXPROCSToCPUQuota(cpuQuota float64) {
if v := os.Getenv("GOMAXPROCS"); v != "" {
// Do not override explicitly set GOMAXPROCS.
return
}
gomaxprocs := int(cpuQuota + 0.5)
numCPU := runtime.NumCPU()
if gomaxprocs > numCPU {
// There is no sense in setting more GOMAXPROCS than the number of available CPU cores.
gomaxprocs = numCPU
}
if gomaxprocs <= 0 {
gomaxprocs = 1
}
runtime.GOMAXPROCS(gomaxprocs)
}
func getCPUQuota() float64 {
cpuQuota, err := getCPUQuotaGeneric()
if err != nil {
return 0
}
if cpuQuota <= 0 {
// The quota isn't set. This may be the case in multilevel containers.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-674423728
return getOnlineCPUCount()
}
return cpuQuota
}
func getCPUQuotaGeneric() (float64, error) {
quotaUS, err := getCPUStat("cpu.cfs_quota_us")
if err == nil {
periodUS, err := getCPUStat("cpu.cfs_period_us")
if err == nil {
return float64(quotaUS) / float64(periodUS), nil
}
}
return getCPUQuotaV2("/sys/fs/cgroup", "/proc/self/cgroup")
}
func getCPUStat(statName string) (int64, error) {
return getStatGeneric(statName, "/sys/fs/cgroup/cpu", "/proc/self/cgroup", "cpu,")
}
func getOnlineCPUCount() float64 {
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-674423728
data, err := ioutil.ReadFile("/sys/devices/system/cpu/online")
if err != nil {
return -1
}
n := float64(countCPUs(string(data)))
if n <= 0 {
return -1
}
return n
}
func getCPUQuotaV2(sysPrefix, cgroupPath string) (float64, error) {
data, err := getFileContents("cpu.max", sysPrefix, cgroupPath, "")
if err != nil {
return 0, err
}
data = strings.TrimSpace(data)
n, err := parseCPUMax(data)
if err != nil {
return 0, fmt.Errorf("cannot parse cpu.max file contents: %w", err)
}
return n, nil
}
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#cpu
func parseCPUMax(data string) (float64, error) {
bounds := strings.Split(data, " ")
if len(bounds) != 2 {
return 0, fmt.Errorf("unexpected line format: want 'quota period'; got: %s", data)
}
if bounds[0] == "max" {
return -1, nil
}
quota, err := strconv.ParseUint(bounds[0], 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse quota: %w", err)
}
period, err := strconv.ParseUint(bounds[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse period: %w", err)
}
return float64(quota) / float64(period), nil
}
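// countCPUs parses a Linux CPU list such as "0-3,6" (the format of
// /sys/devices/system/cpu/online) and returns the number of CPUs it covers,
// or -1 if the list is malformed.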
func countCPUs(data string) int {
data = strings.TrimSpace(data)
n := 0
for _, s := range strings.Split(data, ",") {
n++
if !strings.Contains(s, "-") {
if _, err := strconv.Atoi(s); err != nil {
return -1
}
continue
}
bounds := strings.Split(s, "-")
if len(bounds) != 2 {
return -1
}
start, err := strconv.Atoi(bounds[0])
if err != nil {
return -1
}
end, err := strconv.Atoi(bounds[1])
if err != nil {
return -1
}
n += end - start
}
return n
}
| [
"\"GOMAXPROCS\""
] | [] | [
"GOMAXPROCS"
] | [] | ["GOMAXPROCS"] | go | 1 | 0 | |
fhirclient/r4models/device_tests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import device
from .fhirdate import FHIRDate
class DeviceTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Device", js["resourceType"])
return device.Device(js)
def testDevice1(self):
inst = self.instantiate_from("device-example-f001-feedingtube.json")
self.assertIsNotNone(inst, "Must have instantiated a Device instance")
self.implDevice1(inst)
js = inst.as_json()
self.assertEqual("Device", js["resourceType"])
inst2 = device.Device(js)
self.implDevice1(inst2)
def implDevice1(self, inst):
self.assertEqual(inst.id, "f001")
self.assertEqual(inst.identifier[0].system, "http:/goodhealthhospital/identifier/devices")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Generated Narrative with Details</b></p><p><b>id</b>: f001</p><p><b>identifier</b>: 12345</p><p><b>status</b>: active</p></div>")
self.assertEqual(inst.text.status, "generated")
def testDevice2(self):
inst = self.instantiate_from("device-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Device instance")
self.implDevice2(inst)
js = inst.as_json()
self.assertEqual("Device", js["resourceType"])
inst2 = device.Device(js)
self.implDevice2(inst2)
def implDevice2(self, inst):
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://goodcare.org/devices/id")
self.assertEqual(inst.identifier[0].value, "345675")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Generated Narrative with Details</b></p><p><b>id</b>: example</p><p><b>identifier</b>: 345675</p></div>")
self.assertEqual(inst.text.status, "generated")
| [] | [] | [
"FHIR_UNITTEST_DATADIR"
] | [] | ["FHIR_UNITTEST_DATADIR"] | python | 1 | 0 | |
sls/handler.py | import json
import datetime
import time
import boto3
import os
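# Each handler below expects a JSON body of the form
# {"data": [[row_number, arg1, ...], ...]} and returns
# {"data": [[row_number, value], ...]} together with an HTTP status code
# (200 on success, 400 on error).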
def train_and_generate_recommendations(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
# Read the first input parameter's value. For example, this can be a
# numeric value or a string, or it can be a compound value such as
# a JSON structure.
_input_table_name = row[1]
_output_table_name = row[2]
# start the SageMaker training job
client = boto3.client('sagemaker')
bucket = os.environ['s3_bucket']
prefix = "training-job-" + time.strftime("%Y%m%d%H%M%S")
s3_output_location = 's3://{}/'.format(bucket)
print(s3_output_location)
training_job_name = prefix
TRAINING_IMAGE_ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_training_job(
TrainingJobName=training_job_name,
HyperParameters=dict(input_table_name=_input_table_name, output_table_name=_output_table_name, region=os.environ['region']),
AlgorithmSpecification={
'TrainingImage': TRAINING_IMAGE_ECR_PATH,
'TrainingInputMode': 'File'
},
RoleArn=SAGEMAKER_ROLE_ARN,
OutputDataConfig={
'S3OutputPath': s3_output_location
},
ResourceConfig={
'InstanceType': 'ml.m5.xlarge',
'InstanceCount': 1,
'VolumeSizeInGB': 10
},
StoppingCondition={
'MaxRuntimeInSeconds': 10000
}
)
training_job_arn = response['TrainingJobArn']
print(training_job_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, training_job_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
def deploy_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
# Read the first input parameter's value.
model_name = row[1]
model_data_url = row[2]
# start the SageMaker training job
client = boto3.client('sagemaker')
ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_model(
ModelName=model_name,
PrimaryContainer={
'Image': ECR_PATH,
'ModelDataUrl': model_data_url
},
ExecutionRoleArn=SAGEMAKER_ROLE_ARN
)
print(response)
print("now trying to create endpoint config...")
response = client.create_endpoint_config(
EndpointConfigName=model_name,
ProductionVariants=[
{
'VariantName': 'variant-1',
'ModelName': model_name,
'InitialInstanceCount': 1,
'InstanceType': 'ml.t2.medium'
}
]
)
print(response)
print("now trying to create the endpoint...")
response = client.create_endpoint(
EndpointName=model_name,
EndpointConfigName=model_name
)
endpoint_arn = response['EndpointArn']
print(endpoint_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, endpoint_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
# function that performs real-time prediction
def invoke_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
body = ""
for row in rows:
model_name = row[1]
# extract and transform the user_ids and item_ids posted to csv
body = body + row[2] + "," + row[3] + "\n"
# invoke the SageMaker endpoint
client = boto3.client('sagemaker-runtime')
response = client.invoke_endpoint(
EndpointName=model_name,
Body=body.encode('utf-8'),
ContentType='text/csv'
)
predictions = response["Body"].read().decode('utf-8')
i = 0
array_of_rows_to_return = []
for prediction in iter(predictions.splitlines()):
# Put the returned row number and the returned value into an array.
row_to_return = [i, prediction]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
i = i + 1
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
} | [] | [] | [
"sagemaker_role_arn",
"region",
"training_image_ecr_path",
"s3_bucket"
] | [] | ["sagemaker_role_arn", "region", "training_image_ecr_path", "s3_bucket"] | python | 4 | 0 | |
pkg/executor/executortype/poolmgr/gp.go | /*
Copyright 2016 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package poolmgr
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/dchest/uniuri"
"github.com/fission/fission/pkg/utils"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
k8sErrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
k8sTypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
fv1 "github.com/fission/fission/pkg/apis/core/v1"
"github.com/fission/fission/pkg/crd"
"github.com/fission/fission/pkg/executor/fscache"
"github.com/fission/fission/pkg/executor/util"
fetcherClient "github.com/fission/fission/pkg/fetcher/client"
fetcherConfig "github.com/fission/fission/pkg/fetcher/config"
)
type (
GenericPool struct {
logger *zap.Logger
env *fv1.Environment
replicas int32 // num idle pods
deployment *appsv1.Deployment // kubernetes deployment
namespace string // namespace to keep our resources
functionNamespace string // fallback namespace for fission functions
podReadyTimeout time.Duration // timeout for generic pods to become ready
fsCache *fscache.FunctionServiceCache // cache funcSvc's by function, address and podname
useSvc bool // create k8s service for specialized pods
useIstio bool
poolInstanceId string // small random string to uniquify pod names
runtimeImagePullPolicy apiv1.PullPolicy // pull policy for generic pool to created env deployment
kubernetesClient *kubernetes.Clientset
fissionClient *crd.FissionClient
instanceId string // poolmgr instance id
requestChannel chan *choosePodRequest
fetcherConfig *fetcherConfig.Config
stopCh context.CancelFunc
}
// serialize the choosing of pods so that choices don't conflict
choosePodRequest struct {
newLabels map[string]string
responseChannel chan *choosePodResponse
}
choosePodResponse struct {
pod *apiv1.Pod
error
}
)
func MakeGenericPool(
logger *zap.Logger,
fissionClient *crd.FissionClient,
kubernetesClient *kubernetes.Clientset,
env *fv1.Environment,
initialReplicas int32,
namespace string,
functionNamespace string,
fsCache *fscache.FunctionServiceCache,
fetcherConfig *fetcherConfig.Config,
instanceId string,
enableIstio bool) (*GenericPool, error) {
gpLogger := logger.Named("generic_pool")
podReadyTimeoutStr := os.Getenv("POD_READY_TIMEOUT")
podReadyTimeout, err := time.ParseDuration(podReadyTimeoutStr)
if err != nil {
podReadyTimeout = 300 * time.Second
gpLogger.Error("failed to parse pod ready timeout duration from 'POD_READY_TIMEOUT' - set to the default value",
zap.Error(err),
zap.String("value", podReadyTimeoutStr),
zap.Duration("default", podReadyTimeout))
}
gpLogger.Info("creating pool", zap.Any("environment", env.ObjectMeta))
ctx, stopCh := context.WithCancel(context.Background())
// TODO: in general we need to provide the user a way to configure pools. Initial
// replicas, autoscaling params, various timeouts, etc.
gp := &GenericPool{
logger: gpLogger,
env: env,
replicas: initialReplicas, // TODO make this an env param instead?
requestChannel: make(chan *choosePodRequest),
fissionClient: fissionClient,
kubernetesClient: kubernetesClient,
namespace: namespace,
functionNamespace: functionNamespace,
podReadyTimeout: podReadyTimeout,
fsCache: fsCache,
poolInstanceId: uniuri.NewLen(8),
fetcherConfig: fetcherConfig,
instanceId: instanceId,
useSvc: false, // defaults off -- svc takes a second or more to become routable, slowing cold start
useIstio: enableIstio, // defaults off -- istio integration requires pod relabeling and it takes a second or more to become routable, slowing cold start
stopCh: stopCh,
}
gp.runtimeImagePullPolicy = utils.GetImagePullPolicy(os.Getenv("RUNTIME_IMAGE_PULL_POLICY"))
// create fetcher SA in this ns, if not already created
err = fetcherConfig.SetupServiceAccount(gp.kubernetesClient, gp.namespace, nil)
if err != nil {
return nil, errors.Wrapf(err, "error creating fetcher service account in namespace %q", gp.namespace)
}
// Labels for generic deployment/RS/pods.
//gp.labelsForPool = gp.getDeployLabels()
// create the pool
err = gp.createPool()
if err != nil {
return nil, err
}
gpLogger.Info("deployment created", zap.Any("environment", env.ObjectMeta))
go gp.choosePodService(ctx)
return gp, nil
}
func (gp *GenericPool) getEnvironmentPoolLabels() map[string]string {
return map[string]string{
fv1.EXECUTOR_TYPE: string(fv1.ExecutorTypePoolmgr),
fv1.ENVIRONMENT_NAME: gp.env.ObjectMeta.Name,
fv1.ENVIRONMENT_NAMESPACE: gp.env.ObjectMeta.Namespace,
fv1.ENVIRONMENT_UID: string(gp.env.ObjectMeta.UID),
"managed": "true", // this allows us to easily find pods managed by the deployment
}
}
func (gp *GenericPool) getDeployAnnotations() map[string]string {
return map[string]string{
fv1.EXECUTOR_INSTANCEID_LABEL: gp.instanceId,
}
}
// choosePodService serializes the choosing of pods
func (gp *GenericPool) choosePodService(ctx context.Context) {
for {
select {
case req := <-gp.requestChannel:
pod, err := gp._choosePod(req.newLabels)
if err != nil {
req.responseChannel <- &choosePodResponse{error: err}
continue
}
req.responseChannel <- &choosePodResponse{pod: pod}
case <-ctx.Done():
return
}
}
}
// choosePod picks a ready pod from the pool and relabels it, waiting if necessary.
// returns the pod API object.
func (gp *GenericPool) choosePod(newLabels map[string]string) (*apiv1.Pod, error) {
req := &choosePodRequest{
newLabels: newLabels,
responseChannel: make(chan *choosePodResponse),
}
gp.requestChannel <- req
resp := <-req.responseChannel
return resp.pod, resp.error
}
// _choosePod is called serially by choosePodService
func (gp *GenericPool) _choosePod(newLabels map[string]string) (*apiv1.Pod, error) {
startTime := time.Now()
for {
// Retries took too long, error out.
if time.Since(startTime) > gp.podReadyTimeout {
gp.logger.Error("timed out waiting for pod", zap.Any("labels", newLabels), zap.Duration("timeout", gp.podReadyTimeout))
return nil, errors.New("timeout: waited too long to get a ready pod")
}
// Get pods; filter the ones that are ready
podList, err := gp.kubernetesClient.CoreV1().Pods(gp.namespace).List(
metav1.ListOptions{
FieldSelector: "status.phase=Running",
LabelSelector: labels.Set(
gp.deployment.Spec.Selector.MatchLabels).AsSelector().String(),
})
if err != nil {
return nil, err
}
readyPods := make([]*apiv1.Pod, 0, len(podList.Items))
for i := range podList.Items {
pod := podList.Items[i]
// Ignore not ready pod here
if !utils.IsReadyPod(&pod) {
continue
}
// add it to the list of ready pods
readyPods = append(readyPods, &pod)
break
}
gp.logger.Info("found ready pods",
zap.Any("labels", newLabels),
zap.Int("ready_count", len(readyPods)),
zap.Int("total", len(podList.Items)))
// If there are no ready pods, wait and retry.
if len(readyPods) == 0 {
err = gp.waitForReadyPod()
if err != nil {
return nil, err
}
continue
}
// Pick a ready pod. For now just choose randomly;
// ideally we'd care about which node it's running on,
// and make a good scheduling decision.
chosenPod := readyPods[0]
if gp.env.Spec.AllowedFunctionsPerContainer != fv1.AllowedFunctionsPerContainerInfinite {
// Relabel. If the pod already got picked and
// modified, this should fail; in that case just
// retry.
labelPatch, _ := json.Marshal(newLabels)
// Append executor instance id to pod annotations to
// indicate this pod is managed by this executor.
annotations := gp.getDeployAnnotations()
annotationPatch, _ := json.Marshal(annotations)
patch := fmt.Sprintf(`{"metadata":{"annotations":%v, "labels":%v}}`, string(annotationPatch), string(labelPatch))
gp.logger.Info("relabel pod", zap.String("pod", patch))
newPod, err := gp.kubernetesClient.CoreV1().Pods(chosenPod.Namespace).Patch(chosenPod.Name, k8sTypes.StrategicMergePatchType, []byte(patch))
if err != nil {
gp.logger.Error("failed to relabel pod", zap.Error(err), zap.String("pod", chosenPod.Name))
continue
}
// With StrategicMergePatchType, the client-go sometimes return
// nil error and the labels & annotations remain the same.
// So we have to check both of them to ensure the patch success.
for k, v := range newLabels {
if newPod.Labels[k] != v {
return nil, errors.Errorf("value of necessary labels '%v' mismatch: want '%v', get '%v'",
k, v, newPod.Labels[k])
}
}
for k, v := range annotations {
if newPod.Annotations[k] != v {
return nil, errors.Errorf("value of necessary annotations '%v' mismatch: want '%v', get '%v'",
k, v, newPod.Annotations[k])
}
}
}
gp.logger.Info("chose pod", zap.Any("labels", newLabels),
zap.String("pod", chosenPod.Name), zap.Duration("elapsed_time", time.Since(startTime)))
return chosenPod, nil
}
}
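// labelsForFunction returns the environment pool labels extended with the
// function's name, UID and namespace; these are the labels a chosen pod is
// relabeled with.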
func (gp *GenericPool) labelsForFunction(metadata *metav1.ObjectMeta) map[string]string {
label := gp.getEnvironmentPoolLabels()
label[fv1.FUNCTION_NAME] = metadata.Name
label[fv1.FUNCTION_UID] = string(metadata.UID)
label[fv1.FUNCTION_NAMESPACE] = metadata.Namespace // the function CRD must stay within the same namespace as the environment CRD
label["managed"] = "false" // this allows us to easily find pods not managed by the deployment
return label
}
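// scheduleDeletePod deletes the named pod in the background so the caller is
// not blocked while the cleanup happens.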
func (gp *GenericPool) scheduleDeletePod(name string) {
go func() {
// Ideally we'd wait a bit before deleting, to allow debugging or collecting
// logs from the pod before it's cleaned up. (We need a better solution for
// both of those things; log aggregation and storage will help.)
gp.logger.Error("error in pod - scheduling cleanup", zap.String("pod", name))
gp.kubernetesClient.CoreV1().Pods(gp.namespace).Delete(name, nil)
}()
}
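// IsIPv6 reports whether the given pod IP parses as an IPv6 address.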
func IsIPv6(podIP string) bool {
ip := net.ParseIP(podIP)
return ip != nil && strings.Contains(podIP, ":")
}
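// getFetcherUrl returns the base URL of the fetcher listening on port 8000 in
// the given pod, honoring the TEST_FETCHER_URL override used by unit tests.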
func (gp *GenericPool) getFetcherUrl(podIP string) string {
testUrl := os.Getenv("TEST_FETCHER_URL")
if len(testUrl) != 0 {
// it takes a second or so for the test service to
// become routable once a pod is relabeled. This is
// super hacky, but only runs in unit tests.
time.Sleep(5 * time.Second)
return testUrl
}
isv6 := IsIPv6(podIP)
var baseUrl string
if isv6 { // Wrap the IP in brackets if it is IPv6.
baseUrl = fmt.Sprintf("http://[%v]:8000/", podIP)
} else {
baseUrl = fmt.Sprintf("http://%v:8000/", podIP)
}
return baseUrl
}
// specializePod chooses a pod, copies the required user-defined function to that pod
// (via fetcher), and calls the function-run container to load it, resulting in a
// specialized pod.
func (gp *GenericPool) specializePod(ctx context.Context, pod *apiv1.Pod, fn *fv1.Function) error {
// for fetcher we don't need to create a service, just talk to the pod directly
podIP := pod.Status.PodIP
if len(podIP) == 0 {
return errors.Errorf("Pod %s in namespace %s has no IP", pod.ObjectMeta.Name, pod.ObjectMeta.Namespace)
}
// with Istio, reach the pod through its service instead of the pod IP
if gp.useIstio {
svc := utils.GetFunctionIstioServiceName(fn.ObjectMeta.Name, fn.ObjectMeta.Namespace)
podIP = fmt.Sprintf("%v.%v", svc, gp.namespace)
}
// tell fetcher to get the function.
fetcherUrl := gp.getFetcherUrl(podIP)
gp.logger.Info("calling fetcher to copy function", zap.String("function", fn.ObjectMeta.Name), zap.String("url", fetcherUrl))
specializeReq := gp.fetcherConfig.NewSpecializeRequest(fn, gp.env)
gp.logger.Info("specializing pod", zap.String("function", fn.ObjectMeta.Name))
// Fetcher will download the user function to the pod's shared volume and
// invoke the environment's specialize API to specialize the pod.
err := fetcherClient.MakeClient(gp.logger, fetcherUrl).Specialize(ctx, &specializeReq)
if err != nil {
return err
}
return nil
}
// getPoolName returns a unique pool (deployment) name for an environment
func (gp *GenericPool) getPoolName() string {
return strings.ToLower(fmt.Sprintf("poolmgr-%v-%v-%v", gp.env.ObjectMeta.Name, gp.env.ObjectMeta.Namespace, gp.env.ObjectMeta.ResourceVersion))
}
// A pool is a deployment of generic containers for an env. This
// creates the pool but doesn't wait for any pods to be ready.
func (gp *GenericPool) createPool() error {
deployLabels := gp.getEnvironmentPoolLabels()
deployAnnotations := gp.getDeployAnnotations()
// Use a long terminationGracePeriodSeconds for connection draining in case the
// pod is still running user functions.
gracePeriodSeconds := int64(6 * 60)
if gp.env.Spec.TerminationGracePeriod > 0 {
gracePeriodSeconds = gp.env.Spec.TerminationGracePeriod
}
podAnnotations := gp.env.ObjectMeta.Annotations
if podAnnotations == nil {
podAnnotations = make(map[string]string)
}
// Here, we don't append the executor instance-id to the pod annotations,
// to prevent unwanted rolling updates from occurring. The pool manager
// appends the executor instance-id to the pod annotations when a pod is
// chosen for function specialization.
if gp.useIstio && gp.env.Spec.AllowAccessToExternalNetwork {
podAnnotations["sidecar.istio.io/inject"] = "false"
}
podLabels := gp.env.ObjectMeta.Labels
if podLabels == nil {
podLabels = make(map[string]string)
}
for k, v := range deployLabels {
podLabels[k] = v
}
container, err := util.MergeContainer(&apiv1.Container{
Name: gp.env.ObjectMeta.Name,
Image: gp.env.Spec.Runtime.Image,
ImagePullPolicy: gp.runtimeImagePullPolicy,
TerminationMessagePath: "/dev/termination-log",
Resources: gp.env.Spec.Resources,
// A pod is removed from the service's endpoints list when its
// state becomes "Terminating". We use a preStop hook as a
// workaround for connection draining, since the pod may be shut
// down before the grace period expires.
// https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
// https://github.com/kubernetes/kubernetes/issues/47576#issuecomment-308900172
Lifecycle: &apiv1.Lifecycle{
PreStop: &apiv1.Handler{
Exec: &apiv1.ExecAction{
Command: []string{
"/bin/sleep",
fmt.Sprintf("%v", gracePeriodSeconds),
},
},
},
},
// https://istio.io/docs/setup/kubernetes/additional-setup/requirements/
Ports: []apiv1.ContainerPort{
{
Name: "http-fetcher",
ContainerPort: int32(8000),
},
{
Name: "http-env",
ContainerPort: int32(8888),
},
},
}, gp.env.Spec.Runtime.Container)
if err != nil {
return err
}
pod := apiv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
Annotations: podAnnotations,
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{*container},
ServiceAccountName: "fission-fetcher",
// TerminationGracePeriodSeconds should be equal to the
// sleep time of the preStop hook, so that SIGTERM is sent to
// the pod only after the grace period (6 minutes by default).
TerminationGracePeriodSeconds: &gracePeriodSeconds,
},
}
pod.Spec = *(util.ApplyImagePullSecret(gp.env.Spec.ImagePullSecret, pod.Spec))
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: gp.getPoolName(),
Labels: deployLabels,
Annotations: deployAnnotations,
},
Spec: appsv1.DeploymentSpec{
Replicas: &gp.replicas,
Selector: &metav1.LabelSelector{
MatchLabels: deployLabels,
},
Template: pod,
},
}
// Order of merging is important here - first fetcher, then containers and lastly pod spec
err = gp.fetcherConfig.AddFetcherToPodSpec(&deployment.Spec.Template.Spec, gp.env.ObjectMeta.Name)
if err != nil {
return err
}
if gp.env.Spec.Runtime.PodSpec != nil {
newPodSpec, err := util.MergePodSpec(&deployment.Spec.Template.Spec, gp.env.Spec.Runtime.PodSpec)
if err != nil {
return err
}
deployment.Spec.Template.Spec = *newPodSpec
}
depl, err := gp.kubernetesClient.AppsV1().Deployments(gp.namespace).Get(deployment.Name, metav1.GetOptions{})
if err == nil {
if depl.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL] != gp.instanceId {
deployment.Annotations[fv1.EXECUTOR_INSTANCEID_LABEL] = gp.instanceId
// Update with the latest deployment spec. Kubernetes will trigger a
// rolling update if the spec differs from the one in the cluster.
depl, err = gp.kubernetesClient.AppsV1().Deployments(gp.namespace).Update(deployment)
}
gp.deployment = depl
return err
} else if !k8sErrs.IsNotFound(err) {
gp.logger.Error("error getting deployment in kubernetes", zap.Error(err), zap.String("deployment", deployment.Name))
return err
}
depl, err = gp.kubernetesClient.AppsV1().Deployments(gp.namespace).Create(deployment)
if err != nil {
gp.logger.Error("error creating deployment in kubernetes", zap.Error(err), zap.String("deployment", deployment.Name))
return err
}
gp.deployment = depl
return nil
}
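// waitForReadyPod polls the pool's deployment until at least one replica is
// available, or returns an error describing the non-ready containers once
// podReadyTimeout is exceeded.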
func (gp *GenericPool) waitForReadyPod() error {
startTime := time.Now()
for {
// TODO: for now we just poll; use a watch instead
depl, err := gp.kubernetesClient.AppsV1().Deployments(gp.namespace).Get(
gp.deployment.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
e := "error waiting for ready pod for deployment"
gp.logger.Error(e, zap.String("deployment", gp.deployment.ObjectMeta.Name), zap.String("namespace", gp.namespace))
return fmt.Errorf("%s %q in namespace %q", e, gp.deployment.ObjectMeta.Name, gp.namespace)
}
gp.deployment = depl
if gp.deployment.Status.AvailableReplicas > 0 {
return nil
}
if time.Since(startTime) > gp.podReadyTimeout {
podList, err := gp.kubernetesClient.CoreV1().Pods(gp.namespace).List(metav1.ListOptions{
LabelSelector: labels.Set(
gp.deployment.Spec.Selector.MatchLabels).AsSelector().String(),
})
if err != nil {
gp.logger.Error("error getting pod list after timeout waiting for ready pod", zap.Error(err))
return err
}
if len(podList.Items) == 0 {
return errors.Errorf("timeout: waited too long for pod of deployment %v in namespace %v to be ready, and no pods were found",
gp.deployment.ObjectMeta.Name, gp.namespace)
}
// Since not even a single pod is ready, inspecting the first pod is a reasonable
// approximation of what went wrong. This can be improved in the future.
pod := podList.Items[0]
errs := &multierror.Error{}
for _, cStatus := range pod.Status.ContainerStatuses {
// Guard against containers that are not ready but not in a Waiting state,
// to avoid a nil pointer dereference.
if !cStatus.Ready && cStatus.State.Waiting != nil {
errs = multierror.Append(errs, errors.Errorf("%v: %v", cStatus.State.Waiting.Reason, cStatus.State.Waiting.Message))
}
}
if errs.ErrorOrNil() != nil {
return errors.Wrapf(errs, "Timeout: waited too long for pod of deployment %v in namespace %v to be ready",
gp.deployment.ObjectMeta.Name, gp.namespace)
}
return nil
}
time.Sleep(1000 * time.Millisecond)
}
}
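// createSvc creates a ClusterIP service with the given name and labels,
// exposing port 8888 of the selected pods.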
func (gp *GenericPool) createSvc(name string, labels map[string]string) (*apiv1.Service, error) {
service := apiv1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: apiv1.ServiceSpec{
Type: apiv1.ServiceTypeClusterIP,
Ports: []apiv1.ServicePort{
{
Protocol: apiv1.ProtocolTCP,
Port: 8888,
TargetPort: intstr.FromInt(8888),
},
},
Selector: labels,
},
}
svc, err := gp.kubernetesClient.CoreV1().Services(gp.namespace).Create(&service)
return svc, err
}
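// getFuncSvc chooses a pod from the pool, specializes it for the function,
// optionally fronts it with a service, caches the result in the function
// service cache and returns the cached entry.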
func (gp *GenericPool) getFuncSvc(ctx context.Context, fn *fv1.Function) (*fscache.FuncSvc, error) {
gp.logger.Info("choosing pod from pool", zap.Any("function", fn.ObjectMeta))
funcLabels := gp.labelsForFunction(&fn.ObjectMeta)
if gp.useIstio {
// Istio only allows accessing pod through k8s service, and requests come to
// service are not always being routed to the same pod. For example:
// If there is only one pod (podA) behind the service svcX.
// svcX -> podA
// All requests (specialize request & function access requests)
// will be routed to podA without any problem.
// If podA and podB are behind svcX.
// svcX -> podA (specialized)
// -> podB (non-specialized)
// The specialize request may be routed to podA and the function access
// requests may go to podB. In this case, the function cannot be served
// properly.
// To prevent such problems, we need to delete old-version function pods
// and make sure that there is only one pod behind the service.
sel := map[string]string{
"functionName": fn.ObjectMeta.Name,
"functionUid": string(fn.ObjectMeta.UID),
}
podList, err := gp.kubernetesClient.CoreV1().Pods(gp.namespace).List(metav1.ListOptions{
LabelSelector: labels.Set(sel).AsSelector().String(),
})
if err != nil {
return nil, err
}
// Remove old-version function pods
for _, pod := range podList.Items {
// Delete the pod regardless of its status
gp.kubernetesClient.CoreV1().Pods(gp.namespace).Delete(pod.ObjectMeta.Name, nil)
}
}
pod, err := gp.choosePod(funcLabels)
if err != nil {
return nil, err
}
err = gp.specializePod(ctx, pod, fn)
if err != nil {
gp.scheduleDeletePod(pod.ObjectMeta.Name)
return nil, err
}
gp.logger.Info("specialized pod", zap.String("pod", pod.ObjectMeta.Name), zap.Any("function", fn.ObjectMeta))
var svcHost string
if gp.useSvc && !gp.useIstio {
svcName := fmt.Sprintf("svc-%v", fn.ObjectMeta.Name)
if len(fn.ObjectMeta.UID) > 0 {
svcName = fmt.Sprintf("%s-%v", svcName, fn.ObjectMeta.UID)
}
svc, err := gp.createSvc(svcName, funcLabels)
if err != nil {
gp.scheduleDeletePod(pod.ObjectMeta.Name)
return nil, err
}
if svc.ObjectMeta.Name != svcName {
gp.scheduleDeletePod(pod.ObjectMeta.Name)
return nil, errors.Errorf("sanity check failed for svc %v", svc.ObjectMeta.Name)
}
// the fission router isn't in the same namespace, so return a
// namespace-qualified hostname
svcHost = fmt.Sprintf("%v.%v:8888", svcName, gp.namespace)
} else if gp.useIstio {
svc := utils.GetFunctionIstioServiceName(fn.ObjectMeta.Name, fn.ObjectMeta.Namespace)
svcHost = fmt.Sprintf("%v.%v:8888", svc, gp.namespace)
} else {
svcHost = fmt.Sprintf("%v:8888", pod.Status.PodIP)
}
// patch the svc-host and resource version into the pod annotations so that a new executor can adopt the pod
patch := fmt.Sprintf(`{"metadata":{"annotations":{"%v":"%v","%v":"%v"}}}`,
fv1.ANNOTATION_SVC_HOST, svcHost, fv1.FUNCTION_RESOURCE_VERSION, fn.ObjectMeta.ResourceVersion)
p, err := gp.kubernetesClient.CoreV1().Pods(pod.Namespace).Patch(pod.Name, k8sTypes.StrategicMergePatchType, []byte(patch))
if err != nil {
// just log the error since it won't affect the function serving
gp.logger.Warn("error patching svc-host to pod", zap.Error(err),
zap.String("pod", pod.Name), zap.String("ns", pod.Namespace))
} else {
pod = p
}
gp.logger.Info("specialized pod",
zap.String("pod", pod.ObjectMeta.Name),
zap.String("podNamespace", pod.ObjectMeta.Namespace),
zap.String("function", fn.ObjectMeta.Name),
zap.String("functionNamespace", fn.ObjectMeta.Namespace),
zap.String("specialization_host", svcHost))
kubeObjRefs := []apiv1.ObjectReference{
{
Kind: "pod",
Name: pod.ObjectMeta.Name,
APIVersion: pod.TypeMeta.APIVersion,
Namespace: pod.ObjectMeta.Namespace,
ResourceVersion: pod.ObjectMeta.ResourceVersion,
UID: pod.ObjectMeta.UID,
},
}
m := fn.ObjectMeta // only cache necessary part
fsvc := &fscache.FuncSvc{
Name: pod.ObjectMeta.Name,
Function: &m,
Environment: gp.env,
Address: svcHost,
KubernetesObjects: kubeObjRefs,
Executor: fv1.ExecutorTypePoolmgr,
Ctime: time.Now(),
Atime: time.Now(),
}
gp.fsCache.AddFunc(*fsvc)
gp.fsCache.IncreaseColdStarts(fn.ObjectMeta.Name, string(fn.ObjectMeta.UID))
return fsvc, nil
}
// destroys the pool -- the deployment, replicaset and pods
func (gp *GenericPool) destroy() error {
gp.stopCh()
deletePropagation := metav1.DeletePropagationBackground
delOpt := metav1.DeleteOptions{
PropagationPolicy: &deletePropagation,
}
err := gp.kubernetesClient.AppsV1().
Deployments(gp.namespace).Delete(gp.deployment.ObjectMeta.Name, &delOpt)
if err != nil {
gp.logger.Error("error destroying deployment",
zap.Error(err),
zap.String("deployment_name", gp.deployment.ObjectMeta.Name),
zap.String("deployment_namespace", gp.namespace))
return err
}
return nil
}
| [
"\"POD_READY_TIMEOUT\"",
"\"RUNTIME_IMAGE_PULL_POLICY\"",
"\"TEST_FETCHER_URL\""
] | [] | [
"POD_READY_TIMEOUT",
"TEST_FETCHER_URL",
"RUNTIME_IMAGE_PULL_POLICY"
] | [] | ["POD_READY_TIMEOUT", "TEST_FETCHER_URL", "RUNTIME_IMAGE_PULL_POLICY"] | go | 3 | 0 | |
main.go | package main
import (
"os"
"fmt"
"net/http"
"strings"
"io/ioutil"
)
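// Weather holds the weather service URL, API path and location, read from the
// PLUGIN_* environment variables (with defaults).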
type Weather struct {
Url string
Api string
Location string
}
func main() {
weter := Weather{}
// Get weather service url
fmt.Println("Get variables from os.envs")
weter.Url = os.Getenv("PLUGIN_URL")
if weter.Url == "" {
weter.Url = "http://samples.openweathermap.org"
}
weter.Api = os.Getenv("PLUGIN_API")
if weter.Api == "" {
weter.Api = "/data/2.5/weather?q="
}
weter.Location = os.Getenv("PLUGIN_LOCATION")
if weter.Location == "" {
weter.Location = "Katowice"
}
fmt.Printf("We got \nURL: %s \nAPI: %s \nLOCATION: %s\n", weter.Url, weter.Api, weter.Location)
// Get external data via API
request := get_data(weter)
fmt.Printf("%+v\n\n", request)
// Get internal data from workspace file
data_from_file := read_file(os.Getenv("PLUGIN_FILENAME"))
fmt.Printf("Data from file: %s\n", data_from_file)
}
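// get_data builds the request URL from the Weather fields, calls the weather
// API and returns the raw response body as a string.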
func get_data(weather Weather) string {
body := strings.NewReader("Plugin test\n")
// The API path already ends with "?q=", so only the location is appended here.
url := fmt.Sprintf("%s%s%s,uk&appid=b1b15e88fa797225412429c1c50c122a1", weather.Url, weather.Api, weather.Location)
req, err := http.NewRequest("GET", url, body)
if err != nil {
fmt.Printf("NewRequest Error %v \n", err)
os.Exit(1)
}
c := &http.Client{}
response, err := c.Do(req)
if err != nil {
fmt.Printf("Do Error %v \n", err)
os.Exit(1)
}
defer response.Body.Close()
data, err := ioutil.ReadAll(response.Body)
return string(data)
}
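// read_file returns the contents of the named file, exiting the program if it
// cannot be read.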
func read_file(file string) string {
data, err := ioutil.ReadFile(file)
if err != nil {
fmt.Printf("Open file error %s \n", err)
os.Exit(1)
}
return string(data)
} | [
"\"PLUGIN_URL\"",
"\"PLUGIN_API\"",
"\"PLUGIN_LOCATION\"",
"\"PLUGIN_FILENAME\""
] | [] | [
"PLUGIN_API",
"PLUGIN_URL",
"PLUGIN_FILENAME",
"PLUGIN_LOCATION"
] | [] | ["PLUGIN_API", "PLUGIN_URL", "PLUGIN_FILENAME", "PLUGIN_LOCATION"] | go | 4 | 0 | |
deepspeed/runtime/engine.py | '''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import os
import re
import stat
import math
import torch
import warnings
import hashlib
import torch.distributed as dist
from collections import defaultdict, OrderedDict
from shutil import copyfile
from torch.nn.modules import Module
from torch.nn.parameter import Parameter
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.distributed.distributed_c10d import _get_global_rank
from tensorboardX import SummaryWriter
from typing import Callable, Dict, Optional, Union, Iterable
from deepspeed.runtime.utils import see_memory_usage
from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
from deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.runtime.zero.utils import is_zero_supported_optimizer, _initialize_parameter_parallel_groups
from deepspeed.runtime.activation_checkpointing import checkpointing as activation_checkpointing
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.config import DeepSpeedConfig, DEEPSPEED_OPTIMIZERS, \
ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \
TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT
from deepspeed.runtime.dataloader import DeepSpeedDataLoader
from deepspeed.runtime.constants import \
ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \
PLD_THETA, PLD_GAMMA
from deepspeed.runtime.zero.constants import \
ZERO_OPTIMIZATION_OPTIMIZER_STATES, ZERO_OPTIMIZATION_GRADIENTS, ZERO_OPTIMIZATION_WEIGHTS
from deepspeed.runtime.csr_tensor import CSRTensor
import deepspeed.runtime.lr_schedules as lr_schedules
import deepspeed.utils.groups as groups
from deepspeed.runtime.utils import get_grad_norm
from deepspeed.utils import logger, log_dist, init_distributed
from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer
from deepspeed.utils.debug import debug_extract_module_and_param_names
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from deepspeed.runtime.utils import clip_grad_norm_
from deepspeed.runtime.eigenvalue import Eigenvalue
from deepspeed.runtime.data_pipeline.curriculum_scheduler import CurriculumScheduler
from .pipe.module import PipelineModule
from .utils import ensure_directory_exists
from ..ops.op_builder import UtilsBuilder
from ..ops.adam import DeepSpeedCPUAdam
from ..ops.adam import FusedAdam
from ..moe.sharded_moe import TopKGate, MOELayer
from ..moe.layer import MoE
from ..git_version_info import version
from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler
MEMORY_OPT_ALLREDUCE_SIZE = 500000000
DeepSpeedOptimizerCallable = \
Callable[[Union[Iterable[Parameter], Dict[str, Iterable]]], Optimizer]
DeepSpeedSchedulerCallable = Callable[[Optimizer], _LRScheduler]
try:
import apex
from apex import amp
APEX_INSTALLED = True
except ImportError:
# Fail silently so we don't spam logs unnecessarily if user isn't using amp
APEX_INSTALLED = False
pass
def split_half_float_double_csr(tensors):
supported_types = [
"torch.cuda.HalfTensor",
"torch.cuda.FloatTensor",
"torch.cuda.DoubleTensor",
CSRTensor.type()
]
for t in tensors:
assert t.type() in supported_types, f"attempting to reduce an unsupported grad type: {t.type()}"
buckets = []
for i, dtype in enumerate(supported_types):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append((dtype, bucket))
return buckets
def print_configuration(args, name):
logger.info('{}:'.format(name))
for arg in sorted(vars(args)):
dots = '.' * (29 - len(arg))
logger.info(' {} {} {}'.format(arg, dots, getattr(args, arg)))
class DeepSpeedEngine(Module):
r"""DeepSpeed engine for training.
"""
def __init__(self,
args,
model,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
mpu=None,
dist_init_required=None,
collate_fn=None,
config=None,
config_params=None,
dont_change_device=False):
super(DeepSpeedEngine, self).__init__()
self.dont_change_device = dont_change_device
self.client_optimizer = optimizer
self.client_model_parameters = model_parameters
self.client_lr_scheduler = lr_scheduler
self.training_data = training_data
self.collate_fn = collate_fn
self.mpu = mpu
self.data_parallel_group = None
self.global_steps = 0
self.global_samples = 0
self.micro_steps = 0
self.skipped_steps = 0
self.gradient_average = True
self.warn_unscaled_loss = True
self.config = config
self.loaded_checkpoint_mp_world_size = None
self.loaded_checkpoint_dp_world_size = None
self.enable_backward_allreduce = True
self.progressive_layer_drop = None
self.eigenvalue = None
self.block_eigenvalue = None
self.gas_boundary_ctr = 0
self.dist_backend = "nccl"
self.has_moe_layers = False
self.num_experts = None
self.gate_modules = []
self.moe_layers = []
self._step_applied = False
self._global_grad_norm = None
# for debug purposes - can then debug print: debug_get_module_name(module)
debug_extract_module_and_param_names(model)
# needed for zero_to_fp32 weights reconstruction to remap nameless data to state_dict
self.param_names = {param: name for name, param in model.named_parameters()}
# Set config using config_params for backwards compat
if self.config is None and config_params is not None:
self.config = config_params
if dist_init_required is None:
dist_init_required = not dist.is_initialized()
if dist_init_required is False:
assert dist.is_initialized() is True, "Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
else:
# Initialize torch distributed if needed
init_distributed(dist_backend=self.dist_backend)
see_memory_usage(f"DeepSpeed Engine: Before args sanity test")
self._do_args_sanity_check(args)
self._configure_with_arguments(args, mpu)
self._do_sanity_check()
if mpu is not None:
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with model parallelism."
self._set_distributed_vars(args)
if self.tensorboard_enabled() and self.global_rank == 0:
self.summary_writer = self.get_summary_writer()
see_memory_usage(f"DeepSpeed Engine: Before configure distributed model")
self.pipeline_parallelism = isinstance(model, PipelineModule)
# Configure distributed model
self._configure_distributed_model(model)
see_memory_usage(f"DeepSpeed Engine: After configure distributed model")
# Configure wall clock timer
self.timers = SynchronizedWallClockTimer()
# Throughput timer
self.tput_timer = ThroughputTimer(
batch_size=self.train_micro_batch_size_per_gpu(),
num_workers=self.dp_world_size,
steps_per_output=self.steps_per_print(),
monitor_memory=False)
if dist.get_rank() == 0:
logger.info(
f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}")
if self.flops_profiler_enabled():
self.flops_profiler = FlopsProfiler(self.module, self)
if training_data:
self.training_dataloader = self.deepspeed_io(training_data)
else:
self.training_dataloader = None
# Configure optimizer and scheduler
self.optimizer = None
self.basic_optimizer = None
self.lr_scheduler = None
if model_parameters or optimizer:
self._configure_optimizer(optimizer, model_parameters)
self._configure_lr_scheduler(lr_scheduler)
self._report_progress(0)
# Bookkeeping for csr support
self.csr_tensor_module_names = set()
if self.sparse_gradients_enabled():
for name, module in self.module.named_modules():
if isinstance(module, torch.nn.Embedding):
self.csr_tensor_module_names.add(name + ".weight")
logger.info("Will convert {} to sparse (csr) "
"tensor during training".format(name))
self.save_non_zero_checkpoint = False
self.save_zero_checkpoint = False
self._configure_checkpointing(dist_init_required)
if self.eigenvalue_enabled():
self.eigenvalue = self._configure_eigenvalue()
if self.pld_enabled():
self.progressive_layer_drop = self._configure_progressive_layer_drop()
if self.curriculum_enabled():
self.curriculum_scheduler = self._configure_curriculum_scheduler()
if self.global_rank == 0:
self._config.print('DeepSpeedEngine configuration')
if self.dump_state():
print_configuration(self, 'DeepSpeedEngine')
# Load pre-installed or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
def get_batch_info(self):
""" Get all training batch related settings.
Returns:
train_batch_size (int): The effective training batch size. This is the number of data
samples that leads to one step of model update.
train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one
step (without gradient accumulation).
gradient_accumulation_steps (int): Number of training steps to accumulate gradients
before averaging and applying them.
"""
return self.train_batch_size, self.train_micro_batch_size_per_gpu, self.gradient_accumulation_steps
def set_train_batch_size(self, train_batch_size):
"""Adjust the global batch size by increasing or decreasing the number of
micro-batches (i.e., gradient accumulation steps). The size of each micro-batch
(i.e., ``train_micro_batch_size_per_gpu``) is not changed.
Args:
train_batch_size (int): The new global batch size for training.
Raises:
ValueError: if ``train_batch_size`` is not divisible by the
configured micro-batch size and data parallelism.
"""
if train_batch_size % (self.train_micro_batch_size_per_gpu() *
self.dp_world_size) != 0:
raise ValueError(
f'Train batch size {train_batch_size} must be divisible by '
f'train_micro_batch_size_per_gpu ({self.train_micro_batch_size_per_gpu()}) '
f'times data parallelism ({self.dp_world_size})')
new_gas = train_batch_size // (self.train_micro_batch_size_per_gpu() *
self.dp_world_size)
# overwrite config
self._config.train_batch_size = train_batch_size
self._config.gradient_accumulation_steps = new_gas
def get_global_grad_norm(self) -> float:
"""Return the 2-norm of all gradients. If there is model parallelism,
the norm will be global.
The computed norm will be cached and reused until the next step() pass.
.. note::
In the presence of model parallelism, this is a collective call
and acts as a barrier among ``mpu.get_model_parallel_group()``.
Returns:
float: norm
"""
return self._global_grad_norm
def checkpoint_tag_validation_enabled(self):
return self._config.checkpoint_tag_validation_enabled
def checkpoint_tag_validation_fail(self):
return self._config.checkpoint_tag_validation_fail
def elasticity_enabled(self):
return self._config.elasticity_enabled
def pld_enabled(self):
return self._config.pld_enabled
def pld_params(self):
return self._config.pld_params
def pld_theta(self):
return self.pld_params()[PLD_THETA]
def pld_gamma(self):
return self.pld_params()[PLD_GAMMA]
def eigenvalue_enabled(self):
return self._config.eigenvalue_enabled
def eigenvalue_verbose(self):
return self._config.eigenvalue_verbose
def eigenvalue_max_iter(self):
return self._config.eigenvalue_max_iter
def eigenvalue_tol(self):
return self._config.eigenvalue_tol
def eigenvalue_stability(self):
return self._config.eigenvalue_stability
def eigenvalue_gas_boundary_resolution(self):
return self._config.eigenvalue_gas_boundary_resolution
def eigenvalue_layer_name(self):
return self._config.eigenvalue_layer_name
def eigenvalue_layer_num(self):
return self._config.eigenvalue_layer_num
def curriculum_enabled(self):
return self._config.curriculum_enabled
def curriculum_params(self):
return self._config.curriculum_params
def tensorboard_enabled(self):
return self._config.tensorboard_enabled
def tensorboard_output_path(self):
return self._config.tensorboard_output_path
def tensorboard_job_name(self):
return self._config.tensorboard_job_name
def get_summary_writer(self,
name="DeepSpeedJobName",
base=os.path.join(os.path.expanduser("~"),
"tensorboard")):
if self.tensorboard_output_path():
base_dir = self.tensorboard_output_path()
job_name = self.tensorboard_job_name()
log_dir = os.path.join(base_dir, job_name)
else:
if self.tensorboard_job_name():
name = self.tensorboard_job_name()
# Infrastructure-specific job-id
if 'DLWS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLWS_JOB_ID']
elif 'DLTS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLTS_JOB_ID']
else:
infra_job_id = 'unknown-job-id'
summary_writer_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, summary_writer_dir_name, name)
os.makedirs(log_dir, exist_ok=True)
return SummaryWriter(log_dir=log_dir)
def wall_clock_breakdown(self):
return self._config.wall_clock_breakdown
def flops_profiler_enabled(self):
return self._config.flops_profiler_config.enabled
def flops_profiler_profile_step(self):
return self._config.flops_profiler_config.profile_step
def flops_profiler_module_depth(self):
return self._config.flops_profiler_config.module_depth
def flops_profiler_top_modules(self):
return self._config.flops_profiler_config.top_modules
def flops_profiler_detailed(self):
return self._config.flops_profiler_config.detailed
def flops_profiler_output_file(self):
return self._config.flops_profiler_config.output_file
def memory_breakdown(self):
return self._config.memory_breakdown
def sparse_gradients_enabled(self):
return self._config.sparse_gradients_enabled
def train_batch_size(self):
return self._config.train_batch_size
def train_micro_batch_size_per_gpu(self):
return self._config.train_micro_batch_size_per_gpu
def optimizer_name(self):
return self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name
def optimizer_params(self):
return self._config.optimizer_params
def optimizer_legacy_fusion(self):
return self._config.optimizer_legacy_fusion
def scheduler_name(self):
return self._config.scheduler_name
def scheduler_params(self):
return self._config.scheduler_params
def quantize_training(self):
return self._config.quantize_training_enabled, \
self._config.quantize_target_bits, \
self._config.quantize_start_bits, \
self._config.quantize_period, \
self._config.quantize_offset, \
self._config.quantize_groups, \
self._config.fp16_mixed_quantize, \
self._config.quantize_change_rate, \
self._config.quantize_type, \
self._config.quantize_rounding, \
self._config.quantize_verbose, \
self._config.use_quantizer_kernel
def zero_optimization(self):
return self._config.zero_enabled
def zero_allow_untested_optimizer(self):
return self._config.zero_allow_untested_optimizer
def zero_reduce_scatter(self):
return self._config.zero_config.reduce_scatter
def zero_overlap_comm(self):
return self._config.zero_config.overlap_comm
def zero_offload_optimizer(self):
return self._config.zero_config.offload_optimizer
def zero_offload_param(self):
return self._config.zero_config.offload_param
def zero_cpu_offload(self):
return self._config.zero_config.offload_optimizer is not None
def zero_sub_group_size(self):
return self._config.zero_config.sub_group_size
def zero_optimization_stage(self):
return self._config.zero_optimization_stage
def zero_reduce_bucket_size(self):
return self._config.zero_config.reduce_bucket_size
def zero_allgather_bucket_size(self):
return self._config.zero_config.allgather_bucket_size
def zero_optimization_partition_gradients(self):
return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_GRADIENTS
def zero_optimization_partition_weights(self):
return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_WEIGHTS
def zero_contiguous_gradients(self):
return self._config.zero_config.contiguous_gradients
def zero_load_from_fp32_weights(self):
return self._config.zero_config.load_from_fp32_weights
def zero_elastic_checkpoint(self):
return self._config.zero_config.elastic_checkpoint
def zero_max_live_parameters(self):
return self._config.zero_config.max_live_parameters
def zero_max_reuse_distance(self):
return self._config.zero_config.max_reuse_distance
def zero_prefetch_bucket_size(self):
return self._config.zero_config.prefetch_bucket_size
def zero_param_persistence_threshold(self):
return self._config.zero_config.param_persistence_threshold
def zero_gather_fp16_weights_on_model_save(self):
return self._config.zero_config.gather_fp16_weights_on_model_save
def zero_grad_hooks(self):
return self._config.zero_config.grad_hooks
def zero_legacy_stage1(self):
return self._config.zero_config.legacy_stage1
def zero_ignore_unused_parameters(self):
return self._config.zero_config.ignore_unused_parameters
def fp16_enabled(self):
return self._config.fp16_enabled
def fp16_master_weights_and_gradients(self):
return self._config.fp16_master_weights_and_gradients
def amp_enabled(self):
return self._config.amp_enabled
def amp_params(self):
return self._config.amp_params
def loss_scale(self):
return self._config.loss_scale
def gradient_accumulation_steps(self):
return self._config.gradient_accumulation_steps
def allreduce_always_fp32(self):
return self._config.allreduce_always_fp32
def postscale_gradients(self):
return not self._config.prescale_gradients
def gradient_predivide_factor(self):
return self._config.gradient_predivide_factor
def steps_per_print(self):
return self._config.steps_per_print
def zero_allgather_partitions(self):
return self._config.zero_config.allgather_partitions
def zero_round_robin_gradients(self):
return self._config.zero_config.round_robin_gradients
def dump_state(self):
return self._config.dump_state
def gradient_clipping(self):
return self._config.gradient_clipping
def dynamic_loss_scale(self):
return self._config.loss_scale == 0
def initial_dynamic_scale(self):
return self._config.initial_dynamic_scale
def dynamic_loss_scale_args(self):
return self._config.dynamic_loss_scale_args
def swap_tensor_config(self):
return self._config.swap_tensor_config
def aio_config(self):
return self._config.aio_config
def _configure_lr_scheduler(self, client_lr_scheduler):
# First check for scheduler in json configuration
lr_scheduler = self._scheduler_from_config(self.optimizer)
if lr_scheduler:
if self.global_rank == 0:
logger.info(
f'DeepSpeed using configured LR scheduler = {self.scheduler_name()}')
self.lr_scheduler = lr_scheduler
else:
if isinstance(client_lr_scheduler, Callable):
if self.global_rank == 0:
logger.info('DeepSpeed using client callable to create LR scheduler')
self.lr_scheduler = client_lr_scheduler(self.basic_optimizer)
else:
if self.global_rank == 0:
logger.info('DeepSpeed using client LR scheduler')
self.lr_scheduler = client_lr_scheduler
log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])
def _configure_checkpointing(self, dist_init_required):
dp_rank = self.global_rank
if self.mpu:
dp_rank = self.mpu.get_data_parallel_rank()
# only the first data parallel process needs to store the model checkpoint
self.save_non_zero_checkpoint = (
dp_rank == 0) or self.zero_optimization_partition_weights()
if self.zero_optimization():
param_rank = torch.distributed.get_rank(
group=self.optimizer.dp_process_group)
# Only the first parameter parallel process needs to store the
# optimizer state checkpoints for zero
self.save_zero_checkpoint = (param_rank == dp_rank)
def _scheduler_from_config(self, optimizer):
scheduler_name = self.scheduler_name()
if scheduler_name is not None:
if hasattr(lr_schedules, scheduler_name):
scheduler = getattr(lr_schedules, scheduler_name)
else:
assert hasattr(torch.optim.lr_scheduler, scheduler_name), \
f"DeepSpeed does not recognize LR scheduler {scheduler_name}"
scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)
scheduler_params = self.scheduler_params()
instantiated_scheduler = scheduler(optimizer, **scheduler_params)
return instantiated_scheduler
else:
return None
def _set_distributed_vars(self, args):
device_rank = args.device_rank if args is not None and hasattr(
args,
'device_rank') else self.local_rank
if device_rank >= 0:
torch.cuda.set_device(device_rank)
self.device = torch.device("cuda", device_rank)
self.world_size = dist.get_world_size()
self.global_rank = dist.get_rank()
else:
self.world_size = 1
self.global_rank = 0
self.device = torch.device("cuda")
# Configure based on command line arguments
def _configure_with_arguments(self, args, mpu):
# After the distributed backend is initialized we are guaranteed the LOCAL_RANK
# environment variable is set. We must align args.local_rank to this value for
# backwards compatibility with scripts relying on [args|self].local_rank containing
# the correct local rank info. _do_args_sanity_check will ensure this is the case.
if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ:
ompi_local_rank = os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK")
local_rank = os.environ.get('LOCAL_RANK', ompi_local_rank)
assert ompi_local_rank == local_rank, f"LOCAL_RANK ({local_rank}) != OMPI_COMM_WORLD_LOCAL_RANK ({ompi_local_rank}), " \
"not sure how to proceed as we're seeing conflicting local rank info."
os.environ['LOCAL_RANK'] = local_rank
self.local_rank = int(os.environ['LOCAL_RANK'])
if hasattr(args, 'local_rank'):
args.local_rank = self.local_rank
if self.config is None:
self.config = args.deepspeed_config if hasattr(args,
'deepspeed_config') else None
self._config = DeepSpeedConfig(self.config, mpu)
# Validate command line arguments
def _do_args_sanity_check(self, args):
if hasattr(args, 'deepscale_config') and args.deepscale_config is not None:
logger.warning(
"************ --deepscale_config is deprecated, please use --deepspeed_config ************"
)
if hasattr(args, 'deepspeed_config'):
assert args.deepspeed_config is None, "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
args.deepspeed_config = args.deepscale_config
assert "LOCAL_RANK" in os.environ or "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment " \
"variable, it is set by the deepspeed launcher, deepspeed.init_distributed, or the torch.distributed launcher. If using a " \
"different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed."
if hasattr(args, 'local_rank') and args.local_rank is not None:
assert isinstance(args.local_rank, int), f"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}"
if args.local_rank >= 0:
env_local_rank = int(os.environ.get("LOCAL_RANK"))
assert env_local_rank == args.local_rank, \
f"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}."
if self.config is None:
assert hasattr(args, 'deepspeed_config') and args.deepspeed_config is not None, \
'DeepSpeed requires --deepspeed_config to specify configuration file'
assert os.path.isfile(args.deepspeed_config), \
'DeepSpeed configuration file: {} is not an existing file'.format(args.deepspeed_config)
def _is_supported_optimizer(self, optimizer_name):
return optimizer_name in DEEPSPEED_OPTIMIZERS or \
getattr(torch.optim, optimizer_name, None) is not None
# Validate configuration based on command line arguments
def _do_sanity_check(self):
assert isinstance(self.client_optimizer, (type(None), Optimizer, Callable)), \
f'Client Optimizer is of unexpected type {type(self.client_optimizer)}'
if not self.client_optimizer:
if self.optimizer_name() is not None:
assert self._is_supported_optimizer(self.optimizer_name()), \
'{} is not a supported DeepSpeed Optimizer'.format(self.optimizer_name())
if self.optimizer_name() == LAMB_OPTIMIZER or self.optimizer_name(
) == ONEBIT_LAMB_OPTIMIZER:
assert self.dynamic_loss_scale(), \
'DeepSpeed {} optimizer requires dynamic loss scaling'.format(self.optimizer_name())
# Detect invalid combinations of client optimizer and client scheduler
if isinstance(self.client_lr_scheduler, _LRScheduler):
assert isinstance(self.client_optimizer, Optimizer), \
f'Client Optimizer (type = {type(self.client_optimizer)}) is not instantiated but Client LR Scheduler is instantiated'
def _broadcast_model(self):
def is_replicated(p):
if hasattr(p, 'ds_status') and p.ds_status is not ZeroParamStatus.AVAILABLE:
return False
return True
for p in self.module.parameters():
if hasattr(p, 'allreduce') and not p.allreduce:
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p,
self.expert_broadcast_src_rank,
group=self.expert_data_parallel_group)
else:
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p,
self.broadcast_src_rank,
group=self.data_parallel_group)
def _configure_distributed_model(self, model):
self.module = model
if self.fp16_enabled():
if self.zero_optimization_partition_weights() and any(
[hasattr(param,
'ds_id') for param in self.module.parameters()]):
if not all(
[param.dtype == torch.half for param in self.module.parameters()]):
names = [
n for n,
p in self.module.named_parameters() if p.dtype != torch.half
]
raise ValueError(
f"fp16 is enabled but the following parameters have dtype that is not fp16: {', '.join(names)}"
)
self.module.half()
else:
if not all(
[param.dtype == torch.float for param in self.module.parameters()]):
names = [
n for n,
p in self.module.named_parameters() if p.dtype != torch.float
]
raise ValueError(
f"fp32 is enabled but the following parameters have dtype that is not fp32: {', '.join(names)}"
)
if not self.dont_change_device:
self.module.to(self.device)
# MoE related initialization
for _, module in self.module.named_modules():
if isinstance(module, MoE):
self.has_moe_layers = True
self.num_experts = module.num_experts
break
if self.has_moe_layers:
for _, module in self.module.named_modules():
if isinstance(module, TopKGate):
self.gate_modules.append(module)
if self.wall_clock_breakdown():
module.wall_clock_breakdown = True
if isinstance(module, MOELayer):
self.moe_layers.append(module)
if self.wall_clock_breakdown():
module.wall_clock_breakdown = True
if not self.pipeline_parallelism:
# PipeEngine's mpu object is different from Megatron's mpu object
# so we handle them separately
if self.mpu is not None:
if groups.is_initialized():
# Scenario 4 - Case 1
assert self.mpu.get_data_parallel_world_size() == groups.get_data_parallel_world_size(), "mpu object provided must match mpu object provided to groups.initialize()"
assert self.mpu.get_model_parallel_world_size() == groups.get_model_parallel_world_size(), "mpu object provided must match mpu object provided to groups.initialize()"
else:
# Scenario 3
groups.initialize(mpu=self.mpu)
else:
if not groups.is_initialized():
# Scenario 1
groups.initialize()
#else:
# Scenario 2
# Scenario 4 - Case 2
# pass
self.data_parallel_group = groups.get_data_parallel_group()
self.dp_world_size = groups.get_data_parallel_world_size()
self.mp_world_size = groups.get_model_parallel_world_size()
self.broadcast_src_rank = _get_global_rank(groups.get_data_parallel_group(),
0)
else:
self.data_parallel_group = self.mpu.get_data_parallel_group()
self.dp_world_size = self.mpu.get_data_parallel_world_size()
self.mp_world_size = self.mpu.get_model_parallel_world_size()
self.broadcast_src_rank = _get_global_rank(
self.mpu.get_data_parallel_group(),
0)
if self.has_moe_layers:
# No assert needed because this will only be true if MoE Layer creation was successful
self.expert_data_parallel_group = groups.get_expert_data_parallel_group()
self.expert_parallel_group = groups.get_expert_parallel_group()
self.ep_world_size = groups.get_expert_parallel_world_size()
self.expert_broadcast_src_rank = _get_global_rank(
groups.get_expert_data_parallel_group(),
0)
if not self.amp_enabled():
self._broadcast_model()
#check if parameters are duplicated in optimizer param_groups
def _check_for_duplicates(self, optimizer):
for name, param in self.module.named_parameters():
param_id = id(param)
def ids_list(group):
return [id(param) for param in group]
occurrence = sum([
ids_list(group['params']).count(param_id)
if param_id in ids_list(group['params']) else 0
for group in optimizer.param_groups
])
assert occurrence <= 1, f"Parameter with name: {name} occurs multiple times in optimizer.param_groups. Make sure it only appears once to prevent undefined behaviour."
# Configure optimizer
def _configure_optimizer(self, client_optimizer, model_parameters):
if client_optimizer is not None:
if isinstance(client_optimizer, Optimizer):
client_optimizer.param_groups[:] = [
pg for pg in client_optimizer.param_groups if len(pg["params"]) != 0
]
if self.global_rank == 0:
logger.info(
"Removing param_group that has no 'params' in the client Optimizer"
)
basic_optimizer = client_optimizer
if self.global_rank == 0:
logger.info('Using client Optimizer as basic optimizer')
else:
basic_optimizer = client_optimizer(model_parameters)
if self.global_rank == 0:
logger.info('Using client callable to create basic optimizer')
else:
basic_optimizer = self._configure_basic_optimizer(model_parameters)
if self.global_rank == 0:
logger.info(
'Using DeepSpeed Optimizer param name {} as basic optimizer'.format(
self.optimizer_name()))
self._check_for_duplicates(basic_optimizer)
self.basic_optimizer = basic_optimizer
if self.global_rank == 0:
logger.info('DeepSpeed Basic Optimizer = {}'.format(
basic_optimizer.__class__.__name__))
if self.zero_optimization():
assert not self.amp_enabled(), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2"
if not is_zero_supported_optimizer(basic_optimizer):
assert self.zero_allow_untested_optimizer(), \
'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.'
if self.global_rank == 0:
logger.warning(
"**** You are using ZeRO with an untested optimizer, proceed with caution *****"
)
self.optimizer = self._configure_zero_optimizer(basic_optimizer)
elif self.amp_enabled():
assert not self.fp16_enabled(), "Cannot enable both amp and (legacy) fp16 mode"
amp_params = self.amp_params()
if self.global_rank == 0:
logger.info(f"Initializing AMP with these params: {amp_params}")
try:
logger.info("Initializing Apex amp from: {}".format(amp.__path__))
except NameError:
# If apex/amp is available it will be imported above
raise RuntimeError(
"Unable to import apex/amp, please make sure it is installed")
self.module, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)
self._broadcast_model()
# TODO: maybe need to broadcast experts differently?
elif self.fp16_enabled():
self.optimizer = self._configure_fp16_optimizer(basic_optimizer)
else:
self.optimizer = basic_optimizer
log_dist('DeepSpeed Final Optimizer = {}'.format(self.optimizer_name()),
ranks=[0])
self.quantizer = self._configure_quantization()
def _configure_basic_optimizer(self, model_parameters):
optimizer_parameters = self.optimizer_params()
if optimizer_parameters is None:
optimizer_parameters = {}
# print(optimizer_parameters.keys())
if 'max_grad_norm' in optimizer_parameters.keys():
raise ValueError(
"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details"
)
if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]:
torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)
adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)
# An optimizer name of AdamW always enables AdamW logic; for Adam it follows the adam_w_mode setting
effective_adam_w_mode = self.optimizer_name(
) == ADAMW_OPTIMIZER or adam_w_mode
if torch_adam:
if not effective_adam_w_mode:
optimizer = torch.optim.Adam(model_parameters,
**optimizer_parameters)
else:
optimizer = torch.optim.AdamW(model_parameters,
**optimizer_parameters)
else:
if self.zero_cpu_offload():
from deepspeed.ops.adam import DeepSpeedCPUAdam
optimizer = DeepSpeedCPUAdam(model_parameters,
**optimizer_parameters,
adamw_mode=effective_adam_w_mode)
else:
from deepspeed.ops.adam import FusedAdam
optimizer = FusedAdam(model_parameters,
**optimizer_parameters,
adam_w_mode=effective_adam_w_mode)
elif self.optimizer_name() == LAMB_OPTIMIZER:
from deepspeed.ops.lamb import FusedLamb
optimizer = FusedLamb(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
assert not self.zero_optimization(), "1bit-Adam is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.adam import OnebitAdam
optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(
f'Currently the convergence of 1-bit Adam is only verified under FP16'
)
elif self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER:
assert not self.zero_optimization(), "1bit-Lamb is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.lamb import OnebitLamb
optimizer = OnebitLamb(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(
f'Currently the convergence of 1-bit Lamb is only verified under FP16'
)
else:
torch_optimizer = getattr(torch.optim, self.optimizer_name())
optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
return optimizer
def _configure_quantization(self):
quantize_enabled, \
q_target_bits, \
q_start_bits, \
q_period, \
q_offset, \
q_groups, \
q_mixed_fp16, \
q_change_ratio, \
q_type, \
q_rounding, \
q_verbose, \
use_quantizer_kernel = self.quantize_training()
quantizer = None
if quantize_enabled:
from deepspeed.runtime.quantize import Quantizer
quantizer = Quantizer(
q_target_bits,
q_start_bits,
q_period,
q_offset,
q_groups,
q_mixed_fp16,
q_change_ratio,
q_type,
q_rounding,
q_verbose,
self.eigenvalue_enabled(),
use_quantizer_kernel,
self.eigenvalue_layer_num() if self.eigenvalue_enabled() else 0)
return quantizer
def _configure_fp16_optimizer(self, optimizer):
initial_dynamic_scale = self.initial_dynamic_scale()
dynamic_loss_args = self.dynamic_loss_scale_args()
clip_grad = self.gradient_clipping()
if APEX_INSTALLED:
fused_opts = (apex.optimizers.FusedAdam, FusedAdam)
else:
fused_opts = FusedAdam
if isinstance(optimizer, fused_opts) \
or self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
if self.dynamic_loss_scale():
log_dist('Creating fp16 optimizer with dynamic loss scale', ranks=[0])
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = FP16_Optimizer(
optimizer,
deepspeed=self,
dynamic_loss_scale=True,
initial_dynamic_scale=initial_dynamic_scale,
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
timers=timers)
else:
log_dist('Creating fp16 optimizer with static loss scale: {}'.format(
self.loss_scale()),
ranks=[0])
optimizer = FP16_Optimizer(
optimizer,
deepspeed=self,
static_loss_scale=self.loss_scale(),
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion())
else:
log_dist('Creating fp16 unfused optimizer with dynamic loss scale',
ranks=[0])
optimizer = FP16_UnfusedOptimizer(
optimizer,
deepspeed=self,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER)
return optimizer
def _configure_zero_optimizer(self, optimizer):
zero_stage = self.zero_optimization_stage()
log_dist('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage), ranks=[0])
assert not self.allreduce_always_fp32(), "ZeRO does not support 'fp32_allreduce': true"
timers = self.timers if self.wall_clock_breakdown() else None
if self.zero_legacy_stage1(
) and zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
assert not self.has_moe_layers, "MoE not supported with Stage 1"
optimizer = FP16_DeepSpeedZeroOptimizer_Stage1(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
all_gather_partitions=self.zero_allgather_partitions(),
allgather_size=self.zero_allgather_bucket_size(),
max_elements_per_comm=self.zero_reduce_bucket_size(),
dp_process_group=self.data_parallel_group,
elastic_checkpoint=self.zero_elastic_checkpoint(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_predivide=self.gradient_predivide)
elif zero_stage <= ZERO_OPTIMIZATION_GRADIENTS:
overlap_comm = self.zero_overlap_comm()
contiguous_gradients = self.zero_contiguous_gradients()
round_robin_gradients = self.zero_round_robin_gradients()
# Overlap and contiguous grads are meaningless in stage 1 and are ignored
if zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
overlap_comm = False
contiguous_gradients = False
round_robin_gradients = False
if isinstance(self.module, PipelineModule):
if overlap_comm:
logger.warning(
"Pipeline parallelism does not support overlapped communication, will be disabled."
)
overlap_comm = False
optimizer = FP16_DeepSpeedZeroOptimizer(
optimizer,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=contiguous_gradients,
reduce_bucket_size=self.zero_reduce_bucket_size(),
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
expert_parallel_group=self.expert_parallel_group
if self.has_moe_layers else None,
expert_data_parallel_group=self.expert_data_parallel_group
if self.has_moe_layers else None,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=overlap_comm,
cpu_offload=self.zero_cpu_offload(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
ignore_unused_parameters=self.zero_ignore_unused_parameters(),
partition_grads=zero_stage == ZERO_OPTIMIZATION_GRADIENTS,
round_robin_gradients=round_robin_gradients,
has_moe_layers=self.has_moe_layers,
fp16_master_weights_and_gradients=self.fp16_master_weights_and_gradients(
))
elif zero_stage == ZERO_OPTIMIZATION_WEIGHTS:
assert not self.has_moe_layers, "MoE not supported with Stage 3"
print("Initializing ZeRO Stage 3") if dist.get_rank() == 0 else None
from deepspeed.runtime.zero.stage3 import FP16_DeepSpeedZeroOptimizer_Stage3
optimizer = FP16_DeepSpeedZeroOptimizer_Stage3(
self.module,
optimizer,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
offload_optimizer_config=self.zero_offload_optimizer(),
offload_param_config=self.zero_offload_param(),
sub_group_size=self.zero_sub_group_size(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
aio_config=self.aio_config())
else:
raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage))
return optimizer
def _configure_eigenvalue(self):
eigenvalue = Eigenvalue(
verbose=self.eigenvalue_verbose(),
max_iter=self.eigenvalue_max_iter(),
tol=self.eigenvalue_tol(),
stability=self.eigenvalue_stability(),
gas_boundary_resolution=self.eigenvalue_gas_boundary_resolution(),
layer_name=self.eigenvalue_layer_name(),
layer_num=self.eigenvalue_layer_num())
return eigenvalue
def _configure_progressive_layer_drop(self):
pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())
return pld
def _configure_curriculum_scheduler(self):
scheduler = CurriculumScheduler(self.curriculum_params())
return scheduler
@staticmethod
def is_map_style_dataset(obj):
return hasattr(obj, "__getitem__") and hasattr(obj, "__len__")
@staticmethod
def is_iterable_style_dataset(obj):
return isinstance(obj,
torch.utils.data.IterableDataset
) # hasattr(obj, "__iter__") should work as well
def dataloader_drop_last(self):
return self._config.dataloader_drop_last
def was_step_applied(self) -> bool:
"""Returns True if the latest ``step()`` produced in parameter updates.
Note that a ``False`` return is not an error condition. Steps are frequently
no-ops, such as between gradient accumulation boundaries or when overflows
occur.
Returns:
bool: Whether the latest ``step()`` modified model parameters.
"""
return self._step_applied
def deepspeed_io(self,
dataset,
batch_size=None,
route=ROUTE_TRAIN,
pin_memory=True,
data_sampler=None,
collate_fn=None,
num_local_io_workers=None):
if not (self.is_map_style_dataset(dataset)
or self.is_iterable_style_dataset(dataset)):
raise ValueError("Training data must be a torch Dataset")
if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):
data_sampler = torch.utils.data.SequentialSampler(dataset)
if batch_size is None:
batch_size = self.train_micro_batch_size_per_gpu()
if collate_fn is None:
collate_fn = self.collate_fn
# Currently we only use timer in train route
deepspeed_io_timer = None
if route == ROUTE_TRAIN:
deepspeed_io_timer = self.tput_timer
# If mpu is provided, forward world size and parallel rank to sampler.
data_parallel_world_size = None
data_parallel_rank = None
if self.mpu is not None:
data_parallel_world_size = self.mpu.get_data_parallel_world_size()
data_parallel_rank = self.mpu.get_data_parallel_rank()
return DeepSpeedDataLoader(dataset=dataset,
batch_size=batch_size,
pin_memory=pin_memory,
collate_fn=collate_fn,
local_rank=self.local_rank,
tput_timer=deepspeed_io_timer,
num_local_io_workers=num_local_io_workers,
data_sampler=data_sampler,
data_parallel_world_size=data_parallel_world_size,
data_parallel_rank=data_parallel_rank,
dataloader_drop_last=self.dataloader_drop_last())
def train(self, mode=True):
r"""Set the module in training mode (delegates to ``torch.nn.Module.train``)."""
self.warn_unscaled_loss = True
self.module.train(mode)
def eval(self):
r"""Set the module in evaluation mode (delegates to ``torch.nn.Module.train(False)``)."""
self.warn_unscaled_loss = True
self.module.train(False)
def _scale_loss_by_gas(self, prescaled_loss):
if isinstance(prescaled_loss, torch.Tensor):
scaled_loss = prescaled_loss / self.gradient_accumulation_steps()
elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):
scaled_loss = []
for l in prescaled_loss:
if isinstance(l, torch.Tensor):
scaled_loss.append(l / self.gradient_accumulation_steps())
else:
scaled_loss.append(l)
else:
scaled_loss = prescaled_loss
if self.warn_unscaled_loss:
logger.warning(
f'DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}'
)
self.warn_unscaled_loss = False
return scaled_loss
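# Worked example (illustrative): with gradient_accumulation_steps() == 4, a micro-batch
# loss tensor of 2.0 is returned as 0.5, so that summing the gradients of four
# micro-batches matches the gradient of the mean loss over the effective batch.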
def forward(self, *inputs, **kwargs):
r"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
if self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step(
) and self.global_rank == 0:
self.flops_profiler.start_profile(ignore_list=None)
if self.module.training and self.progressive_layer_drop:
kwargs.update(self.progressive_layer_drop.get_state())
if self.__class__.__name__ != "PipelineEngine":
# TODO: The above if condition is a HACK since for PipelineEngine
# it's difficult to inject argument in forward pass.
if self.module.training and self.curriculum_enabled():
self.curriculum_scheduler.update_difficulty(self.global_steps + 1)
if self.curriculum_params()["curriculum_type"] == "seqlen":
kwargs.update({
"curriculum_seqlen":
self.curriculum_scheduler.get_current_difficulty()
})
if self.zero_optimization_partition_weights():
# Enable automated discovery of external parameters by indicating that
# we are in a forward pass.
for module in self.module.modules():
module._parameters._in_forward = True
pass
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward').start()
if self.training_dataloader is None:
self.tput_timer.start()
loss = self.module(*inputs, **kwargs)
if self.zero_optimization_partition_weights():
# Reset the ZeRO-3 state if we are only doing forward-passes (ie evaluation).
if not torch._C.is_grad_enabled():
self.optimizer.param_coordinator.reset_step()
# Disable automated discovery of external parameters
for module in self.module.modules():
module._parameters._in_forward = False
if self.wall_clock_breakdown():
self.timers('forward').stop()
self.timers('forward_microstep').stop()
if self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step(
) and self.global_rank == 0:
self.flops_profiler.stop_profile()
self.flops_profiler.print_model_profile(
profile_step=self.global_steps,
module_depth=self.flops_profiler_module_depth(),
top_modules=self.flops_profiler_top_modules(),
detailed=self.flops_profiler_detailed(),
output_file=self.flops_profiler_output_file())
self.flops_profiler.end_profile()
return loss
def print_forward_breakdown(self, fwd_time):
gate_time = 0.0
moe_time = 0.0
falltoall = 0.0
salltoall = 0.0
for gate in self.gate_modules:
#logger.info(f"Individual TopK gate time: {gate.gate_time:.2f} ms")
gate_time += gate.gate_time
for l in self.moe_layers:
#logger.info(f"MoE layer; total: {l.time_moe:.2f} ms, first alltoall: {l.time_falltoall:.2f}, second alltoall: {l.time_salltoall:.2f}")
moe_time += l.time_moe
falltoall += l.time_falltoall
salltoall += l.time_salltoall
#TODO: Allreduce/average them across ranks for more accurate timing.
#if torch.distributed.get_rank() == 0:
log_dist(
f"rank={torch.distributed.get_rank()} time (ms) | forward: {fwd_time:.2f} (forward_moe: {moe_time:.2f}, 1st alltoall: {falltoall:.2f}, 2nd alltoall: {salltoall:.2f}, top-k: {gate_time:.2f})",
ranks=[0])
def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):
# Pass (PP) gas boundary flag to optimizer (required for zero)
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(
)
# ZeRO stage 2 communicates during non gradient accumulation boundaries as well
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
# Communicate only at gradient accumulation boundaries
elif self.is_gradient_accumulation_boundary():
if self.zero_optimization_stage() == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
self.optimizer.reduce_gradients(
pipeline_parallel=self.pipeline_parallelism)
else:
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)
def backward(self, loss, allreduce_gradients=True, release_loss=False):
r"""Execute backward pass on the loss
Arguments:
loss: Torch tensor on which to execute backward propagation
allreduce_gradients: is deprecated, ignored, and will soon be removed
"""
if not allreduce_gradients:
logger.warning(
f'Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed'
)
# scale loss w.r.t. gradient accumulation if needed
if self.gradient_accumulation_steps() > 1:
loss = self._scale_loss_by_gas(loss.float())
# Log training Loss
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/train_loss',
loss.mean().item() * self.gradient_accumulation_steps(),
self.global_samples)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
if self.wall_clock_breakdown():
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
if self.zero_optimization():
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(
)
self.optimizer.backward(loss)
elif self.amp_enabled():
# AMP requires delaying unscale when inside gradient accumulation boundaries
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
delay_unscale = not self.is_gradient_accumulation_boundary()
with amp.scale_loss(loss,
self.optimizer,
delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward()
elif self.fp16_enabled():
if self.eigenvalue_enabled():
self.optimizer.backward(loss, create_graph=True, retain_graph=True)
else:
self.optimizer.backward(loss)
else:
if self.eigenvalue_enabled():
loss.backward(create_graph=True, retain_graph=True)
else:
loss.backward()
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
if self.wall_clock_breakdown():
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce').start()
if self.enable_backward_allreduce:
self.allreduce_gradients()
if self.wall_clock_breakdown():
self.timers('backward_allreduce').stop()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
if release_loss:
# loss.data = None
pass
return loss
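# Illustrative training-loop sketch (assumption, not from the original source):
#
#     for batch in data_loader:        # `data_loader` is a hypothetical iterable of batches
#         loss = engine(batch)         # forward()
#         engine.backward(loss)        # scales the loss and runs backprop
#         engine.step()                # no-op except at gradient accumulation boundaries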
def is_gradient_accumulation_boundary(self):
"""Query whether the current micro-batch is at the boundary of
gradient accumulation, and thus will trigger gradient reductions and
an optimizer step.
Returns:
bool: whether the current step is a gradient accumulation boundary.
"""
return (self.micro_steps + 1) % \
self.gradient_accumulation_steps() == 0
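# Worked example (illustrative): with gradient_accumulation_steps() == 4 the test
# (micro_steps + 1) % 4 == 0 is True on micro_steps 3, 7, 11, ..., so gradients are
# reduced and the optimizer stepped on every fourth micro-batch.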
def zero_grad(self):
"""
Zero parameter grads.
"""
for param_name, param in self.module.named_parameters():
param.grad = None
def clip_fp32_gradients(self):
clip_grad_norm_(parameters=self.module.parameters(),
max_norm=self.gradient_clipping(),
mpu=self.mpu)
def _take_model_step(self, lr_kwargs, block_eigenvalue={}):
if self.gradient_clipping() > 0.0:
if not (self.fp16_enabled() or self.amp_enabled()
or self.zero_optimization()):
self.clip_fp32_gradients()
elif self.amp_enabled():
# AMP's recommended way of doing clipping
# https://nvidia.github.io/apex/advanced.html#gradient-clipping
master_params = amp.master_params(self.optimizer)
clip_grad_norm_(parameters=master_params,
max_norm=self.gradient_clipping(),
mpu=self.mpu)
self.optimizer.step()
if hasattr(self.optimizer, '_global_grad_norm'):
self._global_grad_norm = self.optimizer._global_grad_norm
# Quantize the updated parameter if there is no overflow
if self.quantizer:
self.quantizer.quantize(
(self.optimizer.fp16_groups
if self.fp16_enabled() else self.optimizer.param_groups),
(self.optimizer.overflow if self.fp16_enabled() else False),
self.eigenvalue_enabled(),
block_eigenvalue)
#zero grad in basic optimizer could be unreliable and may not exhibit
#the behaviour that we want
if not self.zero_optimization() and not self.fp16_enabled(
) and not self.amp_enabled():
self.zero_grad()
else:
self.optimizer.zero_grad()
report_progress = self.global_rank == 0 if self.global_rank else True
# Check overflow here since in DS fp16 optimizer, the overflow is updated in above step() function.
overflow = False
if hasattr(self.optimizer, 'overflow'):
overflow = self.optimizer.overflow
self._step_applied = not overflow
if overflow:
self.skipped_steps += 1
else:
if self.lr_scheduler is not None:
try:
self.lr_scheduler.step(**(lr_kwargs or {}))
except TypeError:
# XXX Hack to work with Megatron 2.0 and DeepSpeed pipelines.
# We don't currently have a way to specify lr_kwargs from
# pipe_engine.train_batch()
self.lr_scheduler.step(increment=self.train_batch_size())
if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:
self._report_progress(self.global_steps + 1)
self.global_steps += 1
self.global_samples += self.train_batch_size()
def step(self, lr_kwargs=None):
r"""Execute the weight update step after forward and backward propagation
on effective_train_batch.
"""
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use step"
report_progress = self.global_rank == 0 if self.global_rank else True
self._step_applied = False # assume False, will flip to True
# Update the model when we reach gradient accumulation boundaries
if self.is_gradient_accumulation_boundary():
self.gas_boundary_ctr += 1
if self.eigenvalue_enabled() and (
self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution() ==
0) and self.quantizer.any_precision_switch():
log_dist(f'computing eigenvalue...', ranks=[0])
self.block_eigenvalue = self.eigenvalue.compute_eigenvalue(
self.module,
self.device,
self.optimizer.cur_scale)
if self.progressive_layer_drop:
self.progressive_layer_drop.update_state(self.global_steps)
if self.eigenvalue_enabled(
) and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution(
) and self.quantizer.any_precision_switch():
self._take_model_step(lr_kwargs, self.block_eigenvalue)
else:
self._take_model_step(lr_kwargs)
self.tput_timer.stop(report_progress)
# Log learning rate
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(f'Train/Samples/lr',
self.get_lr()[0],
self.global_samples)]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append((f'Train/Samples/loss_scale',
self.optimizer.cur_scale,
self.global_samples))
if self.eigenvalue_enabled(
) and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution(
):
ev_values = list(self.block_eigenvalue.values())
for i in range(len(ev_values)):
self.summary_writer.add_scalar(
f'Train/Eigenvalues/ModelBlockParam_{i}',
ev_values[i][0],
self.global_samples)
self.summary_writer.flush()
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('step').stop()
self.timers('step_microstep').stop()
timer_names = [
'forward_microstep',
'backward_microstep',
'backward_inner_microstep',
'backward_allreduce_microstep',
'step_microstep'
]
self.timers.log(names=timer_names,
reset=False,
memory_breakdown=self.memory_breakdown())
# Log timing
if self.is_gradient_accumulation_boundary():
if self.tensorboard_enabled():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/elapsed_time_ms_forward',
self.timers('forward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward',
self.timers('backward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_inner',
self.timers('backward_inner').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_allreduce',
self.timers('backward_allreduce').elapsed(reset=False) *
1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_step',
self.timers('step').elapsed(reset=False) * 1000.0,
self.global_samples)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
fwd_time = self.timers('forward').elapsed(reset=False) * 1000
self.timers.log([
'forward',
'backward',
'backward_inner',
'backward_allreduce',
'step'
],
reset=False)
if self.has_moe_layers:
self.print_forward_breakdown(fwd_time=fwd_time)
self.micro_steps += 1
def _get_optimizer_param(self, param_name):
result = []
if not self.optimizer:
return result
for group in self.optimizer.param_groups:
if param_name in group:
result.append(group[param_name])
else:
result.append(0.0)
return result
def get_lr(self):
return self._get_optimizer_param('lr')
def get_type(self):
return self._get_optimizer_param('type')
def get_mom(self):
if self.optimizer_name() in ['SGD', 'RMSprop']:
return self._get_optimizer_param('momentum')
else:
return self._get_optimizer_param('betas')
def get_pld_theta(self):
if self.progressive_layer_drop:
return self.progressive_layer_drop.get_theta()
else:
return None
def _report_progress(self, step):
lr = self.get_lr()
mom = self.get_mom()
log_dist(f'step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}',
ranks=[0])
def allreduce_bucket(self, bucket, dp_group):
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if self.allreduce_always_fp32():
tensor_to_allreduce = tensor.float()
if self.postscale_gradients():
if self.gradient_predivide_factor() != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor())
dist.all_reduce(tensor_to_allreduce, group=dp_group)
if self.gradient_average:
if self.gradient_predivide_factor() != dist.get_world_size(
group=dp_group):
tensor_to_allreduce.mul_(self.gradient_predivide_factor() /
dist.get_world_size(group=dp_group))
else:
tensor_to_allreduce.div_(dist.get_world_size(group=dp_group))
dist.all_reduce(tensor_to_allreduce, group=dp_group)
if self.allreduce_always_fp32() and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
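# Editorial note (illustrative, not from the original source): with postscale
# gradients enabled, a predivide factor f and data-parallel world size W, the bucket
# is scaled by 1/f before the all-reduce and by f/W afterwards (when gradient_average
# is set), which is numerically equivalent to a plain mean over W ranks but reduces
# the risk of fp16 overflow during the reduction.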
def allreduce_and_copy(self, small_bucket, dp_group):
allreduced = self.allreduce_bucket(small_bucket, dp_group)
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, dp_group)
small_bucket = []
numel = 0
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, dp_group)
def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):
grads, expert_grads = [], []
for param_name, param in self.module.named_parameters():
if hasattr(param, 'allreduce') and not param.allreduce:
is_moe_param = True
else:
is_moe_param = False
if param.grad is None:
# In cases where there is an imbalance of empty grads across
# ranks we must create empty grads, this will ensure that every
# rank is reducing the same size. In some cases it may make
# sense in the future to support the ability to average not
# w.r.t. world size but with a different value.
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=param.device)
if is_moe_param:
expert_grads.append(param.grad.data)
else:
grads.append(param.grad.data)
else:
grad_data = param.grad.data
if self.sparse_gradients_enabled(
) and param_name in self.csr_tensor_module_names:
if is_moe_param:
expert_grads.append(CSRTensor(grad_data))
else:
grads.append(CSRTensor(grad_data))
else:
if is_moe_param:
expert_grads.append(grad_data)
else:
grads.append(grad_data)
split_buckets = split_half_float_double_csr(grads)
for _, bucket_tuple in enumerate(split_buckets):
bucket_type, bucket = bucket_tuple
if self.pipeline_parallelism:
dp_group = self.mpu.get_data_parallel_group()
else:
dp_group = groups.get_data_parallel_group()
if bucket_type == CSRTensor.type():
self.csr_allreduce_no_retain(bucket, dp_group=dp_group)
else:
self.allreduce_no_retain(bucket,
dp_group=dp_group,
numel_per_bucket=elements_per_buffer)
if self.has_moe_layers:
expert_split_buckets = split_half_float_double_csr(expert_grads)
for i, bucket_tuple in enumerate(expert_split_buckets):
bucket_type, bucket = bucket_tuple
if bucket_type == CSRTensor.type():
self.csr_allreduce_no_retain(bucket,
groups.get_expert_data_parallel_group())
else:
# Separate between diff groups
self.allreduce_no_retain(
bucket,
dp_group=groups.get_expert_data_parallel_group(),
numel_per_bucket=elements_per_buffer)
def csr_allreduce_no_retain(self, bucket, dp_group):
allreduced_csrs = self.csr_allreduce_bucket(bucket, dp_group)
# Densify csr tensor and copy back to original location
for csr in allreduced_csrs:
dense_tensor = csr.to_dense()
csr.orig_dense_tensor.copy_(dense_tensor)
def csr_allreduce_bucket(self, bucket, dp_group):
csr_list = []
for csr in bucket:
csr_list.append(self.csr_allreduce(csr, dp_group))
return csr_list
def csr_allreduce(self, csr, dp_group):
# Pre-divide for fp16 stability
csr.values.div_(dist.get_world_size(group=dp_group))
indices_device_list = self.csr_all_gather(csr.indices, dp_group)
values_device_list = self.csr_all_gather(csr.values, dp_group)
csr.indices = torch.cat(indices_device_list)
csr.values = torch.cat(values_device_list)
return csr
def csr_all_gather(self, value, dp_group):
my_size = torch.LongTensor([value.size()[0]]).to(self.device)
all_sizes = self.all_gather_scalar(my_size, dp_group)
max_size = torch.cat(all_sizes).max()
fill_size = (max_size - my_size)
assert value.dim() in [1, 2]
if value.dim() == 1:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size)])
tensor_list = [
value.new_zeros(max_size)
for _ in range(dist.get_world_size(group=dp_group))
]
else:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size, value.size()[1])])
tensor_list = [
value.new_zeros(max_size,
value.size()[1])
for _ in range(dist.get_world_size(group=dp_group))
]
dist.all_gather(tensor_list, value, group=dp_group)
tensors = []
for dev_idx, t in enumerate(tensor_list):
size = all_sizes[dev_idx][0]
tensors.append(
t.index_select(0,
torch.LongTensor(range(size)).to(self.device)))
return tensors
def all_gather_scalar(self, value, dp_group):
tensor_list = [
value.new_zeros(value.size())
for _ in range(dist.get_world_size(group=dp_group))
]
dist.all_gather(tensor_list, value, group=dp_group)
return tensor_list
def module_state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.module.state_dict(destination, prefix, keep_vars)
return sd
def load_moe_state_dict(self, checkpoint_path, tag, state_dict):
expp_rank = groups.get_expert_parallel_rank()
num_local_experts = self.num_experts // self.ep_world_size
for local_expert_id in range(num_local_experts):
global_expert_id = expp_rank * num_local_experts + local_expert_id
expert_state_dict = torch.load(self._get_expert_ckpt_name(
checkpoint_path,
global_expert_id,
tag),
map_location=torch.device('cpu'))
# Updating global -> local expert ids
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
for key in list(expert_state_dict.keys()):
local_key = key.replace(f'{moe_str_prefix}{global_expert_id}',
f'{moe_str_prefix}{local_expert_id}')
expert_state_dict[local_key] = expert_state_dict.pop(key)
state_dict.update(expert_state_dict)
def load_module_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank):
filename = 'zero_pp_rank_{}'.format(dp_rank)
zero_ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_{:02d}'.format(mp_rank) + '_optim_states.pt')
return zero_ckpt_name
def _get_zero_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)
return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank)
def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None):
if mp_placeholder is not None:
mp_rank_str = mp_placeholder
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
mp_rank_str = "{:02d}".format(mp_rank)
if self.zero_optimization_partition_weights():
filename = 'zero_pp_rank_{}'.format(
torch.distributed.get_rank(group=self.optimizer.dp_process_group))
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_' + mp_rank_str + '_model_states.pt')
else:
ckpt_name = os.path.join(checkpoints_path,
str(tag),
'mp_rank_' + mp_rank_str + '_model_states.pt')
return ckpt_name
def _get_optimizer_ckpt_name(self, checkpoints_path, tag, expp_rank):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
f'expp_rank_{expp_rank}_mp_rank_{mp_rank:02d}_optim_states.pt')
return ckpt_name
def _get_expert_ckpt_name(self, checkpoints_path, expert_id, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
f'expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt')
return ckpt_name
def _get_all_ckpt_names(self, checkpoints_path, tag):
# It is required that (checkpoints_path, tag) are consistent among all ranks.
ckpt_file_pattern = self._get_ckpt_name(checkpoints_path,
tag,
mp_placeholder="*")
import glob
ckpt_files = glob.glob(ckpt_file_pattern)
ckpt_files.sort()
return ckpt_files
def load_checkpoint(self,
load_dir,
tag=None,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True,
load_module_only=False):
"""Load training checkpoint
Arguments:
load_dir: Required. Directory to load the checkpoint from
tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file
load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match.
load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance
load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint.
load_module_only: Optional. Boolean to load only the model weights from the checkpoint. Ex. warmstarting.
Returns:
A tuple of ``load_path`` and ``client_state``.
*``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed.
*``client_state``: State dictionary used for loading required training states in the client code.
Important: under ZeRO3, one cannot load checkpoint with ``engine.load_checkpoint()`` right
after ``engine.save_checkpoint()``. This is because ``engine.module`` is partitioned, and
``load_checkpoint()`` expects a pristine model. If you must do so, reinitialize the engine
before ``load_checkpoint()``.
"""
if tag is None:
latest_path = os.path.join(load_dir, 'latest')
if os.path.isfile(latest_path):
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
logger.warning(f"Unable to find latest file at {latest_path}, if trying to load latest " \
"checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.")
return None, None
load_path, client_states = self._load_checkpoint(load_dir,
tag,
load_module_strict=load_module_strict,
load_optimizer_states=load_optimizer_states,
load_lr_scheduler_states=load_lr_scheduler_states,
load_module_only=load_module_only)
if self.zero_optimization() and load_path is not None:
success = self._load_zero_checkpoint(
load_dir,
tag,
load_optimizer_states=load_optimizer_states)
if not success:
self.optimizer._restore_from_fp16_weights()
return load_path, client_states
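# Illustrative usage sketch (assumption, not from the original source):
#
#     load_path, client_state = engine.load_checkpoint('/tmp/ckpts')   # tag read from 'latest'
#     if load_path is None:
#         print('no checkpoint found; starting from scratch')
#     else:
#         resume_step = client_state.get('step')   # 'step' is a hypothetical client-side key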
def _load_checkpoint(self,
load_dir,
tag,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True,
load_module_only=False):
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
ckpt_list = self._get_all_ckpt_names(load_dir, tag)
sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list)
is_pipe_parallel = isinstance(self.module, PipelineModule)
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
load_path, checkpoint, _ = sd_loader.load(self.mp_world_size, mp_rank, is_pipe_parallel=is_pipe_parallel)
if checkpoint is None:
return None, None
# TODO: merge the above two after talking to Reza/Jeff.
if is_pipe_parallel:
# Pipeline parallelism uses this to load its own checkpoint files.
self._curr_ckpt_path = os.path.join(load_dir, tag)
if self.has_moe_layers:
self.load_moe_state_dict(load_dir, tag, state_dict=checkpoint['module'])
self.load_module_state_dict(state_dict=checkpoint['module'],
strict=load_module_strict)
#TODO: Do the following before we merge to master.
# if load_optimizer_states and not load_module_only:
# Add consistency check between fp16 and fp32 parameters
# If the consistency check fails, crash with a message telling users
# to turn on load_module_only.
self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']
if load_module_only:
deepspeed_states = ['module']
if self.optimizer is not None and self.fp16_enabled():
self.optimizer.refresh_fp32_params()
else:
if self.has_moe_layers:
expp_rank = groups.get_expert_parallel_rank()
optim_load_path = self._get_optimizer_ckpt_name(load_dir, tag, expp_rank)
optim_checkpoint = torch.load(optim_load_path,
map_location=torch.device('cpu'))
else:
optim_checkpoint = checkpoint
if load_optimizer_states and self.optimizer is not None and not self.zero_optimization(
):
if self.fp16_enabled():
self.optimizer.load_state_dict(
optim_checkpoint['optimizer'],
load_optimizer_states=load_optimizer_states)
else:
self.optimizer.load_state_dict(optim_checkpoint['optimizer'])
if load_lr_scheduler_states and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
self.csr_tensor_module_names = checkpoint['csr_tensor_module_names']
self.global_steps = checkpoint['global_steps']
self.global_samples = checkpoint.get(
'global_samples',
self.global_steps * self.train_batch_size())
self.skipped_steps = checkpoint['skipped_steps']
self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']
deepspeed_states = [
'module',
'csr_tensor_module_names',
'skipped_steps',
'global_steps',
'dp_world_size',
'mp_world_size'
]
client_state = {}
if load_lr_scheduler_states:
deepspeed_states.append('lr_scheduler')
if load_optimizer_states:
deepspeed_states.append('optimizer')
client_state = {
key: value
for key,
value in checkpoint.items() if key not in deepspeed_states
}
if not load_optimizer_states and not load_module_only:
client_state['optimizer'] = optim_checkpoint['optimizer']
return load_path, client_state
def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):
zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)
if zero_sd_list is None:
return False
self.optimizer.load_state_dict(
state_dict_list=zero_sd_list,
load_optimizer_states=load_optimizer_states,
load_from_fp32_weights=self.zero_load_from_fp32_weights())
print(
f'loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}'
)
return True
def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size):
zero_ckpt_names = []
for dp_rank in range(dp_world_size):
ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_rank=dp_rank)
zero_ckpt_names.append(ckpt_name)
return zero_ckpt_names
def _get_all_zero_checkpoint_names(self,
load_dir,
tag,
mp_world_size,
dp_world_size):
zero_ckpt_names = []
for mp_rank in range(mp_world_size):
mp_rank_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=dp_world_size)
zero_ckpt_names += mp_rank_ckpt_names
return zero_ckpt_names
def _get_all_zero_checkpoints(self, load_dir, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=self.loaded_checkpoint_dp_world_size)
invalid_zero_ckpt_paths = []
for i, ckpt_name in enumerate(zero_ckpt_names):
if not os.path.exists(ckpt_name):
# transparently handle the old file pattern for optim_states
if 'optim_states.pt' in ckpt_name:
ckpt_name_try = ckpt_name.replace("_optim_states.pt",
"optim_states.pt")
if os.path.exists(ckpt_name_try):
zero_ckpt_names[i] = ckpt_name_try
continue
invalid_zero_ckpt_paths.append(ckpt_name)
if len(invalid_zero_ckpt_paths) > 0:
logger.warning(
f"The following zero checkpoints paths are missing: {invalid_zero_ckpt_paths}"
)
return None
zero_sd_list = []
for ckpt_name in zero_ckpt_names:
zero_sd_list.append(torch.load(ckpt_name, map_location='cpu'))
zero_optimizer_sd = [sd['optimizer_state_dict'] for sd in zero_sd_list]
print(
f"successfully loaded {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}"
)
return zero_optimizer_sd
def _checkpoint_tag_validation(self, tag):
if self.checkpoint_tag_validation_enabled():
s_hash = hashlib.sha1(tag.encode())
bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device)
max_bhash = bhash.clone()
min_bhash = bhash.clone()
dist.all_reduce(max_bhash, op=torch.distributed.ReduceOp.MAX)
dist.all_reduce(min_bhash, op=torch.distributed.ReduceOp.MIN)
valid = all(min_bhash == bhash) and all(max_bhash == bhash)
msg = f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across " \
"all ranks. Including rank unique information in checkpoint tag could cause issues when " \
"restoring with different world sizes."
if self.checkpoint_tag_validation_fail():
assert valid, msg
elif not valid:
logger.warning(msg)
def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True):
r"""Save training checkpoint
Arguments:
save_dir: Required. Directory for saving the checkpoint
tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is
used if not provided. Tag name must be the same across all ranks.
client_state: Optional. State dictionary used for saving required training states in the client code.
save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint.
Important: all processes must call this method and not just the process with rank 0. This is
because each process needs to save its master weights and scheduler+optimizer states. This
method will hang waiting to synchronize with other processes if it's called just for the
process with rank 0.
"""
if self.zero_optimization_partition_weights():
# Prepare for state_dict() by ensuring all parameters are partitioned
self.optimizer.save_checkpoint_prologue()
# This is to make sure the checkpoint names are created without collision
# There seems to be an issue creating them in parallel
# Ensure save_dir directory exists
os.makedirs(save_dir, exist_ok=True)
if tag is None:
tag = f"global_step{self.global_steps}"
# Ensure tag is a string
tag = str(tag)
# Ensure checkpoint tag is consistent across ranks
self._checkpoint_tag_validation(tag)
if self.has_moe_layers:
self.save_non_zero_checkpoint = False
self._create_checkpoint_file(save_dir, tag, False)
self._save_moe_checkpoint(save_dir, tag, client_state=client_state)
if self.save_non_zero_checkpoint:
self._create_checkpoint_file(save_dir, tag, False)
self._save_checkpoint(save_dir, tag, client_state=client_state)
if self.save_zero_checkpoint:
self._create_zero_checkpoint_files(save_dir, tag)
self._save_zero_checkpoint(save_dir, tag)
# Save latest checkpoint tag
if save_latest:
with open(os.path.join(save_dir, 'latest'), 'w') as fd:
fd.write(tag)
if self.zero_optimization_partition_weights():
self.optimizer.save_checkpoint_epilogue()
return True
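# Illustrative usage sketch (assumption, not from the original source): every rank must
# make this call, typically on a fixed step interval:
#
#     if engine.global_steps % 1000 == 0:
#         engine.save_checkpoint('/tmp/ckpts',
#                                client_state={'step': engine.global_steps})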
def _get_moe_state_dict(self, full_state_dict, num_local_experts, expp_rank):
"""Compute moe and non moe state dict from complete local model state dict
key : global_expert_id
value : state_dict
experts_state_dict =
{
'0': {
'models.seq2seq.encoder.layers.0.experts.moe.experts.experts.0.fc1.weight' <class 'torch.Tensor'>,
'models.seq2seq.encoder.layers.1.experts.moe.experts.experts.0.fc1.weight' <class 'torch.Tensor'>,
'models.seq2seq.encoder.layers.2.experts.moe.experts.experts.0.fc1.weight' <class 'torch.Tensor'>,
...
},
'1' : {
...
}
}
returns experts_state_dict, model_state_dict
"""
experts_state_dict, moe_state_dict = defaultdict(dict), {}
for key in list(full_state_dict.keys()):
if 'expert' in key and 'moe.gate.wg.weight' not in key:
moe_state_dict[key] = full_state_dict.pop(key)
non_moe_state_dict = full_state_dict
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
for key in list(moe_state_dict.keys()):
m = re.match(f".*{moe_str_prefix}([0-9]+).*", key)
local_expert_id = None
if not m:
logger.warning(f'No expert found in key {key}.')
else:
local_expert_id = m.group(1)
global_expert_id = expp_rank * num_local_experts + int(local_expert_id)
expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}',
f'{moe_str_prefix}{global_expert_id}')
experts_state_dict[str(global_expert_id)][expert_key] = moe_state_dict.pop(
key)
return experts_state_dict, non_moe_state_dict
def _save_moe_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None.
self._curr_ckpt_path = os.path.join(save_dir, tag)
""""
experts_state_dict = {
'e_id' : state_dict_for_eid
}
"""
expp_rank = groups.get_expert_parallel_rank()
exp_dp_rank = groups.get_expert_data_parallel_rank()
# In the case of E + D parallelism, only the
# first expert parallel group should save the expert weights
# since each expert parallel group is a copy of the model's experts
if exp_dp_rank != 0:
return
num_local_experts = self.num_experts // self.ep_world_size
experts_state_dict, model_state_dict = self._get_moe_state_dict(self.module_state_dict(), num_local_experts, expp_rank)
# Each rank saves its local experts
for global_expert_id, expert_state_dict in experts_state_dict.items():
expert_save_dir = self._get_expert_ckpt_name(save_dir, global_expert_id, tag)
logger.info(
f'Saving model expert {global_expert_id} checkpoint: {expert_save_dir}')
torch.save(expert_state_dict, expert_save_dir)
# Save optimizer states. They are different across each exp parallel rank.
optimizer_state = {
'optimizer':
self.optimizer.state_dict()
if self.optimizer and not self.zero_optimization() else None
}
torch.save(optimizer_state,
self._get_optimizer_ckpt_name(save_dir,
tag,
expp_rank))
if expp_rank == 0:
# TODO: update num experts info, etc. in checkpoint
state = {
'module':
model_state_dict,
'lr_scheduler':
self.lr_scheduler.state_dict()
if self.lr_scheduler is not None else None,
'csr_tensor_module_names':
self.csr_tensor_module_names,
'skipped_steps':
self.skipped_steps,
'global_steps':
self.global_steps,
'global_samples':
self.global_samples,
'dp_world_size':
self.dp_world_size,
'mp_world_size':
self.mp_world_size,
'num_experts':
self.num_experts
}
state.update(client_state)
logger.info(f'Saving model checkpoint: {save_path}')
torch.save(state, save_path)
self._curr_save_path = None
def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):
name_function = self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name
try:
checkpoint_name = name_function(save_dir, tag)
ensure_directory_exists(checkpoint_name)
except Exception as e:
logger.error(f'Failed saving model checkpoint to {save_dir} with tag {tag}: {e}')
return False
return True
def _create_zero_checkpoint_files(self, save_dir, tag):
success = True
# zero checkpoint files are created sequentially
for rank in range(self.world_size):
if rank == self.global_rank:
success = self._create_checkpoint_file(save_dir, tag, True)
dist.barrier()
return success
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None.
self._curr_ckpt_path = os.path.join(save_dir, tag)
state = dict(module=self.module_state_dict(),
buffer_names=self._get_buffer_names(),
optimizer=self.optimizer.state_dict()
if self.optimizer and not self.zero_optimization() else None,
lr_scheduler=self.lr_scheduler.state_dict()
if self.lr_scheduler is not None else None,
csr_tensor_module_names=self.csr_tensor_module_names,
skipped_steps=self.skipped_steps,
global_steps=self.global_steps,
global_samples=self.global_samples,
dp_world_size=self.dp_world_size,
mp_world_size=self.mp_world_size,
ds_config=self.config,
ds_version=version)
state.update(client_state)
log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1])
#logger.info('Saving model checkpoint: {}'.format(save_path))
torch.save(state, save_path)
self._curr_save_path = None
def _get_buffer_names(self):
buffer_names = []
# we save buffer names so that we could extract later the real buffers from the saved
# state_dict["module"] in the non-zero checkpoint - the buffers are already there but they
# are intermixed with param placeholders
# have to traverse the tree to be able to skip non-persistent buffers
def get_layer_named_buffers(module, prefix=""):
for name, buf in module.named_buffers(recurse=False):
if buf is not None and name not in module._non_persistent_buffers_set:
buffer_names.append(prefix + name)
for name, child in module.named_children():
if child is not None:
get_layer_named_buffers(child, prefix + name + ".")
get_layer_named_buffers(self.module, prefix="")
return buffer_names
def _get_zero_param_shapes(self):
"""Returns a dict of name to shape mapping, only for the flattened fp32 weights saved by the
optimizer. The names are exactly as in state_dict. The order is absolutely important, since
the saved data is just flattened data with no identifiers and requires reconstruction in the
same order it was saved.
We can't rely on self.module.named_parameters() to get the saved tensors, as some params
will be missing and others unsaved and then it'd be impossible to reconstruct state_dict
from the flattened weights.
optimizer.fp16_groups seems to be the easiest to use as it's in all zeroX versions.
"""
param_group_shapes = []
cnt = 0
numel = 0
# zero2 started using a round_robin_fp16_groups which is a shuffled version of fp16_groups -
# if we don't use it, we get parameters ordered incorrectly
if hasattr(self.optimizer, "round_robin_fp16_groups"):
fp16_groups = self.optimizer.round_robin_fp16_groups
else:
fp16_groups = self.optimizer.fp16_groups
for fp16_group in fp16_groups:
param_shapes = OrderedDict()
for param in fp16_group:
cnt += 1
numel += param.ds_numel if hasattr(param, "ds_numel") else param.numel()
shape = param.ds_shape if hasattr(param, "ds_shape") else param.shape
if param not in self.param_names:
raise ValueError(f"failed to find optimizer param in named params")
name = self.param_names[param]
param_shapes[name] = shape
# uncomment to debug zero_to_fp32.py problems
# if self.global_rank == 0: print(f"saving param {name} {shape} (numel={shape.numel()})")
param_group_shapes.append(param_shapes)
# if self.global_rank == 0: print(f"Total saved {numel} numels in {cnt} params")
return param_group_shapes
def _copy_recovery_script(self, save_path):
base_dir = os.path.dirname(os.path.dirname(__file__))
script = "zero_to_fp32.py"
src = os.path.join(base_dir, "utils", script)
dst = os.path.join(save_path, script)
#logger.info(f"creating recovery script {dst}")
copyfile(src, dst)
# make executable
os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)
def _save_zero_checkpoint(self, save_path, tag):
zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)
zero_sd = dict(optimizer_state_dict=self.optimizer.state_dict(),
param_shapes=self._get_zero_param_shapes(),
ds_config=self.config,
ds_version=version)
torch.save(zero_sd, zero_checkpoint_name)
if self.global_rank == 0:
self._copy_recovery_script(save_path)
logger.info('zero checkpoint saved {}'.format(zero_checkpoint_name))
def _zero3_consolidated_fp16_state_dict(self):
"""
Get a full non-partitioned state_dict with fp16 weights on cpu.
Important: this function must be called on all ranks and not just rank 0.
This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but:
1. consolidates the weights from different partitions on gpu0
2. works on one layer at a time to require as little gpu0 memory as possible, by
moving the already consolidated weights to cpu
3. takes care to keep the shared params shared when gradually copying the params to cpu
Returns:
a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks
"""
import deepspeed
if not self.zero_optimization_partition_weights():
raise ValueError("this function requires ZeRO-3 mode")
state_dict = OrderedDict() if torch.distributed.get_rank() == 0 else None
shared_params = {}
def get_layer_state_dict(module, prefix=""):
# gather one layer at a time to be memory-efficient
# must use modifier_rank=0 to release GPU memory after each layer gathered
#see_memory_usage("before GatheredParameters", force=True)
with deepspeed.zero.GatheredParameters(list(
module.parameters(recurse=False)),
modifier_rank=0):
if torch.distributed.get_rank() == 0:
# handle params
for name, param in module.named_parameters(recurse=False):
if param is None:
continue
key = prefix + name
# can't rely on param.data_ptr() as it will be reused as weights gets
# gathered and reduced, but param.ds_id is unique across all zero weights
# (and shared params will have the same param.ds_id)
if param.ds_id in shared_params:
# shared weights
#print(f"`{key}` is shared with `{shared_params[param.ds_id]}`")
state_dict[key] = state_dict[shared_params[param.ds_id]]
else:
state_dict[key] = param.detach().cpu()
shared_params[param.ds_id] = key
#print(f"param {param.ds_id} {param.shape} {key} ")
# now buffers - not sure if need to take care of potentially shared weights here
for name, buf in module.named_buffers(recurse=False):
if buf is not None and name not in module._non_persistent_buffers_set:
state_dict[prefix + name] = buf.detach().cpu()
#see_memory_usage("after GatheredParameters", force=True)
for name, child in module.named_children():
if child is not None:
get_layer_state_dict(child, prefix + name + ".")
see_memory_usage("before get_layer_state_dict", force=False)
get_layer_state_dict(self.module, prefix="")
see_memory_usage("after get_layer_state_dict", force=False)
return state_dict
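# Illustrative usage (assumption, not from the original source): the gather is a
# collective, so all ranks call the method but only rank 0 gets a dict back:
#
#     sd = engine._zero3_consolidated_fp16_state_dict()
#     if torch.distributed.get_rank() == 0:
#         torch.save(sd, 'consolidated_fp16.pt')   # hypothetical output path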
def save_fp16_model(self, save_dir, save_filename="pytorch_model.bin"):
r"""Save fp16 model weights
This method saves the fp16 model weights at the desired destination.
Arguments:
save_dir: Required. Directory for saving the model
save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin``
Important: all processes must call this method and not just the process with rank 0. This is
because the processes need to work in sync to gather the weights. This method will hang
waiting to synchronize with other processes if it's called just for the process with rank 0.
"""
path = os.path.join(save_dir, save_filename)
if self.zero_optimization_partition_weights():
if self.zero_gather_fp16_weights_on_model_save():
# consolidation is expensive in time and memory and therefore isn't a default
state_dict = self._zero3_consolidated_fp16_state_dict()
else:
# the model will be bogus if not consolidated so don't confuse the user by saving it
logger.info(
f"Did not save the model {path} because `stage3_gather_fp16_weights_on_model_save` is False"
)
return
else:
state_dict = self.module.state_dict()
if torch.distributed.get_rank() == 0:
os.makedirs(save_dir, exist_ok=True)
logger.info(f"Saving model weights to {path}")
torch.save(state_dict, path)
| [] | [] | [
"OMPI_COMM_WORLD_LOCAL_RANK",
"LOCAL_RANK",
"DLWS_JOB_ID",
"DLTS_JOB_ID"
] | [] | ["OMPI_COMM_WORLD_LOCAL_RANK", "LOCAL_RANK", "DLWS_JOB_ID", "DLTS_JOB_ID"] | python | 4 | 0 | |
zwaymqtt.go | package main
import (
"os"
"log"
"flag"
"fmt"
"time"
"strings"
"regexp"
"errors"
"strconv"
"encoding/json"
"net/http"
"io/ioutil"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
pfl "github.com/davecheney/profile"
)
type MqttUpdate struct {
Topic string
Value string
}
type Gateway struct {
Key string
Topic string
Value string
Write bool
Type string
}
//command line variables
var zway_server string
var zway_username string
var zway_password string
var zway_home string
var zway_refresh int
var mqtt_server string
var mqtt_username string
var mqtt_password string
var mqtt_protocol string
var debug bool
var profile string
//used variables
var zway_timestamp int = 0
var zway_dataapi = "/ZWaveAPI/Data/"
var zway_zautoapi = "/ZAutomation/api/v1/"
var zway_runapi = "/ZWaveAPI/Run/"
var zway_cookiename = "ZWAYSession"
var http_client = new(http.Client)
var zway_cookie = new(http.Cookie)
var gateways []Gateway
var zway_retries int = 0
//ZWay enumerations
const (
BASIC_TYPE_CONTROLER = 1
BASIC_TYPE_STATIC_CONTROLER = 2
BASIC_TYPE_SLAVE = 3
BASIC_TYPE_ROUTING_SLAVE = 4
GENERIC_TYPE_THERMOSTAT = 8
GENERIC_TYPE_BINARY_SWITCH = 16
GENERIC_TYPE_MULTILEVEL_SWITCH = 17
GENERIC_TYPE_SWITCH_REMOTE = 18
GENERIC_TYPE_SWITCH_TOGGLE = 19
GENERIC_TYPE_SECURITY_PANEL = 23
GENERIC_TYPE_BINARY_SENSOR = 32
GENERIC_TYPE_MULTILEVEL_SENSOR = 33
GENERIC_TYPE_METER = 49
GENERIC_TYPE_ENTRY_CONTROL = 64
COMMAND_CLASS_NO_OPERATION = 0
COMMAND_CLASS_BASIC = 32
COMMAND_CLASS_CONTROLLER_REPLICATION = 33
COMMAND_CLASS_APPLICATION_STATUS = 34
COMMAND_CLASS_ZIP_SERVICES = 35
COMMAND_CLASS_ZIP_SERVER = 36
COMMAND_CLASS_SWITCH_BINARY = 37
COMMAND_CLASS_SWITCH_MULTILEVEL = 38
COMMAND_CLASS_SWITCH_ALL = 39
COMMAND_CLASS_SWITCH_TOGGLE_BINARY = 40
COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL = 41
COMMAND_CLASS_CHIMNEY_FAN = 42
COMMAND_CLASS_SCENE_ACTIVATION = 43
COMMAND_CLASS_SCENE_ACTUATOR_CONF = 44
COMMAND_CLASS_SCENE_CONTROLLER_CONF = 45
COMMAND_CLASS_ZIP_CLIENT = 46
COMMAND_CLASS_ZIP_ADV_SERVICES = 47
COMMAND_CLASS_SENSOR_BINARY = 48
COMMAND_CLASS_SENSOR_MULTILEVEL = 49
COMMAND_CLASS_METER = 50
COMMAND_CLASS_ZIP_ADV_SERVER = 51
COMMAND_CLASS_ZIP_ADV_CLIENT = 52
COMMAND_CLASS_METER_PULSE = 53
COMMAND_CLASS_THERMOSTAT_HEATING = 56
COMMAND_CLASS_METER_TABLE_CONFIG = 60
COMMAND_CLASS_METER_TABLE_MONITOR = 61
COMMAND_CLASS_METER_TABLE_PUSH = 62
COMMAND_CLASS_THERMOSTAT_MODE = 64
COMMAND_CLASS_THERMOSTAT_OPERATING_STATE = 66
COMMAND_CLASS_THERMOSTAT_SET_POINT = 67
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 68
COMMAND_CLASS_THERMOSTAT_FAN_STATE = 69
COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE = 70
COMMAND_CLASS_THERMOSTAT_SETBACK = 71
COMMAND_CLASS_DOOR_LOCK_LOGGING = 76
COMMAND_CLASS_SCHEDULE_ENTRY_LOCK = 78
COMMAND_CLASS_BASIC_WINDOW_COVERING = 80
COMMAND_CLASS_MTP_WINDOW_COVERING = 81
COMMAND_CLASS_SCHEDULE = 83
COMMAND_CLASS_CRC_16_ENCAP = 86
COMMAND_CLASS_ASSOCIATION_GROUP_INFO = 89
COMMAND_CLASS_DEVICE_RESET_LOCALLY = 90
COMMAND_CLASS_CENTRAL_SCENE = 91
COMMAND_CLASS_IP_ASSOCIATION = 92
COMMAND_CLASS_ANTITHEFT = 93
COMMAND_CLASS_ZWAVEPLUS_INFO = 94
COMMAND_CLASS_MULTI_INSTANCE = 96
COMMAND_CLASS_DOOR_LOCK = 98
COMMAND_CLASS_USER_CODE = 99
COMMAND_CLASS_BARRIER_OPERATOR = 102
COMMAND_CLASS_CONFIGURATION = 112
COMMAND_CLASS_ALARM = 113
COMMAND_CLASS_MANUFACTURER_SPECIFIC = 114
COMMAND_CLASS_POWER_LEVEL = 115
COMMAND_CLASS_PROTECTION = 117
COMMAND_CLASS_LOCK = 118
COMMAND_CLASS_NODE_NAMING = 119
COMMAND_CLASS_FIRMWARE_UPDATE = 122
COMMAND_CLASS_GROUPING_NAME = 123
COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE = 124
COMMAND_CLASS_REMOTE_ASSOCIATION = 125
COMMAND_CLASS_BATTERY = 128
COMMAND_CLASS_CLOCK = 129
COMMAND_CLASS_HAIL = 130
COMMAND_CLASS_WAKEUP = 132
COMMAND_CLASS_ASSOCIATION = 133
COMMAND_CLASS_VERSION = 134
COMMAND_CLASS_INDICATOR = 135
COMMAND_CLASS_PROPRIETRAY = 136
COMMAND_CLASS_LANGUAGE = 137
COMMAND_CLASS_TIME = 138
COMMAND_CLASS_TIME_PARAMETERS = 139
COMMAND_CLASS_GEOGRAPHIC_LOCATION = 140
COMMAND_CLASS_COMPOSITE = 141
COMMAND_CLASS_MULTICHANNEL_ASSOCIATION = 142
COMMAND_CLASS_MULTI_CMD = 143
COMMAND_CLASS_ENERGY_PRODUCTION = 144
COMMAND_CLASS_MANUFACTURER_PROPRIETRATY = 145
COMMAND_CLASS_SCREEN_MD = 146
COMMAND_CLASS_SCREEN_ATTRIBUTES = 147
COMMAND_CLASS_SIMPLE_AV_CONTROL = 148
COMMAND_CLASS_AV_CONTENT_DIRECTORY_MD = 149
COMMAND_CLASS_RENDERER_STATUS = 150
COMMAND_CLASS_AV_CONTENT_SEARCH_MD = 151
COMMAND_CLASS_SECURITY = 152
COMMAND_CLASS_AV_TAGGING_MD = 153
COMMAND_CLASS_IP_CONFIGURATION = 154
COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION = 155
COMMAND_CLASS_ALARM_SENSOR = 156
COMMAND_CLASS_SILENCE_ALARM = 157
COMMAND_CLASS_SENSOR_CONFIGURATION = 158
COMMAND_CLASS_MARK = 239
COMMAND_CLASS_NON_INEROPERABLE = 240
)
var ZWaveClassNames = [...]string{
COMMAND_CLASS_NO_OPERATION: "command no operation",
COMMAND_CLASS_BASIC: "command basic",
COMMAND_CLASS_CONTROLLER_REPLICATION: "command controller replication",
COMMAND_CLASS_APPLICATION_STATUS: "command application status",
COMMAND_CLASS_ZIP_SERVICES: "command zip services",
COMMAND_CLASS_ZIP_SERVER: "command zip server",
COMMAND_CLASS_SWITCH_BINARY: "command switch binary",
COMMAND_CLASS_SWITCH_MULTILEVEL: "command switch multilevel",
COMMAND_CLASS_SWITCH_ALL: "command switch all",
COMMAND_CLASS_SWITCH_TOGGLE_BINARY: "command switch toggle binary",
COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL: "command switch toggle multilevel",
COMMAND_CLASS_CHIMNEY_FAN: "command chimney fan",
COMMAND_CLASS_SCENE_ACTIVATION: "command scene activation",
COMMAND_CLASS_SCENE_ACTUATOR_CONF: "command scene actuator configuration",
COMMAND_CLASS_SCENE_CONTROLLER_CONF: "command scene controller configuration",
COMMAND_CLASS_ZIP_CLIENT: "command zip client",
COMMAND_CLASS_ZIP_ADV_SERVICES: "command zip adv services",
COMMAND_CLASS_SENSOR_BINARY: "command sensor binary",
COMMAND_CLASS_SENSOR_MULTILEVEL: "command sensor multilevel",
COMMAND_CLASS_METER: "command meter",
COMMAND_CLASS_ZIP_ADV_SERVER: "command zip adv server",
COMMAND_CLASS_ZIP_ADV_CLIENT: "command zip adv client",
COMMAND_CLASS_METER_PULSE: "command meter pulse",
COMMAND_CLASS_THERMOSTAT_HEATING: "command thermostat heating",
COMMAND_CLASS_METER_TABLE_CONFIG: "command meter table config",
COMMAND_CLASS_METER_TABLE_MONITOR: "command meter table monitor",
COMMAND_CLASS_METER_TABLE_PUSH: "command meter table push",
COMMAND_CLASS_THERMOSTAT_MODE: "command thermostat mode",
COMMAND_CLASS_THERMOSTAT_OPERATING_STATE: "command thermostat operating state",
COMMAND_CLASS_THERMOSTAT_SET_POINT: "command thermostat set point",
COMMAND_CLASS_THERMOSTAT_FAN_MODE: "command thermostat fan mode",
COMMAND_CLASS_THERMOSTAT_FAN_STATE: "command thermostat fan state",
COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE: "command climate control schedule",
COMMAND_CLASS_THERMOSTAT_SETBACK: "command thermostat setback",
COMMAND_CLASS_DOOR_LOCK_LOGGING: "command door lock logging",
COMMAND_CLASS_SCHEDULE_ENTRY_LOCK: "command schedule entry lock",
COMMAND_CLASS_BASIC_WINDOW_COVERING: "command basic window covering",
COMMAND_CLASS_MTP_WINDOW_COVERING: "command mtp window covering",
COMMAND_CLASS_SCHEDULE: "command schedule",
COMMAND_CLASS_CRC_16_ENCAP: "command crc 16 encap",
COMMAND_CLASS_ASSOCIATION_GROUP_INFO: "command association group info",
COMMAND_CLASS_DEVICE_RESET_LOCALLY: "command device reset locally",
COMMAND_CLASS_CENTRAL_SCENE: "command central scene",
COMMAND_CLASS_IP_ASSOCIATION: "command ip association",
COMMAND_CLASS_ANTITHEFT: "command antitheft",
COMMAND_CLASS_ZWAVEPLUS_INFO: "command zwaveplus info",
COMMAND_CLASS_MULTI_INSTANCE: "command multi instance",
COMMAND_CLASS_DOOR_LOCK: "command door lock",
COMMAND_CLASS_USER_CODE: "command user code",
COMMAND_CLASS_BARRIER_OPERATOR: "command barrier operator",
COMMAND_CLASS_CONFIGURATION: "command configuration",
COMMAND_CLASS_ALARM: "command alarm",
COMMAND_CLASS_MANUFACTURER_SPECIFIC: "command manufacturer specific",
COMMAND_CLASS_POWER_LEVEL: "command power level",
COMMAND_CLASS_PROTECTION: "command protection",
COMMAND_CLASS_LOCK: "command lock",
COMMAND_CLASS_NODE_NAMING: "command node naming",
COMMAND_CLASS_FIRMWARE_UPDATE: "command firmware update",
COMMAND_CLASS_GROUPING_NAME: "command grouping name",
COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE: "command remote association activate",
COMMAND_CLASS_REMOTE_ASSOCIATION: "command remote association",
COMMAND_CLASS_BATTERY: "command battery",
COMMAND_CLASS_CLOCK: "command clock",
COMMAND_CLASS_HAIL: "command hail",
COMMAND_CLASS_WAKEUP: "command wakeup",
COMMAND_CLASS_ASSOCIATION: "command association",
COMMAND_CLASS_VERSION: "command version",
COMMAND_CLASS_INDICATOR: "command indicator",
COMMAND_CLASS_PROPRIETRAY: "command proprietary",
COMMAND_CLASS_LANGUAGE: "command language",
COMMAND_CLASS_TIME: "command time",
COMMAND_CLASS_TIME_PARAMETERS: "command time parameters",
COMMAND_CLASS_GEOGRAPHIC_LOCATION: "command geographic location",
COMMAND_CLASS_COMPOSITE: "command composite",
COMMAND_CLASS_MULTICHANNEL_ASSOCIATION: "command multichannel association",
COMMAND_CLASS_MULTI_CMD: "command multi cmd",
COMMAND_CLASS_ENERGY_PRODUCTION: "command energy production",
COMMAND_CLASS_MANUFACTURER_PROPRIETRATY: "command manufacturer proprietary",
COMMAND_CLASS_SCREEN_MD: "command screen md",
COMMAND_CLASS_SCREEN_ATTRIBUTES: "command screen attributes",
COMMAND_CLASS_SIMPLE_AV_CONTROL: "command simple av control",
COMMAND_CLASS_AV_CONTENT_DIRECTORY_MD: "command av content directory",
COMMAND_CLASS_RENDERER_STATUS: "command renderer status",
COMMAND_CLASS_AV_CONTENT_SEARCH_MD: "command av content search md",
COMMAND_CLASS_SECURITY: "command security",
COMMAND_CLASS_AV_TAGGING_MD: "command av tagging md",
COMMAND_CLASS_IP_CONFIGURATION: "command ip configuration",
COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION:
"command association command configuration",
COMMAND_CLASS_ALARM_SENSOR: "command alarm sensor",
COMMAND_CLASS_SILENCE_ALARM: "command silence alarm",
COMMAND_CLASS_SENSOR_CONFIGURATION: "command sensor configuration",
COMMAND_CLASS_MARK: "command mark",
COMMAND_CLASS_NON_INEROPERABLE: "command non interoperable",
}
var ZWaveTypeNames = [...]string{
BASIC_TYPE_CONTROLER: "basic controller",
BASIC_TYPE_STATIC_CONTROLER: "basic static controller",
BASIC_TYPE_SLAVE: "basic slave",
BASIC_TYPE_ROUTING_SLAVE: "basic routing slave",
GENERIC_TYPE_THERMOSTAT: "generic thermostat",
GENERIC_TYPE_BINARY_SWITCH: "generic binary switch",
GENERIC_TYPE_MULTILEVEL_SWITCH: "generic multilevel switch",
GENERIC_TYPE_SWITCH_REMOTE: "generic switch remote",
GENERIC_TYPE_SWITCH_TOGGLE: "generic switch toggle",
GENERIC_TYPE_SECURITY_PANEL: "generic security panel",
GENERIC_TYPE_BINARY_SENSOR: "generic binary sensor",
GENERIC_TYPE_MULTILEVEL_SENSOR: "generic multilevel sensor",
GENERIC_TYPE_METER: "generic meter",
GENERIC_TYPE_ENTRY_CONTROL: "generic entry control",
}
func (g *Gateway) ToString() string {
w := "->"
if g.Write { w = "<>" }
return fmt.Sprintf("%s %s %s (%s)", g.Key, w, g.Topic, g.Type)
}
func (g *Gateway) GetValue(update map[string]interface{}) string {
switch g.Type {
case "string":
value, err := jsonStringValue(g.Key + "." + g.Value,update)
if err == nil {
return value
}
case "int":
value, err := jsonFloatValue(g.Key + "." + g.Value,update)
if err == nil {
return fmt.Sprintf("%d", int(value))
}
case "float":
value, err := jsonFloatValue(g.Key + "." + g.Value,update)
if err == nil {
v := fmt.Sprintf("%.3f", value)
if strings.Contains(v,".") {
v = strings.TrimRight(v,"0.")
}
return v
}
case "bool":
value, err := jsonBoolValue(g.Key + "." + g.Value,update)
if err == nil {
return fmt.Sprintf("%t", value)
}
}
return ""
}
func init() {
//initialize command line parameters
flag.StringVar(&zway_server,"s","localhost:8083","Z-Way server name or ZWAY_SERVER environment variable")
flag.StringVar(&zway_username,"u","admin","Z-Way username or ZWAY_USERNAME environment variable")
flag.StringVar(&zway_password,"p","","Z-Way passsword or ZWAY_PASSWORD environment variable")
flag.StringVar(&zway_home,"h","razberry","mqtt topic root or ZWAY_HOME environment variable")
flag.StringVar(&mqtt_server,"m","localhost:1883","MQTT server or MQTT_SERVER environment variable")
flag.StringVar(&mqtt_username,"mu","","MQTT username or MQTT_USERNAME environment variable")
flag.StringVar(&mqtt_password,"mp","","MQTT password or MQTT_PASSWORD environment variable")
flag.StringVar(&mqtt_protocol,"proto","tcp","MQTT protocol tcp/ws/tls or MQTT_PROTOCOL environment variable")
flag.IntVar(&zway_refresh,"r",30,"Z-Way refresh rate in seconds or ZWAY_REFRESH environment variable")
flag.BoolVar(&debug,"v",false,"Show debug messages")
flag.StringVar(&profile,"profile","","Profile execution (cpu/mem/all)")
flag.Parse()
// check defaults against environment variables
if zway_server == "localhost:8083" && len(os.Getenv("ZWAY_SERVER")) > 0 {
zway_server = os.Getenv("ZWAY_SERVER")
}
if zway_username == "admin" && len(os.Getenv("ZWAY_USERNAME")) > 0 {
zway_username = os.Getenv("ZWAY_USERNAME")
}
if len(zway_password) == 0 && len(os.Getenv("ZWAY_PASSWORD")) > 0 {
zway_password = os.Getenv("ZWAY_PASSWORD")
}
if zway_home == "razberry" && len(os.Getenv("ZWAY_HOME")) > 0 {
zway_home = os.Getenv("ZWAY_HOME")
}
if zway_refresh == 30 && len(os.Getenv("ZWAY_REFRESH")) > 0 {
zway_refresh, _ = strconv.Atoi(os.Getenv("ZWAY_REFRESH"))
}
if mqtt_server == "localhost:1883" && len(os.Getenv("MQTT_SERVER")) > 0 {
mqtt_server = os.Getenv("MQTT_SERVER")
}
if len(mqtt_username) == 0 && len(os.Getenv("MQTT_USERNAME")) > 0 {
mqtt_username = os.Getenv("MQTT_USERNAME")
}
if len(mqtt_password) == 0 && len(os.Getenv("MQTT_PASSWORD")) > 0 {
mqtt_password = os.Getenv("MQTT_PASSWORD")
}
if mqtt_protocol == "tcp" && len(os.Getenv("MQTT_PROTOCOL")) > 0 {
mqtt_protocol = os.Getenv("MQTT_PROTOCOL")
}
if !debug && len(os.Getenv("ZWAYMQTT_DEBUG")) > 0 {
if os.Getenv("ZWAYMQTT_DEBUG") == "true" {
debug = true
}
}
//standardise hostname values to <host>:<port>
zway_match, err := regexp.MatchString(":[0-9]+$",zway_server)
if err != nil {
log.Fatal(fmt.Sprintf("Could not use regexp: %s", err))
}
	if !zway_match {
log.Print("Setting port 8083 on given Z-Way server")
zway_server = zway_server + ":8083"
}
mqtt_match, err := regexp.MatchString(":[0-9]+$",mqtt_server)
if err != nil {
log.Fatal(fmt.Sprintf("Could not use regexp: %s", err))
}
	if !mqtt_match {
log.Print("Setting port 1883 on given MQTT server")
mqtt_server = mqtt_server + ":1883"
}
}
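// Illustrative usage (an assumption, not part of the original source): every
// flag can also be supplied through its environment variable when the flag is
// left at its default, and servers given without a port get the default port
// appended. With a hypothetical binary name "zwaymqtt":
//
//	ZWAY_SERVER=rpi.local ZWAY_PASSWORD=secret ./zwaymqtt -m broker.local -r 60 -v
//
// talks to rpi.local:8083 and broker.local:1883.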
func getzway() string {
if (debug) { log.Print("Getting Z-Way update.") }
url := fmt.Sprintf("http://%s%s%d", zway_server, zway_dataapi, zway_timestamp)
req, err := http.NewRequest("GET",url,nil)
if err != nil {
log.Printf("Error initializing request: %s", err)
}
if zway_cookie != nil {
req.AddCookie(zway_cookie)
}
rsp, err := http_client.Do(req)
if err != nil {
log.Printf("Could not make zway update: %s", err)
return ""
}
defer rsp.Body.Close()
bdy, err := ioutil.ReadAll(rsp.Body)
if err != nil {
log.Printf("could not read body: %s", err)
}
return string(bdy)
}
func authzway() {
//getting Zway authentication cookie
url := fmt.Sprintf("http://%s%slogin", zway_server, zway_zautoapi)
login := fmt.Sprintf("{\"login\": \"%s\", \"password\": \"%s\"}",
zway_username, zway_password)
req, err := http.NewRequest("POST",url,strings.NewReader(login))
if err != nil {
log.Printf("Error initializing request: %s", err)
}
req.Header.Set("Content-Type", "application/json")
rsp, err := http_client.Do(req)
if err != nil {
log.Fatalf("Could not login to Z-Way: %s", err)
}
cookies := rsp.Cookies()
for i := range cookies {
if cookies[i].Name == zway_cookiename && cookies[i].Path == "/" {
zway_cookie = cookies[i]
break
}
}
if zway_cookie == nil {
log.Fatal("Z-Way cookie not found.")
}
}
func jsonValue(key string, target map[string]interface{}) (interface{}, error) {
//if the value is directly found... return it
if target[key] != nil {
return target[key], nil
}
current := target
keys := strings.Split(key,".")
for i := range keys[:len(keys)-1] {
value := current[keys[i]]
if value == nil {
			return nil, fmt.Errorf("JSON key not found (%s)", keys[i])
}
current = value.(map[string]interface{})
}
key = keys[len(keys)-1]
value := current[key]
if value != nil {
return value, nil
}
return nil, errors.New("Json Value non existent.")
}
func jsonStringValue(key string, target map[string]interface{}) (string, error) {
iface, err := jsonValue(key,target)
if err != nil {
return "", err
}
return iface.(string), nil
}
func jsonIntValue(key string, target map[string]interface{}) (int, error) {
iface, err := jsonValue(key,target)
if err != nil {
return 0, err
}
return iface.(int), nil
}
func jsonFloatValue(key string, target map[string]interface{}) (float64, error) {
iface, err := jsonValue(key,target)
if err != nil {
return 0.0, err
}
return iface.(float64), nil
}
func jsonMapValue(key string, target map[string]interface{}) (map[string]interface{}, error) {
iface, err := jsonValue(key,target)
if err != nil {
return nil, err
}
return iface.(map[string]interface{}), nil
}
func jsonBoolValue(key string, target map[string]interface{}) (bool, error) {
iface, err := jsonValue(key,target)
if err != nil {
return false, err
}
return iface.(bool), nil
}
func zwaygetcmdclassdata(cmdClasses map[string]interface{}, cmdClass int) (map[string]interface{}, error) {
iface := cmdClasses[strconv.Itoa(cmdClass)]
if iface == nil {
return nil, errors.New("Command class not implemented by instance")
}
class := iface.(map[string]interface{})
data, err := jsonMapValue("data",class)
if err != nil {
return nil, err
}
return data, nil
}
func normName(name string) string {
//trim
res := strings.Trim(name," /")
//lower
res = strings.ToLower(res)
//spaces
res = strings.Replace(res," ","_",-1)
//percents
res = strings.Replace(res,"%","pc",-1)
//deg
res = strings.Replace(res,"°","",-1)
return res
}
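// Example (derived from the replacement rules above, not present in the
// original source): normName(" Bedroom Temp °C ") returns "bedroom_temp_c"
// and normName("Battery %") returns "battery_pc".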
func zwayparsedevices(update map[string]interface{}) {
log.Print("Parse Z-Way devices")
for node, info := range update {
m := info.(map[string]interface{})
basicType, err := jsonFloatValue("data.basicType.value",m)
if err != nil {
log.Printf("basic type not found: %s", err)
continue
}
genericType, err:= jsonFloatValue("data.genericType.value",m)
if err != nil {
log.Printf("generic type not found: %s", err)
continue
}
givenName, err := jsonStringValue("data.givenName.value",m)
if err != nil {
log.Printf("given name not found: %s", err)
continue
}
//specificType := int(jsonFloatValue("data.specificType.value",m))
isControler := false
switch int(basicType) {
case BASIC_TYPE_CONTROLER:
isControler = true
case BASIC_TYPE_STATIC_CONTROLER:
isControler = true
}
//skip if controller
if isControler {
log.Printf("Skipping node %s: %s", node, ZWaveTypeNames[int(basicType)])
continue
}
//skip if no name
if len(givenName) == 0 {
log.Printf("given name empty")
continue
}
//parsing instances
instances, err := jsonMapValue("instances",m)
if err != nil {
continue
}
for i := range instances {
instance := instances[i].(map[string]interface{})
commandClasses, err := jsonMapValue("commandClasses",instance)
if err != nil {
log.Printf("command classes not found: %s", err)
continue
}
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_BATTERY)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/battery",
zway_home, normName(givenName),i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "last.value", Write:false, Type: "int"})
switch int(genericType) {
case GENERIC_TYPE_BINARY_SWITCH:
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SWITCH_BINARY)
topic := fmt.Sprintf("%s/actuators/binary/%s/%s/switch",
zway_home, normName(givenName), i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:true, Type: "bool"})
case GENERIC_TYPE_MULTILEVEL_SWITCH:
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SWITCH_MULTILEVEL)
topic := fmt.Sprintf("%s/actuators/analogic/%s/%s/switch",
zway_home, normName(givenName),i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:true, Type: "float"})
case GENERIC_TYPE_BINARY_SENSOR:
data, err := zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_SENSOR_BINARY)
if err != nil {
break
}
sensorType := "generic"
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SENSOR_BINARY)
topic := fmt.Sprintf("%s/sensors/binary/%s/%s/%s",
zway_home, normName(givenName), i, sensorType)
_, err = jsonBoolValue("level.value",update)
if err == nil {
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:false, Type: "bool"})
} else {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_SENSOR_BINARY,k)
topic := fmt.Sprintf("%s/sensors/binary/%s/%s/%s",
zway_home,normName(givenName), i, normName(sensorType))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:false, Type: "bool"})
}
}
}
fallthrough
case GENERIC_TYPE_MULTILEVEL_SENSOR:
data, err := zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_SENSOR_MULTILEVEL)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
sensorScale, err := jsonStringValue("scaleString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_SENSOR_MULTILEVEL,k)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(sensorType),
normName(sensorScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:false, Type: "float"})
}
}
}
case GENERIC_TYPE_METER:
data, err := zwaygetcmdclassdata(commandClasses,COMMAND_CLASS_METER)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
sensorScale, err := jsonStringValue("scaleString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_METER,k)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(sensorType),
normName(sensorScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:false, Type: "float"})
}
}
}
case GENERIC_TYPE_THERMOSTAT:
//get the binary switch to enable/disable thermostat
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SWITCH_BINARY)
topic := fmt.Sprintf("%s/actuators/binary/%s/%s/switch",
zway_home, normName(givenName), i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:true, Type: "bool"})
			//TODO: information about the set point
data, err := zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_THERMOSTAT_SET_POINT)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
setpoint := v.(map[string]interface{})
setpointType, err := jsonStringValue("modeName.value",
setpoint)
if err != nil {
log.Printf("Could not get set point mode: %s", err)
continue
}
setpointScale, err := jsonStringValue("scaleString.value",
setpoint)
if err != nil {
log.Printf("Could not get setpoint scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_THERMOSTAT_SET_POINT,k)
topic := fmt.Sprintf("%s/actuators/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(setpointType),
normName(setpointScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:true, Type: "int"})
}
}
}
data, err = zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_SENSOR_MULTILEVEL)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
sensorScale, err := jsonStringValue("scaleString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_SENSOR_MULTILEVEL,k)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(sensorType),
normName(sensorScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:false, Type: "float"})
}
}
}
default:
log.Printf("device not implemented: type: %f / name: %s", genericType, givenName)
}
}
}
}
func zwayupdategateways(update map[string]interface{}, mqtt_updates chan<- MqttUpdate) {
if (debug) { log.Print("Update Z-Way devices") }
for _, g := range gateways {
//Z-Way is always true
value := g.GetValue(update)
if len(value) > 0 {
if (debug) { log.Printf("ZWAY: %s / Value: %s", g.ToString(), value ) }
mqtt_updates <- MqttUpdate{Topic: g.Topic, Value: value}
}
}
}
func normalizeJson(json map[string]interface{}) map[string]interface{} {
for k, v := range json {
if strings.IndexRune(k,'.') > -1 {
keys := strings.Split(k,".")
nkey := keys[0]
rest := strings.Join(keys[1:len(keys)],".")
tmp := make(map[string]interface{})
tmp[rest] = v.(map[string]interface{})
if json[nkey] != nil {
for k2, v2 := range json[nkey].(map[string]interface{}) {
tmp[k2] = v2
}
}
json[nkey] = normalizeJson(tmp)
delete(json, k)
}
}
return json
}
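// Example (illustrative, not present in the original source): a flattened key
// such as {"a.b": {"c": 1}} is expanded into nested maps, giving
// {"a": {"b": {"c": 1}}}; keys without dots are left untouched.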
func checkzwayupdate(update string,mqtt_updates chan<- MqttUpdate) {
var f interface{}
err := json.Unmarshal([]byte(update), &f)
if err != nil {
log.Printf("Error decoding json: %s", err)
}
m := f.(map[string]interface{})
m = normalizeJson(m)
if zway_timestamp == 0 {
devices, err := jsonMapValue("devices",m)
if err != nil {
log.Printf("devices not found: %s", err)
return
}
zwayparsedevices(devices)
}
zwayupdategateways(m,mqtt_updates)
zway_timestampf, err := jsonFloatValue("updateTime",m)
if err != nil {
log.Printf("timestamp not found: %s", err)
return
}
zway_timestamp = int(zway_timestampf)
}
//define a function for the default message handler
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
topic := msg.Topic()
value := string(msg.Payload())
for _, g := range gateways {
if g.Topic == topic { g.Set(value) }
}
}
func (g *Gateway) Set(value string) {
if !g.Write {
if (debug) { log.Printf("MQTT: %s / Readonly", g.ToString()) }
return
}
if g.Get() == value {
if (debug) { log.Printf("MQTT: %s / Value not changed", g.ToString()) }
return
}
//check value
switch g.Type {
case "int":
if strings.Contains(value,".") {
value = strings.TrimRight(value,"0.")
}
i, err := strconv.Atoi(value)
if err != nil {
log.Printf("MQTT: %s / value not int: %s", g.ToString(), value)
return
}
value = fmt.Sprintf("%d",i)
case "float":
if strings.Contains(value,".") {
value = strings.TrimRight(value,"0.")
}
f, err := strconv.ParseFloat(value,64)
if err != nil {
log.Printf("MQTT: %s / value not float: %s", g.ToString(), value)
return
}
value = fmt.Sprintf("%.3f", f)
}
log.Printf("MQTT: %s / Value: %s ", g.ToString(), value)
key := g.Key
r := regexp.MustCompile("\\.([0-9]+)(\\.|$)")
key = r.ReplaceAllString(key, "[$1].")
r = regexp.MustCompile("\\.data$")
key = r.ReplaceAllString(key,"")
result, _ := zwayget(zway_runapi,fmt.Sprintf("%s.Set(%s)", key, value))
if result != "null" {
log.Printf("Error updating value: %s", result)
}
}
func (g *Gateway) Get() string {
if (debug) { log.Print("Setting Z-Way value.") }
key := g.Key
r := regexp.MustCompile("\\.([0-9]+)\\.")
key = r.ReplaceAllString(key, "[$1].")
result, _ := zwayget(zway_runapi, fmt.Sprintf("%s.%s", key, g.Value))
return result
}
func zwayget(api string, path string) (string, error) {
url := fmt.Sprintf("http://%s%s%s", zway_server, api, path)
if (debug) { log.Printf("Http Get on Z-Way: %s", url) }
req, err := http.NewRequest("GET",url,nil)
if err != nil {
return "", err
}
if zway_cookie != nil {
req.AddCookie(zway_cookie)
}
rsp, err := http_client.Do(req)
if err != nil {
return "", err
}
defer rsp.Body.Close()
bdy, err := ioutil.ReadAll(rsp.Body)
if err != nil {
return "", err
}
result := string(bdy)
return result, nil
}
func main() {
//start profiling
if len(profile) > 0 {
log.Print("Profiling enabled")
cfg := pfl.Config{}
if profile=="mem" || profile=="all" {
cfg.MemProfile = true
}
if profile=="cpu" || profile=="all" {
cfg.CPUProfile = true
}
defer pfl.Start(&cfg).Stop()
}
	//print the configuration in use
log.Print("Starting Z-Way to mqtt gateway...")
log.Printf("Z-Way server: %s", zway_server)
if len(zway_password) > 0 {
log.Printf("Z-Way user: %s", zway_username)
} else {
log.Print("Not using authentication as no password given.")
}
log.Printf("Z-Way refresh rate: %d", zway_refresh)
log.Printf("MQTT server: %s", mqtt_server)
	//authenticate to Z-Way
if len(zway_password) > 0 {
authzway()
}
//connect and subscribe to mqtt
//prepare
opts := MQTT.NewClientOptions()
opts.AddBroker(mqtt_protocol+"://"+mqtt_server)
opts.SetClientID("ZWayMQTT")
opts.SetDefaultPublishHandler(f)
opts.SetAutoReconnect(true)
if len(mqtt_username) > 0 && len(mqtt_password) > 0 {
opts.SetUsername(mqtt_username)
opts.SetPassword(mqtt_password)
}
//Connect
mqtt := MQTT.NewClient(opts)
if token := mqtt.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
//create the control channel
quit := make(chan struct{})
defer close(quit)
//create zway update channel
zway_updates := make(chan string,3)
defer close(zway_updates)
//create mqtt update channel
mqtt_updates := make(chan MqttUpdate,20)
defer close(mqtt_updates)
//create the zway refresh timer
refreshes := time.NewTicker(time.Second * time.Duration(zway_refresh)).C
	//make initial refresh
zway_updates <- getzway()
//subscribe only when zway started
subject := zway_home + "/actuators/#"
if token := mqtt.Subscribe(subject, 1, nil); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
//start refreshes
go func() {
		for range refreshes {
			update := getzway()
			if len(update) > 0 {
				zway_updates <- update
} else {
log.Print("Got empty zwave response...")
if zway_retries < 3 {
log.Printf("Reinitializing Z-Way for the %d time.", zway_retries)
authzway()
zway_retries += 1
} else {
log.Print("Already tested 3 times: stop")
<-quit
return
}
}
}
}()
//start update parsing
go func() {
for zway_update := range zway_updates {
checkzwayupdate(zway_update,mqtt_updates)
}
}()
	//start mqtt updating
go func() {
for mqtt_update := range mqtt_updates {
token := mqtt.Publish(mqtt_update.Topic, 1, true, mqtt_update.Value)
token.Wait()
}
}()
//start the main loop
for {
select {
case <- quit:
return
}
}
}
| [
"\"ZWAY_SERVER\"",
"\"ZWAY_SERVER\"",
"\"ZWAY_USERNAME\"",
"\"ZWAY_USERNAME\"",
"\"ZWAY_PASSWORD\"",
"\"ZWAY_PASSWORD\"",
"\"ZWAY_HOME\"",
"\"ZWAY_HOME\"",
"\"ZWAY_REFRESH\"",
"\"ZWAY_REFRESH\"",
"\"MQTT_SERVER\"",
"\"MQTT_SERVER\"",
"\"MQTT_USERNAME\"",
"\"MQTT_USERNAME\"",
"\"MQTT_PASSWORD\"",
"\"MQTT_PASSWORD\"",
"\"MQTT_PROTOCOL\"",
"\"MQTT_PROTOCOL\"",
"\"ZWAYMQTT_DEBUG\"",
"\"ZWAYMQTT_DEBUG\""
] | [] | [
"MQTT_PROTOCOL",
"MQTT_PASSWORD",
"ZWAY_USERNAME",
"ZWAY_SERVER",
"ZWAY_PASSWORD",
"MQTT_USERNAME",
"ZWAY_REFRESH",
"MQTT_SERVER",
"ZWAYMQTT_DEBUG",
"ZWAY_HOME"
] | [] | ["MQTT_PROTOCOL", "MQTT_PASSWORD", "ZWAY_USERNAME", "ZWAY_SERVER", "ZWAY_PASSWORD", "MQTT_USERNAME", "ZWAY_REFRESH", "MQTT_SERVER", "ZWAYMQTT_DEBUG", "ZWAY_HOME"] | go | 10 | 0 | |
dpxdt/server/__init__.py | #!/usr/bin/env python
# Copyright 2013 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for the API server."""
import datetime
import logging
import os
# Local libraries
from flask import Flask, url_for
from flask.ext.cache import Cache
from flask.ext.login import LoginManager
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
import jinja2
# Local modules required for app setup
import config
app = Flask(__name__)
app.config.from_object(config)
if 'YOURAPPLICATION_SETTINGS' in os.environ:
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
db = SQLAlchemy(
app,
# Don't expire model instances on commit. Let functions continue to
# quickly read properties from their last known-good state.
session_options=dict(expire_on_commit=False))
login = LoginManager(app)
login.login_view = 'login_view'
login.refresh_view = 'login_view'
cache = Cache(app)
mail = Mail(app)
# Modules with handlers to register with the app
from dpxdt.server import api
from dpxdt.server import auth
from dpxdt.server import emails
from dpxdt.server import frontend
from dpxdt.server import work_queue
from dpxdt.server import work_queue_handlers
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/main.py | """Модуль запуска сервера."""
import os
from flask import Flask, url_for
from flask_admin.contrib.sqla import ModelView
from flask_admin.menu import MenuLink
from flask_dance.contrib.github import make_github_blueprint
from flask_mail import Mail
from flask_sessionstore import SqlAlchemySessionInterface
from src import user
from src.admin_forms import QAWYSIWYG, TestQuestionView
from src.commands import create_admin_user, load_chapters_questions
from src.extensions import admin, bcrypt, db, migrate, sess
from src.qa.models import Answer, Question
from src.qa.views import bp as qa_bp
from src.settings import DevelopConfig
from src.test_cases import (TestAnswer, TestCase, TestQuestion,
TestQuestionUserRelation)
from src.test_cases.views import bp as test_cases_bp
from src.user import User
from src.user.auth import auth_hook
from src.views import bp as index_bp
def create_app(config=DevelopConfig):
"""App factory."""
app = Flask(
__name__.split('.')[0],
static_url_path='/static',
static_folder=f'{config.PROJECT_PATH}/src/static'
)
app.url_map.strict_slashes = False
app.config.from_object(config)
register_extensions(app)
register_blueprints(app)
register_shellcontext(app)
register_adminpanel(app)
register_sessions(app)
register_github_oauth(app)
register_before_hooks(app)
register_commands(app)
register_mail_settings(app)
register_secret(app)
return app
def register_extensions(app):
"""Flask extensions."""
bcrypt.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
admin.init_app(app)
def register_adminpanel(app):
app.config['FLASK_ADMIN_SWATCH'] = 'darkly'
admin.add_view(ModelView(User, db.session))
admin.add_view(QAWYSIWYG(TestCase, db.session))
admin.add_view(TestQuestionView(TestQuestion, db.session))
admin.add_view(QAWYSIWYG(TestAnswer, db.session))
admin.add_view(QAWYSIWYG(Answer, db.session))
admin.add_view(QAWYSIWYG(Question, db.session))
admin.add_view(QAWYSIWYG(TestQuestionUserRelation, db.session))
admin.add_link(MenuLink(name='Back Home', url='/'))
def register_sessions(app):
app.config['SESSION_TYPE'] = 'sqlalchemy'
app.config['SESSION_SQLALCHEMY'] = db
SqlAlchemySessionInterface(app, db, 'flask_sessions', 'key_')
sess.init_app(app)
return app
def register_blueprints(app):
app.register_blueprint(user.views.bp)
app.register_blueprint(index_bp)
app.register_blueprint(qa_bp)
app.register_blueprint(test_cases_bp)
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User,
}
app.shell_context_processor(shell_context)
def register_github_oauth(app):
app.config['GITHUB_OAUTH_CLIENT_ID'] = os.environ.get('GITHUB_OAUTH_CLIENT_ID')
app.config['GITHUB_OAUTH_CLIENT_SECRET'] = os.environ.get('GITHUB_OAUTH_CLIENT_SECRET')
github_bp = make_github_blueprint(scope='read:user,user:email', redirect_to='auth.login_oauth')
app.register_blueprint(github_bp, url_prefix='/login')
def register_before_hooks(app):
app.before_request(auth_hook)
def register_commands(app):
app.cli.add_command(load_chapters_questions)
app.cli.add_command(create_admin_user)
def register_mail_settings(app):
    app.config["MAIL_SERVER"] = os.environ.get("MAIL_SERVER",
                                               'smtp.googlemail.com')
    app.config["MAIL_PORT"] = int(os.environ.get("MAIL_PORT",
                                                 587))
    app.config["MAIL_USE_TLS"] = os.environ.get("MAIL_USE_TLS",
                                                True)
    app.config["MAIL_USERNAME"] = os.environ.get("MAIL_USERNAME",
                                                 '[email protected]')
    app.config["MAIL_PASSWORD"] = os.environ.get("MAIL_PASSWORD",
                                                 '123456789')
    app.config["ADMINS"] = os.environ.get("ADMINS", ['[email protected]'])
    # Create the Mail extension only after the settings are in app.config;
    # Flask-Mail reads its configuration when it is initialized.
    mail = Mail(app)
    return mail
def register_secret(app):
app.config["SECRET_KEY"] = os.environ.get("SECRET_KEY",
'mysupersecretkey')
| [] | [] | [
"ADMINS",
"MAIL_SERVER",
"MAIL_PASSWORD",
"MAIL_PORT",
"SECRET_KEY",
"MAIL_USERNAME",
"GITHUB_OAUTH_CLIENT_SECRET",
"MAIL_USE_TLS",
"GITHUB_OAUTH_CLIENT_ID"
] | [] | ["ADMINS", "MAIL_SERVER", "MAIL_PASSWORD", "MAIL_PORT", "SECRET_KEY", "MAIL_USERNAME", "GITHUB_OAUTH_CLIENT_SECRET", "MAIL_USE_TLS", "GITHUB_OAUTH_CLIENT_ID"] | python | 9 | 0 | |
install/install.py | import boto3
import json
import os
import yaml
from subprocess import run
def handler(event, context):
p = run( [ 'cdk', 'version' ], capture_output = True )
print("AWS Cloud Development Kit (CDK)", p.stdout.decode())
os.system('export CDK_NEW_BOOTSTRAP=1 && cdk bootstrap --show-template > /tmp/cdk.yaml')
with open('/tmp/cdk.yaml', 'r') as stream:
parsed_yaml = yaml.safe_load(stream)
stream.close()
ssm_client = boto3.client('ssm')
response = ssm_client.get_parameter(
Name = os.environ['BOOTSTRAP']
)
bootstrap = response['Parameter']['Value']
print('Current Bootstrap: '+bootstrap)
print('Future Bootstrap: '+str(parsed_yaml['Resources']['CdkBootstrapVersion']['Properties']['Value']))
if bootstrap != str(parsed_yaml['Resources']['CdkBootstrapVersion']['Properties']['Value']):
s3_client = boto3.client('s3')
s3_client.upload_file('/tmp/cdk.yaml', os.environ['BUCKET'], 'cdk.yaml')
ssm_client.put_parameter(
Name = os.environ['BOOTSTRAP'],
Value = str(parsed_yaml['Resources']['CdkBootstrapVersion']['Properties']['Value']),
Type = 'String',
Overwrite = True
)
return {
'statusCode': 200,
'body': json.dumps('CDKBoot Install')
} | [] | [] | [
"BOOTSTRAP",
"BUCKET"
] | [] | ["BOOTSTRAP", "BUCKET"] | python | 2 | 0 | |
e2e/cloud_manager/events_test.go | // Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build e2e cloudmanager,generic
package cloud_manager_test
import (
"encoding/json"
"os"
"os/exec"
"testing"
"time"
"github.com/mongodb/mongocli/e2e"
"go.mongodb.org/atlas/mongodbatlas"
)
func TestEvents(t *testing.T) {
cliPath, err := e2e.Bin()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
const eventsEntity = "events"
t.Run("ListProjectEvent", func(t *testing.T) {
cmd := exec.Command(cliPath,
entity,
eventsEntity,
"list",
"--projectId="+os.Getenv("MCLI_PROJECT_ID"),
"-o=json",
)
cmd.Env = os.Environ()
resp, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("unexpected error: %v, resp: %v", err, string(resp))
}
var events mongodbatlas.EventResponse
if err := json.Unmarshal(resp, &events); err != nil {
t.Fatalf("unexpected error: %v", err)
}
})
t.Run("ListOrganizationEvent", func(t *testing.T) {
cmd := exec.Command(cliPath,
entity,
eventsEntity,
"list",
"--orgId="+os.Getenv("MCLI_ORG_ID"),
"--minDate="+time.Now().Add(-time.Hour*time.Duration(24)).Format("2006-01-02"),
"-o=json",
)
cmd.Env = os.Environ()
resp, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("unexpected error: %v, resp: %v", err, string(resp))
}
var events mongodbatlas.EventResponse
if err := json.Unmarshal(resp, &events); err != nil {
t.Fatalf("unexpected error: %v", err)
}
})
}
| [
"\"MCLI_PROJECT_ID\"",
"\"MCLI_ORG_ID\""
] | [] | [
"MCLI_ORG_ID",
"MCLI_PROJECT_ID"
] | [] | ["MCLI_ORG_ID", "MCLI_PROJECT_ID"] | go | 2 | 0 | |
experiments/IMGEP-BigVAE/repetition_000008/experiment_config.py | import autodisc as ad
import goalrepresent as gr
def get_system_config():
system_config = ad.systems.Lenia.default_config()
system_config.version = "pytorch_fft"
system_config.use_gpu = True
return system_config
def get_system_parameters():
system_parameters = ad.systems.Lenia.default_system_parameters()
system_parameters.size_y = 256
system_parameters.size_x = 256
return system_parameters
def get_model_config(model_name):
model_class = eval("gr.models.{}Model".format(model_name))
model_config = model_class.default_config()
if 'network' in model_config:
if "ProgressiveTree" in model_name:
## network
model_config.node_classname = "VAE"
model_config.node.network.name = "Burgess"
model_config.node.network.parameters = {"n_channels": 1, "input_size": (256,256), "n_latents": 368, "n_conv_layers": 6, "hidden_channels": 64, "hidden_dim": 512, "encoder_conditional_type": "gaussian", "feature_layer": 2}
model_config.node.create_connections = {}
model_config.network.parameters = {"n_channels": 1, "input_size": (256,256), "n_latents": 368, "n_conv_layers": 6, "hidden_channels": 64, "hidden_dim": 512, "encoder_conditional_type": "gaussian", "feature_layer": 2}
## device
model_config.node.device.use_gpu = True
else:
## network
model_config.network.name = "Burgess"
model_config.network.parameters = {"n_channels": 1, "input_size": (256,256), "n_latents": 368, "n_conv_layers": 6, "hidden_channels": 64, "hidden_dim": 512, "encoder_conditional_type": "gaussian", "feature_layer": 2}
## initialization
model_config.network.initialization.name = "kaiming_uniform"
model_config.network.initialization.parameters = {}
## loss
model_config.loss.name = "VAE"
model_config.loss.parameters = {"reconstruction_dist": "bernoulli"}
## optimizer
model_config.optimizer.name = "Adam"
model_config.optimizer.parameters = {"lr": 1e-3, "weight_decay": 1e-5 }
# device
model_config.device.use_gpu = True
## logging
model_config.logging.record_valid_images_every = 100
model_config.logging.record_embeddings_every = 400
## checkpoint
model_config.checkpoint.save_model_every = 1
## evaluation
model_config.evaluation.save_results_every = 5000
return model_config
def get_explorer_config():
explorer_config = ad.explorers.ProgressiveExplorer.default_config()
explorer_config.seed = 8
explorer_config.num_of_random_initialization = 1000
explorer_config.run_parameters = []
# Parameter 1: init state
parameter = ad.Config()
parameter.name = 'init_state'
parameter.type = 'cppn_evolution'
parameter.init = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
parameter.init.neat_config_file = 'neat_config.cfg'
parameter.init.n_generations = 1
parameter.init.best_genome_of_last_generation = True
parameter.mutate = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
parameter.mutate.neat_config_file = 'neat_config.cfg'
parameter.mutate.n_generations = 2
parameter.mutate.best_genome_of_last_generation = True
explorer_config.run_parameters.append(parameter)
# Parameter 2: R
parameter = ad.Config()
parameter.name = 'R'
parameter.type = 'sampling'
parameter.init = ('discrete', 2, 20)
parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 2, 'max': 20}
explorer_config.run_parameters.append(parameter)
# Parameter 3: T
parameter = ad.Config()
parameter.name = 'T'
parameter.type = 'sampling'
parameter.init = ('discrete', 1, 20)
parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 1, 'max': 20}
explorer_config.run_parameters.append(parameter)
# Parameter 4: b
parameter = ad.Config()
parameter.name = 'b'
parameter.type = 'sampling'
parameter.init = ('function', ad.helper.sampling.sample_vector, (('discrete', 1, 3), (0, 1)))
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.1, 'min': 0, 'max': 1}
explorer_config.run_parameters.append(parameter)
# Parameter 5: m
parameter = ad.Config()
parameter.name = 'm'
parameter.type = 'sampling'
parameter.init = ('continuous', 0, 1)
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.1, 'min': 0, 'max': 1}
explorer_config.run_parameters.append(parameter)
# Parameter 6: s
parameter = ad.Config()
parameter.name = 's'
parameter.type = 'sampling'
parameter.init = ('continuous', 0.001, 0.3)
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.05, 'min': 0.001, 'max': 0.3}
explorer_config.run_parameters.append(parameter)
# visual representation
explorer_config.visual_representation = gr.representations.SingleModelRepresentation.default_config()
explorer_config.visual_representation.seed = 8
explorer_config.visual_representation.training.output_folder = "./training"
explorer_config.visual_representation.model.name = "ProgressiveTree"
explorer_config.visual_representation.model.config = get_model_config(explorer_config.visual_representation.model.name)
# goal space selection
explorer_config.goal_space_selection.type = 'random'
if explorer_config.goal_space_selection.type in ['probability_distribution']:
explorer_config.goal_space_selection.distribution = None
elif explorer_config.goal_space_selection.type in ['adaptive']:
explorer_config.goal_space_selection.measure = ad.Config()
explorer_config.goal_space_selection.measure.type = 'None'
explorer_config.goal_space_selection.measure.n_steps = None
if None is not None and None is not None:
raise ValueError('Only explorer_config.goal_space_selection.measure.n_bins_per_dimension or explorer_config.goal_space_selection.measure.n_bins can be defined!')
if None is not None:
explorer_config.goal_space_selection.measure.diversity = ad.Config()
explorer_config.goal_space_selection.measure.diversity.type = 'NBinDiversityNBinPerDim'
explorer_config.goal_space_selection.measure.diversity.n_bins_per_dimension = None
elif None is not None:
explorer_config.goal_space_selection.measure.diversity = ad.Config()
explorer_config.goal_space_selection.measure.diversity.type = 'NBinDiversityNBins'
explorer_config.goal_space_selection.measure.diversity.n_bins = None
# add constraint to the diversity measure
explorer_config.goal_space_selection.measure.update_constraints = dict( active = False)
explorer_config.goal_space_selection.selection_algo = ad.Config()
explorer_config.goal_space_selection.selection_algo.type = 'None'
if explorer_config.goal_space_selection.selection_algo.type in ['epsilon_greedy']:
explorer_config.goal_space_selection.selection_algo.epsilon = None
elif explorer_config.goal_space_selection.selection_algo.type in ['softmax']:
explorer_config.goal_space_selection.selection_algo.beta = None
elif explorer_config.goal_space_selection.selection_algo.type in ['epsilon_softmax']:
explorer_config.goal_space_selection.selection_algo.epsilon = None
explorer_config.goal_space_selection.selection_algo.beta = None
# goal selection
explorer_config.goal_selection.type = 'random'
explorer_config.goal_selection.sampling_from_reached_boundaries = ad.Config()
explorer_config.goal_selection.sampling_from_reached_boundaries.margin_min = 0
explorer_config.goal_selection.sampling_from_reached_boundaries.margin_max = 0
# progressive growing parameters
explorer_config.progressive_growing.split_trigger = ad.Config(dict(active = False))
explorer_config.progressive_growing.split_trigger.boundary_config = {}
# progressive training parameters
explorer_config.progressive_training.dataset_constraints = [dict( active = True, filter = ('statistics.is_dead', '==', False))]
explorer_config.progressive_training.dataset_augment = True
explorer_config.progressive_training.n_runs_between_stages = 100
explorer_config.progressive_training.n_epochs_per_stage = 100
explorer_config.progressive_training.train_batch_size = 128
explorer_config.progressive_training.importance_sampling_new_vs_old = 0.3
explorer_config.progressive_training.alternated_backward = {"active": False}
# how are the source policies for a mutation are selected
explorer_config.source_policy_selection.type = 'optimal'
explorer_config.source_policy_selection.constraints = []
return explorer_config
def get_number_of_explorations():
return 5000
| [] | [] | [] | [] | [] | python | null | null | null |
third_party/phantomjs/test/run-tests.py | #!/usr/bin/env python
import argparse
import collections
import errno
import glob
import imp
import os
import platform
import posixpath
import re
import shlex
import SimpleHTTPServer
import socket
import SocketServer
import ssl
import string
import cStringIO as StringIO
import subprocess
import sys
import threading
import time
import traceback
import urllib
# All files matching one of these glob patterns will be run as tests.
TESTS = [
'basics/*.js',
'module/*/*.js',
'standards/*/*.js',
'regression/*.js',
]
TIMEOUT = 7 # Maximum duration of PhantomJS execution (in seconds).
# This is a backstop; testharness.js imposes a shorter
# timeout. Both can be increased if necessary.
#
# Utilities
#
# FIXME: assumes ANSI/VT100 escape sequences
# properly this should use curses, but that's an awful lot of work
# One of colors 30 ("black" -- usually a dark gray) and 37 ("white" --
# usually a very light gray) will almost certainly be illegible
# against the terminal background, so we provide neither.
# The colorization mode is global because so is sys.stdout.
_COLOR_NONE = {
"_": "", "^": "",
"r": "", "R": "",
"g": "", "G": "",
"y": "", "Y": "",
"b": "", "B": "",
"m": "", "M": "",
"c": "", "C": "",
}
_COLOR_ON = {
"_": "\033[0m", "^": "\033[1m",
"r": "\033[31m", "R": "\033[1;31m",
"g": "\033[32m", "G": "\033[1;32m",
"y": "\033[33m", "Y": "\033[1;33m",
"b": "\033[34m", "B": "\033[1;34m",
"m": "\033[35m", "M": "\033[1;35m",
"c": "\033[36m", "C": "\033[1;36m",
}
_COLOR_BOLD = {
"_": "\033[0m", "^": "\033[1m",
"r": "\033[0m", "R": "\033[1m",
"g": "\033[0m", "G": "\033[1m",
"y": "\033[0m", "Y": "\033[1m",
"b": "\033[0m", "B": "\033[1m",
"m": "\033[0m", "M": "\033[1m",
"c": "\033[0m", "C": "\033[1m",
}
_COLORS = None
def activate_colorization(options):
global _COLORS
if options.color == "always":
_COLORS = _COLOR_ON
elif options.color == "never":
_COLORS = _COLOR_NONE
else:
if sys.stdout.isatty() and platform.system() != "Windows":
try:
n = int(subprocess.check_output(["tput", "colors"]))
if n >= 8:
_COLORS = _COLOR_ON
else:
_COLORS = _COLOR_BOLD
except subprocess.CalledProcessError:
_COLORS = _COLOR_NONE
else:
_COLORS = _COLOR_NONE
def colorize(color, message):
return _COLORS[color] + message + _COLORS["_"]
# create_default_context and SSLContext were only added in 2.7.9,
# which is newer than the python2 that ships with OSX :-(
# The fallback tries to mimic what create_default_context(CLIENT_AUTH)
# does. Security obviously isn't important in itself for a test
# server, but making sure the PJS client can talk to a server
# configured according to modern TLS best practices _is_ important.
# Unfortunately, there is no way to set things like OP_NO_SSL2 or
# OP_CIPHER_SERVER_PREFERENCE prior to 2.7.9.
CIPHERLIST_2_7_9 = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:!RC4'
)
def wrap_socket_ssl(sock, base_path):
crtfile = os.path.join(base_path, 'certs/https-snakeoil.crt')
keyfile = os.path.join(base_path, 'certs/https-snakeoil.key')
try:
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ctx.load_cert_chain(crtfile, keyfile)
return ctx.wrap_socket(sock, server_side=True)
except AttributeError:
return ssl.wrap_socket(sock,
keyfile=keyfile,
certfile=crtfile,
server_side=True,
ciphers=CIPHERLIST_2_7_9)
# This should be in the standard library somewhere, but as far as I
# can tell, it isn't.
class ResponseHookImporter(object):
def __init__(self, www_path):
# All Python response hooks, no matter how deep below www_path,
# are treated as direct children of the fake "test_www" package.
if 'test_www' not in sys.modules:
imp.load_source('test_www', www_path + '/__init__.py')
self.tr = string.maketrans('-./%', '____')
def __call__(self, path):
modname = 'test_www.' + path.translate(self.tr)
try:
return sys.modules[modname]
except KeyError:
return imp.load_source(modname, path)
# This should also be in the standard library somewhere, and
# definitely isn't.
#
# FIXME: This currently involves *three* threads for every process,
# and a fourth if the process takes input. (On Unix, clever use of
# select() might be able to get that down to one, but zero is Hard.
# On Windows, we're hosed. 3.4's asyncio module would make everything
# better, but 3.4 is its own can of worms.)
try:
devnull = subprocess.DEVNULL
except AttributeError:
devnull = os.open(os.devnull, os.O_RDONLY)
def do_call_subprocess(command, verbose, stdin_data, timeout):
def read_thread(linebuf, fp):
while True:
line = fp.readline().rstrip()
if not line: break # EOF
line = line.rstrip()
if line:
linebuf.append(line)
if verbose >= 3:
sys.stdout.write(line + '\n')
def write_thread(data, fp):
fp.writelines(data)
fp.close()
def reap_thread(proc, timed_out):
if proc.returncode is None:
proc.terminate()
timed_out[0] = True
class DummyThread:
def start(self): pass
def join(self): pass
if stdin_data:
stdin = subprocess.PIPE
else:
stdin = devnull
proc = subprocess.Popen(command,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if stdin_data:
sithrd = threading.Thread(target=write_thread,
args=(stdin_data, proc.stdin))
else:
sithrd = DummyThread()
stdout = []
stderr = []
timed_out = [False]
sothrd = threading.Thread(target=read_thread, args=(stdout, proc.stdout))
sethrd = threading.Thread(target=read_thread, args=(stderr, proc.stderr))
rpthrd = threading.Timer(timeout, reap_thread, args=(proc, timed_out))
sithrd.start()
sothrd.start()
sethrd.start()
rpthrd.start()
proc.wait()
if not timed_out[0]: rpthrd.cancel()
sithrd.join()
sothrd.join()
sethrd.join()
rpthrd.join()
if timed_out[0]:
stderr.append("TIMEOUT: Process terminated after {} seconds."
.format(timeout))
if verbose >= 3:
sys.stdout.write(stderr[-1] + "\n")
rc = proc.returncode
if verbose >= 3:
if rc < 0:
sys.stdout.write("## killed by signal {}\n".format(-rc))
else:
sys.stdout.write("## exit {}\n".format(rc))
return proc.returncode, stdout, stderr
#
# HTTP/HTTPS server, presented on localhost to the tests
#
class FileHandler(SimpleHTTPServer.SimpleHTTPRequestHandler, object):
def __init__(self, *args, **kwargs):
self._cached_untranslated_path = None
self._cached_translated_path = None
self.postdata = None
super(FileHandler, self).__init__(*args, **kwargs)
# silent, do not pollute stdout nor stderr.
def log_message(self, format, *args):
return
# accept POSTs, read the postdata and stash it in an instance variable,
# then forward to do_GET; handle_request hooks can vary their behavior
# based on the presence of postdata and/or the command verb.
def do_POST(self):
try:
ln = int(self.headers.get('content-length'))
        except (TypeError, ValueError):
self.send_response(400, 'Bad Request')
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write("No or invalid Content-Length in POST (%r)"
% self.headers.get('content-length'))
return
self.postdata = self.rfile.read(ln)
self.do_GET()
# allow provision of a .py file that will be interpreted to
# produce the response.
def send_head(self):
path = self.translate_path(self.path)
# do not allow direct references to .py(c) files,
# or indirect references to __init__.py
if (path.endswith('.py') or path.endswith('.pyc') or
path.endswith('__init__')):
self.send_error(404, 'File not found')
return None
if os.path.exists(path):
return super(FileHandler, self).send_head()
py = path + '.py'
if os.path.exists(py):
try:
mod = self.get_response_hook(py)
return mod.handle_request(self)
except:
self.send_error(500, 'Internal Server Error in '+py)
raise
self.send_error(404, 'File not found')
return None
# modified version of SimpleHTTPRequestHandler's translate_path
# to resolve the URL relative to the www/ directory
# (e.g. /foo -> test/www/foo)
def translate_path(self, path):
# Cache for efficiency, since our send_head calls this and
# then, in the normal case, the parent class's send_head
# immediately calls it again.
if (self._cached_translated_path is not None and
self._cached_untranslated_path == path):
return self._cached_translated_path
orig_path = path
# Strip query string and/or fragment, if present.
x = path.find('?')
if x != -1: path = path[:x]
x = path.find('#')
if x != -1: path = path[:x]
# Ensure consistent encoding of special characters, then
# lowercase everything so that the tests behave consistently
# whether or not the local filesystem is case-sensitive.
path = urllib.quote(urllib.unquote(path)).lower()
# Prevent access to files outside www/.
# At this point we want specifically POSIX-like treatment of 'path'
# because it is still a URL component and not a filesystem path.
# SimpleHTTPRequestHandler.send_head() expects us to preserve the
# distinction between paths with and without a trailing slash, but
# posixpath.normpath() discards that distinction.
trailing_slash = path.endswith('/')
path = posixpath.normpath(path)
while path.startswith('/'):
path = path[1:]
while path.startswith('../'):
path = path[3:]
# Now resolve the normalized, clamped path relative to the www/
# directory, according to local OS conventions.
path = os.path.normpath(os.path.join(self.www_path, *path.split('/')))
if trailing_slash:
# it must be a '/' even on Windows
path += '/'
self._cached_untranslated_path = orig_path
self._cached_translated_path = path
return path
class TCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# This is how you are officially supposed to set SO_REUSEADDR per
# https://docs.python.org/2/library/socketserver.html#SocketServer.BaseServer.allow_reuse_address
allow_reuse_address = True
def __init__(self, use_ssl, handler, base_path, signal_error):
SocketServer.TCPServer.__init__(self, ('localhost', 0), handler)
if use_ssl:
self.socket = wrap_socket_ssl(self.socket, base_path)
self._signal_error = signal_error
def handle_error(self, request, client_address):
# Ignore errors which can occur naturally if the client
# disconnects in the middle of a request. EPIPE and
# ECONNRESET *should* be the only such error codes
# (according to the OSX manpage for send()).
_, exval, _ = sys.exc_info()
if getattr(exval, 'errno', None) in (errno.EPIPE, errno.ECONNRESET):
return
# Otherwise, report the error to the test runner.
self._signal_error(sys.exc_info())
class HTTPTestServer(object):
def __init__(self, base_path, signal_error, verbose):
self.httpd = None
self.httpsd = None
self.base_path = base_path
self.www_path = os.path.join(base_path, 'www')
self.signal_error = signal_error
self.verbose = verbose
def __enter__(self):
handler = FileHandler
handler.extensions_map.update({
'.htm': 'text/html',
'.html': 'text/html',
'.css': 'text/css',
'.js': 'application/javascript',
'.json': 'application/json'
})
handler.www_path = self.www_path
handler.get_response_hook = ResponseHookImporter(self.www_path)
self.httpd = TCPServer(False, handler,
self.base_path, self.signal_error)
os.environ['TEST_HTTP_BASE'] = \
'http://localhost:{}/'.format(self.httpd.server_address[1])
httpd_thread = threading.Thread(target=self.httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
if self.verbose >= 3:
sys.stdout.write("## HTTP server at {}\n".format(
os.environ['TEST_HTTP_BASE']))
self.httpsd = TCPServer(True, handler,
self.base_path, self.signal_error)
os.environ['TEST_HTTPS_BASE'] = \
'https://localhost:{}/'.format(self.httpsd.server_address[1])
httpsd_thread = threading.Thread(target=self.httpsd.serve_forever)
httpsd_thread.daemon = True
httpsd_thread.start()
if self.verbose >= 3:
sys.stdout.write("## HTTPS server at {}\n".format(
os.environ['TEST_HTTPS_BASE']))
return self
def __exit__(self, *dontcare):
self.httpd.shutdown()
del os.environ['TEST_HTTP_BASE']
self.httpsd.shutdown()
del os.environ['TEST_HTTPS_BASE']
#
# Running tests and interpreting their results
#
class TestDetailCode(collections.namedtuple("TestDetailCode", (
"idx", "color", "short_label", "label", "long_label"))):
def __index__(self): return self.idx
def __hash__(self): return self.idx
def __eq__(self, other): return self.idx == other.idx
def __ne__(self, other): return self.idx != other.idx
class T(object):
PASS = TestDetailCode(0, "g", ".", "pass", "passed")
FAIL = TestDetailCode(1, "R", "F", "FAIL", "failed")
XFAIL = TestDetailCode(2, "y", "f", "xfail", "failed as expected")
XPASS = TestDetailCode(3, "Y", "P", "XPASS", "passed unexpectedly")
ERROR = TestDetailCode(4, "R", "E", "ERROR", "had errors")
SKIP = TestDetailCode(5, "m", "s", "skip", "skipped")
UNSUPPORTED = TestDetailCode(6, "y", "u", "unsupported", "unsupported")
MAX = 7
class TestDetail(object):
"""Holds one block of details about a test that failed."""
# types of details:
def __init__(self, message, test_id, detail_type):
if not isinstance(message, list):
message = [message]
self.message = [line.rstrip()
for chunk in message
for line in chunk.split("\n")]
self.dtype = detail_type
self.test_id = test_id
def report(self, fp):
col, label = self.dtype.color, self.dtype.label
if self.test_id:
fp.write("{:>5}: {}\n".format(colorize(col, label),
self.test_id))
lo = 0
else:
fp.write("{:>5}: {}\n".format(colorize(col, label),
self.message[0]))
lo = 1
for line in self.message[lo:]:
fp.write(" {}\n".format(colorize("b", line)))
class TestGroup(object):
"""Holds the result of one group of tests (that is, one .js file),
parsed from the output of run_phantomjs (see below).
Subclasses specify what the output means.
A test with zero details is considered to be successful.
"""
def __init__(self, name):
self.name = name
self.n = [0]*T.MAX
self.details = []
def parse(self, rc, out, err):
raise NotImplementedError
def _add_d(self, message, test_id, dtype):
self.n[dtype] += 1
self.details.append(TestDetail(message, test_id, dtype))
def add_pass (self, m, t): self._add_d(m, t, T.PASS)
def add_fail (self, m, t): self._add_d(m, t, T.FAIL)
def add_xpass(self, m, t): self._add_d(m, t, T.XPASS)
def add_xfail(self, m, t): self._add_d(m, t, T.XFAIL)
def add_error(self, m, t): self._add_d(m, t, T.ERROR)
def add_skip (self, m, t): self._add_d(m, t, T.SKIP)
def add_unsupported (self, m, t): self._add_d(m, t, T.UNSUPPORTED)
def default_interpret_exit_code(self, rc):
if rc == 0:
if not self.is_successful() and not self.n[T.ERROR]:
self.add_error([],
"PhantomJS exited successfully when test failed")
# Exit code -15 indicates a timeout.
elif rc == 1 or rc == -15:
if self.is_successful():
self.add_error([], "PhantomJS exited unsuccessfully")
elif rc >= 2:
self.add_error([], "PhantomJS exited with code {}".format(rc))
else:
self.add_error([], "PhantomJS killed by signal {}".format(-rc))
def is_successful(self):
return self.n[T.FAIL] + self.n[T.XPASS] + self.n[T.ERROR] == 0
def worst_code(self):
# worst-to-best ordering
for code in (T.ERROR, T.FAIL, T.XPASS, T.SKIP, T.XFAIL, T.PASS, T.UNSUPPORTED):
if self.n[code] > 0:
return code
return T.PASS
def one_char_summary(self, fp):
code = self.worst_code()
fp.write(colorize(code.color, code.short_label))
fp.flush()
def line_summary(self, fp):
code = self.worst_code()
fp.write("{}: {}\n".format(colorize("^", self.name),
colorize(code.color, code.label)))
def report(self, fp, show_all):
self.line_summary(fp)
need_blank_line = False
for detail in self.details:
if show_all or detail.dtype not in (T.PASS, T.XFAIL, T.SKIP):
detail.report(fp)
need_blank_line = True
if need_blank_line:
fp.write("\n")
def report_for_verbose_level(self, fp, verbose):
if verbose == 0:
self.one_char_summary(sys.stdout)
elif verbose == 1:
self.report(sys.stdout, False)
else:
self.report(sys.stdout, True)
class UnsupportedTestGroup(TestGroup):
"""Test group which is currently unsupported and should
be skipped altogether.
"""
def __init__(self, name):
TestGroup.__init__(self, name)
        self.add_unsupported('', 'Skipping the whole file')
class ExpectTestGroup(TestGroup):
"""Test group whose output must be exactly as specified by directives
in the file. This is how you test for an _unsuccessful_ exit code,
or for output appearing on a specific one of stdout/stderr.
"""
def __init__(self, name, rc_exp, stdout_exp, stderr_exp,
rc_xfail, stdout_xfail, stderr_xfail):
TestGroup.__init__(self, name)
if rc_exp is None: rc_exp = 0
self.rc_exp = rc_exp
self.stdout_exp = stdout_exp
self.stderr_exp = stderr_exp
self.rc_xfail = rc_xfail
self.stdout_xfail = stdout_xfail
self.stderr_xfail = stderr_xfail
def parse(self, rc, out, err):
self.parse_output("stdout", self.stdout_exp, out, self.stdout_xfail)
self.parse_output("stderr", self.stderr_exp, err, self.stderr_xfail)
exit_msg = ["expected exit code {} got {}"
.format(self.rc_exp, rc)]
if rc != self.rc_exp:
exit_desc = "did not exit as expected"
if self.rc_xfail:
self.add_xfail(exit_msg, exit_desc)
else:
self.add_fail(exit_msg, exit_desc)
else:
exit_desc = "exited as expected"
if self.rc_xfail:
self.add_xpass(exit_msg, exit_desc)
else:
self.add_pass(exit_msg, exit_desc)
def parse_output(self, what, exp, got, xfail):
diff = []
le = len(exp)
lg = len(got)
for i in range(max(le, lg)):
e = ""
g = ""
if i < le: e = exp[i]
if i < lg: g = got[i]
if e != g:
diff.extend(("{}: line {} not as expected".format(what, i+1),
"-" + repr(e)[1:-1],
"+" + repr(g)[1:-1]))
if diff:
desc = what + " not as expected"
if xfail:
self.add_xfail(diff, desc)
else:
self.add_fail(diff, desc)
else:
desc = what + " as expected"
if xfail:
self.add_xpass(diff, desc)
else:
self.add_pass(diff, desc)
class TAPTestGroup(TestGroup):
"""Test group whose output is interpreted according to a variant of the
Test Anything Protocol (http://testanything.org/tap-specification.html).
Relative to that specification, these are the changes:
* Plan-at-the-end, explanations for directives, and "Bail out!"
are not supported. ("1..0 # SKIP: explanation" *is* supported.)
* "Anything else" lines are an error.
* Repeating a test point number, or using one outside the plan
range, is an error (this is unspecified in TAP proper).
* Diagnostic lines beginning with # are taken as additional
information about the *next* test point. Diagnostic lines
beginning with ## are ignored.
* Directives are case sensitive.
"""
diag_r = re.compile(r"^#(#*)\s*(.*)$")
plan_r = re.compile(r"^1..(\d+)(?:\s*\#\s*SKIP(?::\s*(.*)))?$")
test_r = re.compile(r"^(not ok|ok)\s*"
r"([0-9]+)?\s*"
r"([^#]*)(?:# (TODO|SKIP))?$")
def parse(self, rc, out, err):
self.parse_tap(out, err)
self.default_interpret_exit_code(rc)
def parse_tap(self, out, err):
points_already_used = set()
messages = []
# Look for the plan.
# Diagnostic lines are allowed to appear above the plan, but not
# test lines.
for i in range(len(out)):
line = out[i]
m = self.diag_r.match(line)
if m:
if not m.group(1):
messages.append(m.group(2))
continue
m = self.plan_r.match(line)
if m:
break
messages.insert(0, line)
self.add_error(messages, "Plan line not interpretable")
if i + 1 < len(out):
self.add_skip(out[(i+1):], "All further output ignored")
return
else:
self.add_error(messages, "No plan line detected in output")
return
max_point = int(m.group(1))
if max_point == 0:
if any(msg.startswith("ERROR:") for msg in messages):
self.add_error(messages, m.group(2) or "Test group skipped")
else:
self.add_skip(messages, m.group(2) or "Test group skipped")
if i + 1 < len(out):
self.add_skip(out[(i+1):], "All further output ignored")
return
prev_point = 0
for i in range(i+1, len(out)):
line = out[i]
m = self.diag_r.match(line)
if m:
if not m.group(1):
messages.append(m.group(2))
continue
m = self.test_r.match(line)
if m:
status = m.group(1)
point = m.group(2)
desc = m.group(3)
dirv = m.group(4)
if point:
point = int(point)
else:
point = prev_point + 1
if point in points_already_used:
# A reused test point is an error.
self.add_error(messages, desc + " [test point repeated]")
else:
points_already_used.add(point)
# A point above the plan limit is an automatic *fail*.
# The test suite relies on this in testing exit().
if point > max_point:
status = "not ok"
if status == "ok":
if not dirv:
self.add_pass(messages, desc)
elif dirv == "TODO":
self.add_xpass(messages, desc)
elif dirv == "SKIP":
self.add_skip(messages, desc)
else:
self.add_error(messages, desc +
" [ok, with invalid directive "+dirv+"]")
else:
if not dirv:
self.add_fail(messages, desc)
elif dirv == "TODO":
self.add_xfail(messages, desc)
else:
self.add_error(messages, desc +
" [not ok, with invalid directive "+dirv+"]")
del messages[:]
prev_point = point
else:
self.add_error([line], "neither a test nor a diagnostic")
# Any output on stderr is an error, with one exception: the timeout
# message added by record_process_output, which is treated as an
# unnumbered "not ok".
if err:
if len(err) == 1 and err[0].startswith("TIMEOUT: "):
points_already_used.add(prev_point + 1)
self.add_fail(messages, err[0][len("TIMEOUT: "):])
else:
self.add_error(err, "Unexpected output on stderr")
# Any missing test points are fails.
for pt in range(1, max_point+1):
if pt not in points_already_used:
self.add_fail([], "test {} did not report status".format(pt))
class TestRunner(object):
def __init__(self, base_path, phantomjs_exe, options):
self.base_path = base_path
self.cert_path = os.path.join(base_path, 'certs')
self.harness = os.path.join(base_path, 'testharness.js')
self.phantomjs_exe = phantomjs_exe
self.verbose = options.verbose
self.debugger = options.debugger
self.to_run = options.to_run
self.run_unsupported = options.run_unsupported
self.server_errs = []
def signal_server_error(self, exc_info):
self.server_errs.append(exc_info)
def get_base_command(self, debugger):
if debugger is None:
return ["node", self.phantomjs_exe]
elif debugger == "gdb":
return ["gdb", "--args", "node", self.phantomjs_exe]
elif debugger == "lldb":
return ["lldb", "--", "node", self.phantomjs_exe]
elif debugger == "valgrind":
return ["valgrind", "node", self.phantomjs_exe]
else:
raise RuntimeError("Don't know how to invoke " + self.debugger)
def run_phantomjs(self, script,
script_args=[], pjs_args=[], stdin_data=[],
timeout=TIMEOUT, silent=False):
verbose = self.verbose
debugger = self.debugger
if silent:
verbose = False
debugger = None
output = []
command = self.get_base_command(debugger)
command.extend(pjs_args)
command.append(script)
if verbose:
command.append('--verbose={}'.format(verbose))
command.extend(script_args)
if verbose >= 3:
sys.stdout.write("## running {}\n".format(" ".join(command)))
if debugger:
# FIXME: input-feed mode doesn't work with a debugger,
# because how do you tell the debugger that the *debuggee*
# needs to read from a pipe?
subprocess.call(command)
return 0, [], []
else:
return do_call_subprocess(command, verbose, stdin_data, timeout)
def run_test(self, script, name):
script_args = []
pjs_args = []
use_harness = True
use_snakeoil = False
stdin_data = []
stdout_exp = []
stderr_exp = []
rc_exp = None
stdout_xfail = False
stderr_xfail = False
rc_xfail = False
timeout = TIMEOUT
unsupported = False
def require_args(what, i, tokens):
if i+1 == len(tokens):
                raise ValueError(what + " directive requires an argument")
if self.verbose >= 3:
sys.stdout.write(colorize("^", name) + ":\n")
# Parse any directives at the top of the script.
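        # For example (hypothetical directives), a test script could start with:
        #   //! timeout: 30
        #   //! phantomjs: --ssl-protocol=any
        # Note that "phantomjs:", "script:", "stdin:", "expect-stdout:" and
        # "expect-stderr:" consume the rest of their line.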
try:
with open(script, "rt") as s:
for line in s:
if not line.startswith("//!"):
break
tokens = shlex.split(line[3:], comments=True)
skip = False
for i in range(len(tokens)):
if skip:
skip = False
continue
tok = tokens[i]
if tok == "unsupported":
unsupported = True
elif tok == "no-harness":
use_harness = False
elif tok == "snakeoil":
use_snakeoil = True
elif tok == "expect-exit-fails":
rc_xfail = True
elif tok == "expect-stdout-fails":
stdout_xfail = True
elif tok == "expect-stderr-fails":
stderr_xfail = True
elif tok == "timeout:":
require_args(tok, i, tokens)
timeout = float(tokens[i+1])
if timeout <= 0:
raise ValueError("timeout must be positive")
skip = True
elif tok == "expect-exit:":
require_args(tok, i, tokens)
rc_exp = int(tokens[i+1])
skip = True
elif tok == "phantomjs:":
require_args(tok, i, tokens)
pjs_args.extend(tokens[(i+1):])
break
elif tok == "script:":
require_args(tok, i, tokens)
script_args.extend(tokens[(i+1):])
break
elif tok == "stdin:":
require_args(tok, i, tokens)
stdin_data.append(" ".join(tokens[(i+1):]) + "\n")
break
elif tok == "expect-stdout:":
require_args(tok, i, tokens)
stdout_exp.append(" ".join(tokens[(i+1):]))
break
elif tok == "expect-stderr:":
require_args(tok, i, tokens)
stderr_exp.append(" ".join(tokens[(i+1):]))
break
else:
raise ValueError("unrecognized directive: " + tok)
except Exception as e:
grp = TestGroup(name)
if hasattr(e, 'strerror') and hasattr(e, 'filename'):
grp.add_error([], '{} ({}): {}\n'
.format(name, e.filename, e.strerror))
else:
grp.add_error([], '{} ({}): {}\n'
.format(name, script, str(e)))
return grp
if use_harness:
script_args.insert(0, script)
script = self.harness
if use_snakeoil:
pjs_args.insert(0, '--ssl-certificates-path=' + self.cert_path)
if unsupported and not self.run_unsupported:
return UnsupportedTestGroup(name)
rc, out, err = self.run_phantomjs(script, script_args, pjs_args,
stdin_data, timeout)
if rc_exp or stdout_exp or stderr_exp:
grp = ExpectTestGroup(name,
rc_exp, stdout_exp, stderr_exp,
rc_xfail, stdout_xfail, stderr_xfail)
else:
grp = TAPTestGroup(name)
grp.parse(rc, out, err)
return grp
def run_tests(self):
start = time.time()
base = self.base_path
nlen = len(base) + 1
results = []
for test_glob in TESTS:
test_glob = os.path.join(base, test_glob)
for test_script in sorted(glob.glob(test_glob)):
tname = os.path.splitext(test_script)[0][nlen:]
if self.to_run:
for to_run in self.to_run:
if to_run in tname:
break
else:
continue
any_executed = True
grp = self.run_test(test_script, tname)
grp.report_for_verbose_level(sys.stdout, self.verbose)
results.append(grp)
grp = TestGroup("HTTP server errors")
for ty, val, tb in self.server_errs:
grp.add_error(traceback.format_tb(tb, 5),
traceback.format_exception_only(ty, val)[-1])
grp.report_for_verbose_level(sys.stdout, self.verbose)
results.append(grp)
sys.stdout.write("\n")
return self.report(results, time.time() - start)
def report(self, results, elapsed):
# There is always one test group, for the HTTP server errors.
if len(results) == 1:
sys.stderr.write("No tests selected for execution.\n")
return 1
n = [0] * T.MAX
for grp in results:
if self.verbose == 0 and not grp.is_successful():
grp.report(sys.stdout, False)
for i, x in enumerate(grp.n): n[i] += x
sys.stdout.write("{:6.3f}s elapsed\n".format(elapsed))
for s in (T.PASS, T.FAIL, T.XPASS, T.XFAIL, T.ERROR, T.SKIP, T.UNSUPPORTED):
if n[s]:
sys.stdout.write(" {:>4} {}\n".format(n[s], s.long_label))
if n[T.FAIL] == 0 and n[T.XPASS] == 0 and n[T.ERROR] == 0:
return 0
else:
return 1
def init():
base_path = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
phantomjs_exe = os.path.normpath(base_path + '/../../../phantom_shim/runner.js')
if not os.path.isfile(phantomjs_exe):
sys.stdout.write("{} is unavailable, cannot run tests.\n"
.format(phantomjs_exe))
sys.exit(1)
parser = argparse.ArgumentParser(description='Run PhantomJS tests.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Increase verbosity of logs (repeat for more)')
parser.add_argument('to_run', nargs='*', metavar='test',
help='tests to run (default: all of them)')
parser.add_argument('--debugger', default=None,
help="Run PhantomJS under DEBUGGER")
parser.add_argument('--run-unsupported', action='count', default=0,
help='Run unsupported tests.')
parser.add_argument('--color', metavar="WHEN", default='auto',
choices=['always', 'never', 'auto'],
help="colorize the output; can be 'always',"
" 'never', or 'auto' (the default)")
options = parser.parse_args()
activate_colorization(options)
runner = TestRunner(base_path, phantomjs_exe, options)
if options.verbose:
rc, ver, err = runner.run_phantomjs('--version', silent=True)
if rc != 0 or len(ver) != 1 or len(err) != 0:
sys.stdout.write(colorize("R", "FATAL")+": Version check failed\n")
for l in ver:
sys.stdout.write(colorize("b", "## " + l) + "\n")
for l in err:
sys.stdout.write(colorize("b", "## " + l) + "\n")
sys.stdout.write(colorize("b", "## exit {}".format(rc)) + "\n")
sys.exit(1)
sys.stdout.write(colorize("b", "## Testing PhantomJS "+ver[0])+"\n")
# Run all the tests in Chatham Islands Standard Time, UTC+12:45.
# This timezone is deliberately chosen to be unusual: it's not a
# whole number of hours offset from UTC *and* it's more than twelve
# hours offset from UTC.
#
# The Chatham Islands do observe daylight savings, but we don't
# implement that because testsuite issues only reproducible on two
# particular days out of the year are too much tsuris.
#
# Note that the offset in a TZ value is the negative of the way it's
# usually written, e.g. UTC+1 would be xxx-1:00.
os.environ["TZ"] = "CIST-12:45:00"
return runner
def main():
runner = init()
try:
with HTTPTestServer(runner.base_path,
runner.signal_server_error,
runner.verbose):
sys.exit(runner.run_tests())
except Exception:
trace = traceback.format_exc(5).split("\n")
# there will be a blank line at the end of 'trace'
sys.stdout.write(colorize("R", "FATAL") + ": " + trace[-2] + "\n")
for line in trace[:-2]:
sys.stdout.write(colorize("b", "## " + line) + "\n")
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
main()
| [] | [] | [
"TEST_HTTPS_BASE",
"TEST_HTTP_BASE",
"TZ"
] | [] | ["TEST_HTTPS_BASE", "TEST_HTTP_BASE", "TZ"] | python | 3 | 0 | |
gaussian_processes/src/models/run_gps_and_plot.py | import os, sys, pickle
from datetime import datetime
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
sys.path.append(os.environ.get("PROJECT_ROOT"))
import GPy_1_0_5
# date config
current_month = datetime(2021, 1, 19)
analysis_month_start = datetime(2020, 3, 1)
analysis_month_end = datetime(2020, 12, 31)
num_months = (analysis_month_end.year - analysis_month_start.year) * 12 + (
analysis_month_end.month - analysis_month_start.month) + 1
remove_months = (current_month.year - analysis_month_end.year) * 12 + (current_month.month - analysis_month_end.month)
plot_start = datetime(2016, 1, 1)
ref_start = datetime(2015, 1, 1)
buffer = (plot_start.year - ref_start.year) * 12 + (plot_start.month - ref_start.month)
# path config
output_path = os.path.join(os.environ.get("PROJECT_ROOT"), 'out')
os.makedirs(output_path, exist_ok=True)
def load_cooling_heating_degree_days():
# https://www.eia.gov/outlooks/steo/data/browser/#/?v=28&f=M&s=&start=199701&end=202212&id=&map=&ctype=linechart&maptype=0&linechart=ZWCDPUS~ZWHDPUS
dd_csv_path = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'processed',
'eia_heating_cooling_degree_days_20210120.csv')
dd_df = pd.read_csv(dd_csv_path, skiprows=4)
dd_df['Month'] = pd.to_datetime(dd_df['Month'])
# plot cooling and heating degree days
dd_df_plot = dd_df[dd_df['Month'] > plot_start]
fig, ax1 = plt.subplots(figsize=(8, 4))
color = 'tab:blue'
ax1.set_xlabel('Time (year)')
ax1.set_ylabel('Cooling Degree Days', color=color)
ax1.plot(dd_df_plot['Month'], dd_df_plot['Cooling Degree days: U.S. (cooling degree days) cooling degree days'],
'.-', color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:red'
ax2.set_ylabel('Heating Degree Days', color=color)
ax2.plot(dd_df_plot['Month'], dd_df_plot['Heating Degree days: U.S. (heating degree days) heating degree days'],
'.--', color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.title('Population-Weighted Heating and Cooling Degree Days \n by Month in the Contiguous U.S.')
fig.tight_layout()
plt.savefig(os.path.join(output_path, '000_coolingheatingdegreedays.pdf'))
plt.close()
return dd_df
def clean_gp_input(X, Y, X_optional_0=None, X_optional_1=None):
    # if there are duplicates in X, take the corresponding Ys, average them,
# and then only return an X, Y pair with unique X values.
# additionally, if there are X_optional_0 and X_optional_1 passed,
# average them and only return an additional X_optional_0 and X_optional_1
# with unique X values
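    # Illustrative example (hypothetical inputs): X = [0, 0, 1], Y = [2, 4, 6]
    # collapses to X = [[0], [1]] and Y = [[3.], [6.]], because the two Y
    # values sharing X == 0 are averaged into one row.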
Y_new = []
X = X.flatten()
Y = Y.flatten()
if X_optional_0 is not None:
X_optional_0 = X_optional_0.flatten()
X_optional_0_new = []
if X_optional_1 is not None:
X_optional_1 = X_optional_1.flatten()
X_optional_1_new = []
uniques, counts = np.unique(X, return_counts=True)
for unique, count in zip(uniques, counts):
y = np.mean(Y[np.where(X == unique)])
Y_new.append(y)
if X_optional_0 is not None:
X_optional_0_add = np.mean(X_optional_0[np.where(X == unique)])
X_optional_0_new.append(X_optional_0_add)
if X_optional_1 is not None:
X_optional_1_add = np.mean(X_optional_1[np.where(X == unique)])
X_optional_1_new.append(X_optional_1_add)
X_all = uniques[:, np.newaxis]
if X_optional_0 is not None:
        X_optional_0_new = np.array(X_optional_0_new).reshape(len(X_optional_0_new), 1)
X_all = np.hstack((X_all, X_optional_0_new))
if X_optional_1 is not None:
X_optional_1_new = np.array(X_optional_1_new).reshape(len(X_optional_1_new), 1)
X_all = np.hstack((X_all, X_optional_1_new))
return X_all, np.array(Y_new)[:, np.newaxis]
def annualize_monthly_x_y(X, Y):
dates = pd.DatetimeIndex(X)
days_in_month = dates.daysinmonth
Y = Y / np.array(days_in_month) * 365.25
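    # e.g. (hypothetical numbers) a 30-day month total of 300 becomes
    # 300 / 30 * 365.25 = 3652.5 on an annualized basis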
return Y
def get_eia923_c(eia923_df):
co2_emissions_col_list = [
'co2_emissions_in_kg_of_co2_Jan',
'co2_emissions_in_kg_of_co2_Feb',
'co2_emissions_in_kg_of_co2_Mar',
'co2_emissions_in_kg_of_co2_Apr',
'co2_emissions_in_kg_of_co2_May',
'co2_emissions_in_kg_of_co2_Jun',
'co2_emissions_in_kg_of_co2_Jul',
'co2_emissions_in_kg_of_co2_Aug',
'co2_emissions_in_kg_of_co2_Sep',
'co2_emissions_in_kg_of_co2_Oct',
'co2_emissions_in_kg_of_co2_Nov',
'co2_emissions_in_kg_of_co2_Dec'
]
# mod_dict = {}
years = ['2015', '2016', '2017', '2018', '2019', '2020']
datetime_list = []
co2_emissions_list = []
for year in years:
for month, col in enumerate(co2_emissions_col_list):
datetime_list.append(pd.to_datetime(f"{month + 1}/1/{year}", infer_datetime_format=False))
co2_emissions_list.append(eia923_df.loc[year][col])
output_dict = {
'Date': datetime_list,
'Co2 Emissions (kg)': co2_emissions_list
}
output_df = pd.DataFrame.from_dict(output_dict)
return output_df
def get_eia923_c_over_e(eia923_df):
c_over_e_col_list = [
'KG_CO2/MWh_Jan',
'KG_CO2/MWh_Feb',
'KG_CO2/MWh_Mar',
'KG_CO2/MWh_Apr',
'KG_CO2/MWh_May',
'KG_CO2/MWh_Jun',
'KG_CO2/MWh_Jul',
'KG_CO2/MWh_Aug',
'KG_CO2/MWh_Sep',
'KG_CO2/MWh_Oct',
'KG_CO2/MWh_Nov',
'KG_CO2/MWh_Dec'
]
years = ['2015', '2016', '2017', '2018', '2019', '2020']
datetime_list = []
c_over_e_list = []
for year in years:
for month, col in enumerate(c_over_e_col_list):
datetime_list.append(pd.to_datetime(f"{month + 1}/1/{year}", infer_datetime_format=False))
c_over_e_list.append(eia923_df.loc[year][col])
output_dict = {
'Date': datetime_list,
'C/E (kg/MWh)': c_over_e_list
}
output_df = pd.DataFrame.from_dict(output_dict)
return output_df
def get_eia923_e(eia923_df):
e_col_list = [
'net_generation_Jan',
'net_generation_Feb',
'net_generation_Mar',
'net_generation_Apr',
'net_generation_May',
'net_generation_Jun',
'net_generation_Jul',
'net_generation_Aug',
'net_generation_Sep',
'net_generation_Oct',
'net_generation_Nov',
'net_generation_Dec'
]
# mod_dict = {}
years = ['2015', '2016', '2017', '2018', '2019', '2020']
datetime_list = []
e_list = []
for year in years:
for month, col in enumerate(e_col_list):
datetime_list.append(pd.to_datetime(f"{month + 1}/1/{year}", infer_datetime_format=False))
e_list.append(eia923_df.loc[year][col])
output_dict = {
'Date': datetime_list,
'Net Generation (MWh)': e_list
}
output_df = pd.DataFrame.from_dict(output_dict)
return output_df
def fit_3d_gp(X_fit_reg, Y_fit_reg):
# fitting 3d GP
# * linear and bias terms on HDD and CDD
# * linear, bias, and std periodic terms on time
k_lin_0 = GPy_1_0_5.kern.Linear(1, active_dims=[0])
k_bias_0 = GPy_1_0_5.kern.Bias(1, active_dims=[0])
k_std_per_0 = GPy_1_0_5.kern.StdPeriodic(1, period=0.2, lengthscale=0.25, active_dims=[0])
k_lin_1 = GPy_1_0_5.kern.Linear(1, active_dims=[1])
k_bias_1 = GPy_1_0_5.kern.Bias(1, active_dims=[1])
k_lin_2 = GPy_1_0_5.kern.Linear(1, active_dims=[2])
k_bias_2 = GPy_1_0_5.kern.Bias(1, active_dims=[2])
kernel_all = k_lin_0 + k_bias_0 + k_std_per_0 + k_lin_1 + k_bias_1 + k_lin_2 + k_bias_2
m = GPy_1_0_5.models.GPRegression(X_fit_reg, Y_fit_reg, kernel_all)
m['sum.std_periodic.period'].constrain_bounded(0.1, 0.3)
m.optimize(messages=True, max_f_eval=1000)
m.optimize_restarts(num_restarts=5)
return m
def run_3d_gp(m, X_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_fit_reg):
# 3D GP run, with real data
plot_mean, plot_var = m.predict(X_fit_reg)
plot_std = np.sqrt(plot_var)
# compile all X's for prediction
X_pred_reg = X_float_compare
X_pred_reg = np.hstack((X_pred_reg, X_cdd_compare))
X_pred_reg = np.hstack((X_pred_reg, X_hdd_compare))
plot_pred_mean, plot_pred_var = m.predict(X_pred_reg)
plot_pred_std = np.sqrt(plot_pred_var)
# undo normalization
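    # e.g. (hypothetical numbers) a normalized mean of 0.5 with Y_min = 100 and
    # Y_max = 300 maps back to 0.5 * (300 - 100) + 100 = 200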
plot_mean = plot_mean * (Y_max - Y_min) + Y_min
plot_std = plot_std * (Y_max - Y_min)
plot_pred_mean = plot_pred_mean * (Y_max - Y_min) + Y_min
plot_pred_std = plot_pred_std * (Y_max - Y_min)
Y_fit_reg_unnormalized = Y_fit_reg * (Y_max - Y_min) + Y_min
return plot_std, plot_mean, plot_pred_std, plot_pred_mean, Y_fit_reg_unnormalized
def plot_3d_gp(X_fit_datetime_reg, X_compare, plot_std, plot_mean, plot_pred_std, plot_pred_mean,
Y_fit_reg_unnormalized, Y_compare_unnormalized, title=None, ylabel=None, savepath=None, units_multiplier=1.0):
# plot final 3D GP results using stacked bars
X_fit_datetime_reg_str = np.datetime_as_string(X_fit_datetime_reg, unit='M')
X_compare_datetime_str = np.datetime_as_string(X_compare[:, 0], unit='M')
plt.figure(figsize=(8, 4))
bar_width = 0.75
plt.xticks(rotation=90)
plt.bar(X_fit_datetime_reg_str, 1.96 * plot_std[:, 0] * units_multiplier, bar_width,
bottom=(plot_mean[:, 0]) * units_multiplier, color="lightcyan", edgecolor='k', zorder=1,
label='GP 95% credible interval, historical')
plt.bar(X_fit_datetime_reg_str, 1.96 * plot_std[:, 0] * units_multiplier, bar_width,
bottom=(plot_mean[:, 0] - 1.96 * plot_std[:, 0]) * units_multiplier, color="lightcyan", edgecolor='k', zorder=1)
plt.bar(X_compare_datetime_str, (1.96 * plot_pred_std[:, 0]) * units_multiplier, bar_width,
bottom=plot_pred_mean[:, 0] * units_multiplier, color="lavenderblush", edgecolor='k', zorder=1,
label='GP 95% credible interval, forecast')
plt.bar(X_compare_datetime_str, (1.96 * plot_pred_std[:, 0]) * units_multiplier, bar_width,
bottom=(plot_pred_mean[:, 0] - 1.96 * plot_pred_std[:, 0]) * units_multiplier, color="lavenderblush", edgecolor='k', zorder=1)
ax = plt.gca()
for index, label in enumerate(ax.xaxis.get_ticklabels()):
if index % 3 != 0:
label.set_visible(False)
plt.scatter(X_fit_datetime_reg_str, Y_fit_reg_unnormalized[:, 0] * units_multiplier, c="black", marker="x", zorder=2,
label='EIA data, used to fit GP')
plt.scatter(X_compare_datetime_str, Y_compare_unnormalized[:, 0] * units_multiplier, c="red", marker="x", zorder=2,
label='EIA data, not used to fit GP')
plt.title(title)
plt.xlabel('Time (year)')
plt.ylabel(ylabel)
plt.legend(loc='lower left')
plt.tight_layout()
plt.savefig(savepath)
plt.close()
def get_logging_output(m, X_float_compare, Y_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_compare_unnormalized, feature=None):
X_compare_3d_float = X_float_compare
X_compare_3d_float_reg, Y_compare_3d_reg = clean_gp_input(X_compare_3d_float, Y_compare, X_optional_0=X_cdd_compare,
X_optional_1=X_hdd_compare)
# calculate percent error
Y_compare_3d_preds_normalized = m.predict(X_compare_3d_float_reg)[0]
Y_compare_3d_preds_normalized_std = np.sqrt(m.predict(X_compare_3d_float_reg)[1])
Y_compare_3d_preds = Y_compare_3d_preds_normalized * (Y_max - Y_min) + Y_min
Y_compare_3d_preds_std = Y_compare_3d_preds_normalized_std * (Y_max - Y_min)
percent_error_3d = (Y_compare_unnormalized - Y_compare_3d_preds) / Y_compare_3d_preds
percent_error_95_CI_range = (1.96 * Y_compare_3d_preds_std) / Y_compare_3d_preds
# save output
    month_names = ['march', 'april', 'may', 'june', 'july', 'august',
                   'september', 'october', 'november', 'december']
    summary_3d_dict = {'feature': feature}
    for i, month in enumerate(month_names):
        summary_3d_dict[f'{month} 2020 value'] = Y_compare_unnormalized[i][0]
        summary_3d_dict[f'{month} 2020 counterfactual'] = Y_compare_3d_preds[i][0]
        summary_3d_dict[f'{month} 2020 fraction deviation'] = percent_error_3d[i][0]
        summary_3d_dict[f'{month} 2020 95% CI (+/- %)'] = percent_error_95_CI_range[i][0]
        summary_3d_dict[f'{month} 2020 95% CI low'] = Y_compare_3d_preds[i][0] - 1.96 * Y_compare_3d_preds_std[i][0]
        summary_3d_dict[f'{month} 2020 95% CI high'] = Y_compare_3d_preds[i][0] + 1.96 * Y_compare_3d_preds_std[i][0]
return pd.DataFrame(data=summary_3d_dict, index=[0])
def preprocess_data(X, Y, units_multiplier=1.0):
# make new df so we can merge cooling and heating degree days into dataset
dd_df = load_cooling_heating_degree_days()
df_all = pd.DataFrame(data=X, columns=['X'])
df_all['Y'] = Y
df_all = df_all.merge(dd_df, left_on='X', right_on='Month')
X_cdd = df_all['Cooling Degree days: U.S. (cooling degree days) cooling degree days']
X_hdd = df_all['Heating Degree days: U.S. (heating degree days) heating degree days']
X_cdd = X_cdd.values.reshape(X_cdd.values.size, 1)
X_hdd = X_hdd.values.reshape(X_hdd.values.size, 1)
X_float = np.array(X, dtype=float)
X_min = np.min(X_float)
X_max = np.max(X_float)
Y = np.array(Y, dtype=float) * units_multiplier
Y_min = np.min(Y)
Y_max = np.max(Y)
X_cdd_min = np.min(X_cdd)
X_cdd_max = np.max(X_cdd)
X_hdd_min = np.min(X_hdd)
X_hdd_max = np.max(X_hdd)
# normalize
X_float = (X_float - X_min) / (X_max - X_min)
Y = (Y - Y_min) / (Y_max - Y_min)
X_cdd = (X_cdd - X_cdd_min) / (X_cdd_max - X_cdd_min)
X_hdd = (X_hdd - X_hdd_min) / (X_hdd_max - X_hdd_min)
# remove test months from the model!
X_fit = X_float[:-num_months, :]
X_fit_datetime = X[:-num_months, :]
Y_fit = Y[:-num_months, :]
X_cdd_fit = X_cdd[:-num_months, :]
X_hdd_fit = X_hdd[:-num_months, :]
X_float_compare = X_float[-num_months:, :]
X_compare = X[-num_months:, :]
Y_compare = Y[-num_months:, :]
X_cdd_compare = X_cdd[-num_months:, :]
X_hdd_compare = X_hdd[-num_months:, :]
X_fit_datetime_reg = np.unique(X_fit_datetime)
# undo normalization
Y_compare_unnormalized = Y_compare * (Y_max - Y_min) + Y_min
X_fit_reg, Y_fit_reg = clean_gp_input(X_fit, Y_fit, X_optional_0=X_cdd_fit, X_optional_1=X_hdd_fit)
return X_fit_reg, Y_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, X_fit_datetime_reg, X_compare, Y_compare_unnormalized, Y_compare
def plot_c():
eia_p = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'processed', 'eia923_monthly_c_and_c_over_e_and_e.p')
eia923_df = pd.read_pickle(eia_p)
final_em = get_eia923_c(eia923_df)
X_923 = final_em['Date'].values
Y_923 = final_em['Co2 Emissions (kg)'].values
X_923 = X_923[buffer:]
Y_923 = Y_923[buffer:]
# process annualized data
Y_923 = annualize_monthly_x_y(X_923, Y_923)
# restructure
X = X_923[np.newaxis].T
Y = Y_923[np.newaxis].T
X_fit_reg, Y_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, X_fit_datetime_reg, X_compare, Y_compare_unnormalized, Y_compare = preprocess_data(
X, Y, units_multiplier=(1.0 / 1e3 / 1e6))
# fit gp
m = fit_3d_gp(X_fit_reg, Y_fit_reg)
# run gp
plot_std, plot_mean, plot_pred_std, plot_pred_mean, Y_fit_reg_unnormalized = run_3d_gp(m, X_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_fit_reg)
# plot final 3D GP results using stacked bars
plot_3d_gp(X_fit_datetime_reg, X_compare, plot_std, plot_mean, plot_pred_std, plot_pred_mean,
Y_fit_reg_unnormalized, Y_compare_unnormalized, title='Carbon Dioxide Emissions, C',
ylabel='Carbon Dioxide Emissions (MMT, annualized)',
savepath=os.path.join(output_path, '001_3D_c-gp-bars.pdf'))
summary_3d_df = get_logging_output(m, X_float_compare, Y_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_compare_unnormalized, feature='c')
return summary_3d_df
def plot_c_over_e():
eia_p = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'processed', 'eia923_monthly_c_and_c_over_e_and_e.p')
eia923_df = pd.read_pickle(eia_p)
final_fe = get_eia923_c_over_e(eia923_df)
X_923 = final_fe['Date'].values
Y_923 = final_fe['C/E (kg/MWh)'].values
X_923 = X_923[buffer:]
Y_923 = Y_923[buffer:]
# restructure
X = X_923[np.newaxis].T
Y = Y_923[np.newaxis].T
X_fit_reg, Y_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, X_fit_datetime_reg, X_compare, Y_compare_unnormalized, Y_compare = preprocess_data(
X, Y, units_multiplier=(1.0))
# fit gp
m = fit_3d_gp(X_fit_reg, Y_fit_reg)
# run gp
plot_std, plot_mean, plot_pred_std, plot_pred_mean, Y_fit_reg_unnormalized = run_3d_gp(m, X_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_fit_reg)
# plot final 3D GP results using stacked bars
plot_3d_gp(X_fit_datetime_reg, X_compare, plot_std, plot_mean, plot_pred_std, plot_pred_mean,
Y_fit_reg_unnormalized, Y_compare_unnormalized, title='Carbon Intensity of Electricity Supply, C/E',
ylabel='Carbon Intensity of Electricity Supply (kg/MWh)',
savepath=os.path.join(output_path, '002_3D_c_over_e-gp-bars.pdf'))
summary_3d_df = get_logging_output(m, X_float_compare, Y_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_compare_unnormalized, feature='c_over_e')
return summary_3d_df
def plot_e():
eia_p = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'processed', 'eia923_monthly_c_and_c_over_e_and_e.p')
eia923_df = pd.read_pickle(eia_p)
final_gen = get_eia923_e(eia923_df)
X_923 = final_gen['Date'].values
Y_923 = final_gen['Net Generation (MWh)'].values
X_923 = X_923[buffer:]
Y_923 = Y_923[buffer:]
# process annualized data
Y_923 = annualize_monthly_x_y(X_923, Y_923)
# restructure
X = X_923[np.newaxis].T
Y = Y_923[np.newaxis].T
X_fit_reg, Y_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, X_fit_datetime_reg, X_compare, Y_compare_unnormalized, Y_compare = preprocess_data(
X, Y, units_multiplier=(1.0 / 1e6))
# fit gp
m = fit_3d_gp(X_fit_reg, Y_fit_reg)
# run gp
plot_std, plot_mean, plot_pred_std, plot_pred_mean, Y_fit_reg_unnormalized = run_3d_gp(m, X_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_fit_reg)
# plot final 3D GP results using stacked bars
plot_3d_gp(X_fit_datetime_reg, X_compare, plot_std, plot_mean, plot_pred_std, plot_pred_mean,
Y_fit_reg_unnormalized, Y_compare_unnormalized, title='Electricity Generation, E',
ylabel='Electricity Generation (TWh, annualized)',
savepath=os.path.join(output_path, '0035_3D_e-gp-bars.pdf'),
units_multiplier=1.0
)
summary_3d_df = get_logging_output(m, X_float_compare, Y_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_compare_unnormalized, feature='e')
return summary_3d_df
def plot_c_by_fuel(fuel_source):
eia_p = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'processed', 'eia923_monthly_c_by_fuel.p')
eia923_df = pd.read_pickle(eia_p)
dd_df = load_cooling_heating_degree_days()
X_923 = eia923_df.index.values
Y_923 = eia923_df[fuel_source].values
X_923 = X_923[buffer:]
Y_923 = Y_923[buffer:]
# process annualized data
Y_923 = annualize_monthly_x_y(X_923, Y_923)
# restructure
X = X_923[np.newaxis].T
Y = Y_923[np.newaxis].T
X_fit_reg, Y_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, X_fit_datetime_reg, X_compare, Y_compare_unnormalized, Y_compare = preprocess_data(
X, Y)
# fit gp
m = fit_3d_gp(X_fit_reg, Y_fit_reg)
# run gp
plot_std, plot_mean, plot_pred_std, plot_pred_mean, Y_fit_reg_unnormalized = run_3d_gp(m, X_fit_reg, X_float_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_fit_reg)
# plot final 3D GP results using stacked bars
plot_3d_gp(X_fit_datetime_reg, X_compare, plot_std, plot_mean, plot_pred_std, plot_pred_mean,
Y_fit_reg_unnormalized, Y_compare_unnormalized,
title='Carbon Dioxide Emissions from ' + fuel_source + ', C',
ylabel='Carbon Dioxide Emissions (MMT, annualized)',
savepath=os.path.join(output_path, 'c-gp-3d-gp-bars-' + fuel_source + '.pdf'))
summary_3d_df = get_logging_output(m, X_float_compare, Y_compare, X_cdd_compare, X_hdd_compare, Y_max, Y_min, Y_compare_unnormalized, feature='c(' + fuel_source + ')')
return summary_3d_df
# plot everything
summary_c_df = plot_c()
summary_e_df = plot_e()
summary_c_over_e_df = plot_c_over_e()
summary_c_coal_df = plot_c_by_fuel('Coal')
summary_c_oil_df = plot_c_by_fuel('Oil')
summary_c_gas_df = plot_c_by_fuel('Gas')
summary_all_df = pd.concat([summary_c_df, summary_e_df, summary_c_over_e_df, summary_c_coal_df, summary_c_oil_df, summary_c_gas_df])
# save results to CSV
summary_all_df.to_csv(os.path.join(output_path, 'zzz_summary_3d.csv'), index=False, header=True)
| [] | [] | [
"PROJECT_ROOT"
] | [] | ["PROJECT_ROOT"] | python | 1 | 0 | |
adapter/sites/open/config/default.py | # -*- coding: utf-8 -*-
import os
# sdk
ESB_SDK_NAME = "adapter.sites.open.blueking.component"
# bk_login
OAUTH_COOKIES_PARAMS = {"bk_token": "bk_token"}
RUN_VER_DISPLAY = os.environ.get("RUN_VER_DISPLAY", "企业版")
INIT_SUPERUSER = ["admin"]
BIZ_ACCESS_URL = os.getenv("BKAPP_BIZ_ACCESS_URL", "")
DEMO_BIZ_ID = os.getenv("BKAPP_DEMO_BIZ_ID", "")
DEMO_BIZ_EDIT_ENABLED = bool(os.getenv("BKAPP_DEMO_BIZ_EDIT_ENABLED", ""))
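# note: bool() on a string only checks emptiness, so any non-empty value
# (including "0" or "false") makes DEMO_BIZ_EDIT_ENABLED True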
# footer configuration
FOOTER_CONFIG = {
"footer": [
{
"zh": [
{
"text": "QQ咨询(800802001)",
"link": "http://wpa.b.qq.com/cgi/wpa.php?ln=1&key=XzgwMDgwMjAwMV80NDMwOTZfODAwODAyMDAxXzJf",
},
{"text": "蓝鲸论坛", "link": "https://bk.tencent.com/s-mart/community"},
{"text": "蓝鲸官网", "link": "https://bk.tencent.com/"},
{"text": "蓝鲸智云桌面", "link": ""},
],
"en": [
{
"text": "QQ(800802001)",
"link": "http://wpa.b.qq.com/cgi/wpa.php?ln=1&key=XzgwMDgwMjAwMV80NDMwOTZfODAwODAyMDAxXzJf",
},
{"text": "BlueKing Forum", "link": "https://bk.tencent.com/s-mart/community"},
{"text": "Blueking Official", "link": "https://bk.tencent.com/"},
{"text": "BlueKing Desktop", "link": ""},
],
}
],
"copyright": "Copyright © 2012-2020 Tencent BlueKing. All Rights Reserved.",
}
| [] | [] | [
"BKAPP_DEMO_BIZ_ID",
"RUN_VER_DISPLAY",
"BKAPP_BIZ_ACCESS_URL",
"BKAPP_DEMO_BIZ_EDIT_ENABLED"
] | [] | ["BKAPP_DEMO_BIZ_ID", "RUN_VER_DISPLAY", "BKAPP_BIZ_ACCESS_URL", "BKAPP_DEMO_BIZ_EDIT_ENABLED"] | python | 4 | 0 | |
pkg/client/connector/install.go | package connector
import (
"bytes"
"context"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
errors2 "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/datawire/ambassador/pkg/kates"
"github.com/datawire/dlib/dlog"
"github.com/datawire/dlib/dtime"
"github.com/telepresenceio/telepresence/rpc/v2/manager"
"github.com/telepresenceio/telepresence/v2/pkg/client"
"github.com/telepresenceio/telepresence/v2/pkg/install"
"github.com/telepresenceio/telepresence/v2/pkg/install/resource"
)
type installer struct {
*k8sCluster
}
func newTrafficManagerInstaller(kc *k8sCluster) (*installer, error) {
return &installer{k8sCluster: kc}, nil
}
const annTelepresenceActions = install.DomainPrefix + "actions"
// this is modified in tests
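// For example (hypothetical invocation), exporting TELEPRESENCE_MANAGER_NAMESPACE=staging
// before starting the connector makes the traffic manager be installed into, and looked
// up in, "staging" instead of the default "ambassador" namespace.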
var managerNamespace = func() string {
if ns := os.Getenv("TELEPRESENCE_MANAGER_NAMESPACE"); ns != "" {
return ns
}
return "ambassador"
}()
func managerImageName(env client.Env) string {
return fmt.Sprintf("%s/tel2:%s", env.Registry, strings.TrimPrefix(client.Version(), "v"))
}
// removeManager will remove the agent from all deployments listed in the given agents slice. Unless agentsOnly is true,
// it will also remove the traffic-manager service and deployment.
func (ki *installer) removeManagerAndAgents(c context.Context, agentsOnly bool, agents []*manager.AgentInfo, env *client.Env) error {
// Removes the manager and all agents from the cluster
var errs []error
var errsLock sync.Mutex
addError := func(e error) {
errsLock.Lock()
errs = append(errs, e)
errsLock.Unlock()
}
// Remove the agent from all deployments
wg := sync.WaitGroup{}
wg.Add(len(agents))
for _, ai := range agents {
ai := ai // pin it
go func() {
defer wg.Done()
kind, err := ki.findObjectKind(c, ai.Namespace, ai.Name)
if err != nil {
addError(err)
return
}
var agent kates.Object
switch kind {
case "ReplicaSet":
agent, err = ki.findReplicaSet(c, ai.Namespace, ai.Name)
if err != nil {
if !errors2.IsNotFound(err) {
addError(err)
}
return
}
case "Deployment":
agent, err = ki.findDeployment(c, ai.Namespace, ai.Name)
if err != nil {
if !errors2.IsNotFound(err) {
addError(err)
}
return
}
case "StatefulSet":
agent, err = ki.findStatefulSet(c, ai.Namespace, ai.Name)
if err != nil {
if !errors2.IsNotFound(err) {
addError(err)
}
return
}
default:
addError(fmt.Errorf("agent %q associated with unsupported workload kind %q, cannot be removed", ai.Name, kind))
return
}
// Assume that the agent was added using the mutating webhook when no actions
// annotation can be found in the workload.
ann := agent.GetAnnotations()
if ann == nil {
return
}
if _, ok := ann[annTelepresenceActions]; !ok {
return
}
if err = ki.undoObjectMods(c, agent); err != nil {
addError(err)
return
}
if err = ki.waitForApply(c, ai.Namespace, ai.Name, agent); err != nil {
addError(err)
}
}()
}
// wait for all agents to be removed
wg.Wait()
if !agentsOnly && len(errs) == 0 {
// agent removal succeeded. Remove the manager resources
if err := resource.DeleteTrafficManager(c, ki.client, managerNamespace, env); err != nil {
addError(err)
}
}
switch len(errs) {
case 0:
case 1:
return errs[0]
default:
bld := bytes.NewBufferString("multiple errors:")
for _, err := range errs {
bld.WriteString("\n ")
bld.WriteString(err.Error())
}
return errors.New(bld.String())
}
return nil
}
// Finds the Referenced Service in an objects' annotations
func (ki *installer) getSvcFromObjAnnotation(c context.Context, obj kates.Object) (*kates.Service, error) {
var actions workloadActions
annotationsFound, err := getAnnotation(obj, &actions)
if err != nil {
return nil, err
}
namespace := obj.GetNamespace()
if !annotationsFound {
return nil, install.ObjErrorf(obj, "annotations[%q]: annotation is not set", annTelepresenceActions)
}
svcName := actions.ReferencedService
if svcName == "" {
return nil, install.ObjErrorf(obj, "annotations[%q]: field \"ReferencedService\" is not set", annTelepresenceActions)
}
svc, err := ki.findSvc(c, namespace, svcName)
if err != nil && !kates.IsNotFound(err) {
return nil, err
}
if svc == nil {
return nil, install.ObjErrorf(obj, "annotations[%q]: field \"ReferencedService\" references unfound service %q", annTelepresenceActions, svcName)
}
return svc, nil
}
// Determines if the service associated with a pre-existing intercept exists or if
// the port to-be-intercepted has changed. It raises an error if either of these
// cases exist since to go forward with an intercept would require changing the
// configuration of the agent.
func checkSvcSame(c context.Context, obj kates.Object, svcName, portNameOrNumber string) error {
var actions workloadActions
annotationsFound, err := getAnnotation(obj, &actions)
if err != nil {
return err
}
if annotationsFound {
// If the Service in the annotation doesn't match the svcName passed in
// then the service to be used with the intercept has changed
curSvc := actions.ReferencedService
if svcName != "" && curSvc != svcName {
return install.ObjErrorf(obj, "associated Service changed from %q to %q", curSvc, svcName)
}
// If the portNameOrNumber passed in doesn't match the referenced service
// port name or number in the annotation, then the servicePort to be intercepted
// has changed.
if portNameOrNumber != "" {
curSvcPortName := actions.ReferencedServicePortName
curSvcPort := actions.ReferencedServicePort
if curSvcPortName != portNameOrNumber && curSvcPort != portNameOrNumber {
return install.ObjErrorf(obj, "port changed from %q to %q", curSvcPort, portNameOrNumber)
}
}
}
return nil
}
var agentNotFound = errors.New("no such agent")
// This does a lot of things but at a high level it ensures that the traffic agent
// is installed alongside the proper workload. In doing that, it also ensures that
// the workload is referenced by a service. Lastly, it returns the service UID
// associated with the workload since this is where that correlation is made.
func (ki *installer) ensureAgent(c context.Context, namespace, name, svcName, portNameOrNumber, agentImageName string) (string, string, error) {
kind, err := ki.findObjectKind(c, namespace, name)
if err != nil {
return "", "", err
}
var obj kates.Object
switch kind {
case "ReplicaSet":
obj, err = ki.findReplicaSet(c, namespace, name)
if err != nil {
return "", "", err
}
case "Deployment":
obj, err = ki.findDeployment(c, namespace, name)
if err != nil {
return "", "", err
}
case "StatefulSet":
obj, err = ki.findStatefulSet(c, namespace, name)
if err != nil {
return "", "", err
}
default:
return "", "", fmt.Errorf("unsupported workload kind %q, cannot ensure agent", kind)
}
podTemplate, err := install.GetPodTemplateFromObject(obj)
if err != nil {
return "", "", err
}
var svc *kates.Service
if a := podTemplate.ObjectMeta.Annotations; a != nil && a[install.InjectAnnotation] == "enabled" {
// agent is injected using a mutating webhook. Get its service and skip the rest
svc, err = install.FindMatchingService(c, ki.client, portNameOrNumber, svcName, namespace, podTemplate.Labels)
if err != nil {
return "", "", err
}
return string(svc.GetUID()), kind, nil
}
var agentContainer *kates.Container
for i := range podTemplate.Spec.Containers {
container := &podTemplate.Spec.Containers[i]
if container.Name == install.AgentContainerName {
agentContainer = container
break
}
}
if err := checkSvcSame(c, obj, svcName, portNameOrNumber); err != nil {
msg := fmt.Sprintf(
`%s already being used for intercept with a different service
configuration. To intercept this with your new configuration, please use
telepresence uninstall --agent %s This will cancel any intercepts that
already exist for this service`, kind, obj.GetName())
return "", "", errors.Wrap(err, msg)
}
switch {
case agentContainer == nil:
dlog.Infof(c, "no agent found for %s %s.%s", kind, name, namespace)
dlog.Infof(c, "Using port name or number %q", portNameOrNumber)
matchingSvc, err := install.FindMatchingService(c, ki.client, portNameOrNumber, svcName, namespace, podTemplate.Labels)
if err != nil {
return "", "", err
}
obj, svc, err = addAgentToWorkload(c, portNameOrNumber, agentImageName, obj, matchingSvc)
if err != nil {
return "", "", err
}
case agentContainer.Image != agentImageName:
var actions workloadActions
ok, err := getAnnotation(obj, &actions)
if err != nil {
return "", "", err
} else if !ok {
// This can only happen if someone manually tampered with the annTelepresenceActions annotation
return "", "", install.ObjErrorf(obj, "annotations[%q]: annotation is not set", annTelepresenceActions)
}
dlog.Debugf(c, "Updating agent for %s %s.%s", kind, name, namespace)
aaa := &workloadActions{
Version: actions.Version,
AddTrafficAgent: actions.AddTrafficAgent,
}
explainUndo(c, aaa, obj)
aaa.AddTrafficAgent.ImageName = agentImageName
agentContainer.Image = agentImageName
explainDo(c, aaa, obj)
default:
dlog.Debugf(c, "%s %s.%s already has an installed and up-to-date agent", kind, name, namespace)
}
if err := ki.client.Update(c, obj, obj); err != nil {
return "", "", err
}
if svc != nil {
if err := ki.client.Update(c, svc, svc); err != nil {
return "", "", err
}
} else {
// If the service is still nil, that's because an agent already exists that we can reuse.
// So we get the service from the deployments annotation so that we can extract the UID.
svc, err = ki.getSvcFromObjAnnotation(c, obj)
if err != nil {
return "", "", err
}
}
if err := ki.waitForApply(c, namespace, name, obj); err != nil {
return "", "", err
}
return string(svc.GetUID()), kind, nil
}
// The following <workload>Updated functions all contain the logic for
// determining if that specific workload type has successfully been updated
// based on the object's metadata. We have separate ones for each object
// because the criteria is slightly different for each.
func replicaSetUpdated(rs *kates.ReplicaSet, origGeneration int64) bool {
applied := rs.ObjectMeta.Generation >= origGeneration &&
rs.Status.ObservedGeneration == rs.ObjectMeta.Generation &&
(rs.Spec.Replicas == nil || rs.Status.Replicas >= *rs.Spec.Replicas) &&
rs.Status.FullyLabeledReplicas == rs.Status.Replicas &&
rs.Status.AvailableReplicas == rs.Status.Replicas
return applied
}
func deploymentUpdated(dep *kates.Deployment, origGeneration int64) bool {
applied := dep.ObjectMeta.Generation >= origGeneration &&
dep.Status.ObservedGeneration == dep.ObjectMeta.Generation &&
(dep.Spec.Replicas == nil || dep.Status.UpdatedReplicas >= *dep.Spec.Replicas) &&
dep.Status.UpdatedReplicas == dep.Status.Replicas &&
dep.Status.AvailableReplicas == dep.Status.Replicas
return applied
}
func statefulSetUpdated(statefulSet *kates.StatefulSet, origGeneration int64) bool {
applied := statefulSet.ObjectMeta.Generation >= origGeneration &&
statefulSet.Status.ObservedGeneration == statefulSet.ObjectMeta.Generation &&
(statefulSet.Spec.Replicas == nil || statefulSet.Status.UpdatedReplicas >= *statefulSet.Spec.Replicas) &&
statefulSet.Status.UpdatedReplicas == statefulSet.Status.Replicas &&
statefulSet.Status.CurrentReplicas == statefulSet.Status.Replicas
return applied
}
func (ki *installer) waitForApply(c context.Context, namespace, name string, obj kates.Object) error {
tos := &client.GetConfig(c).Timeouts
c, cancel := tos.TimeoutContext(c, client.TimeoutApply)
defer cancel()
origGeneration := int64(0)
if obj != nil {
origGeneration = obj.GetGeneration()
}
kind, err := ki.findObjectKind(c, namespace, name)
if err != nil {
return err
}
switch kind {
case "ReplicaSet":
err := ki.refreshReplicaSet(c, name, namespace)
if err != nil {
return err
}
for {
dtime.SleepWithContext(c, time.Second)
if err := c.Err(); err != nil {
return err
}
rs, err := ki.findReplicaSet(c, namespace, name)
if err != nil {
return client.CheckTimeout(c, err)
}
if replicaSetUpdated(rs, origGeneration) {
dlog.Debugf(c, "Replica Set %s.%s successfully applied", name, namespace)
return nil
}
}
case "Deployment":
for {
dtime.SleepWithContext(c, time.Second)
if err := c.Err(); err != nil {
return err
}
dep, err := ki.findDeployment(c, namespace, name)
if err != nil {
return client.CheckTimeout(c, err)
}
if deploymentUpdated(dep, origGeneration) {
dlog.Debugf(c, "deployment %s.%s successfully applied", name, namespace)
return nil
}
}
case "StatefulSet":
for {
dtime.SleepWithContext(c, time.Second)
if err := c.Err(); err != nil {
return err
}
statefulSet, err := ki.findStatefulSet(c, namespace, name)
if err != nil {
return client.CheckTimeout(c, err)
}
if statefulSetUpdated(statefulSet, origGeneration) {
dlog.Debugf(c, "statefulset %s.%s successfully applied", name, namespace)
return nil
}
}
default:
return fmt.Errorf("unsupported workload kind %q, cannot wait for apply", kind)
}
}
// refreshReplicaSet finds pods owned by a given ReplicaSet and deletes them.
// We need this because updating a Replica Set does *not* generate new
// pods if the desired amount already exists.
func (ki *installer) refreshReplicaSet(c context.Context, name, namespace string) error {
rs, err := ki.findReplicaSet(c, namespace, name)
if err != nil {
return err
}
podNames, err := ki.podNames(c, namespace)
if err != nil {
return err
}
for _, podName := range podNames {
// We only care about pods that are associated with the ReplicaSet
// so we filter them out here
if !strings.Contains(podName, name) {
continue
}
podInfo, err := ki.findPod(c, namespace, podName)
if err != nil {
return err
}
for _, ownerRef := range podInfo.OwnerReferences {
if ownerRef.UID == rs.UID {
dlog.Infof(c, "Deleting pod %s owned by rs %s", podInfo.Name, rs.Name)
pod := &kates.Pod{
TypeMeta: kates.TypeMeta{
Kind: "Pod",
},
ObjectMeta: kates.ObjectMeta{
Namespace: podInfo.Namespace,
Name: podInfo.Name,
},
}
if err := ki.client.Delete(c, pod, pod); err != nil {
return err
}
}
}
}
return nil
}
func getAnnotation(obj kates.Object, data completeAction) (bool, error) {
ann := obj.GetAnnotations()
if ann == nil {
return false, nil
}
ajs, ok := ann[annTelepresenceActions]
if !ok {
return false, nil
}
if err := data.UnmarshalAnnotation(ajs); err != nil {
return false, install.ObjErrorf(obj, "annotations[%q]: unable to parse annotation: %q: %w",
annTelepresenceActions, ajs, err)
}
annV, err := data.TelVersion()
if err != nil {
return false, install.ObjErrorf(obj, "annotations[%q]: unable to parse semantic version %q: %w",
annTelepresenceActions, ajs, err)
}
ourV := client.Semver()
// Compare major and minor versions. 100% backward compatibility is assumed and greater patch versions are allowed
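	// e.g. (illustrative) an annotation written by v2.3.1 is accepted by a binary at
	// v2.3.0 or v2.4.7, but rejected by a binary at v2.2.x.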
if ourV.Major < annV.Major || ourV.Major == annV.Major && ourV.Minor < annV.Minor {
return false, install.ObjErrorf(obj, "annotations[%q]: the version in the annotation (%v) is more recent than this binary's version (%v)",
annTelepresenceActions,
annV, ourV)
}
return true, nil
}
func (ki *installer) undoObjectMods(c context.Context, obj kates.Object) error {
referencedService, err := undoObjectMods(c, obj)
if err != nil {
return err
}
svc, err := ki.findSvc(c, obj.GetNamespace(), referencedService)
if err != nil && !kates.IsNotFound(err) {
return err
}
if svc != nil {
if err = ki.undoServiceMods(c, svc); err != nil {
return err
}
}
return ki.client.Update(c, obj, obj)
}
func undoObjectMods(c context.Context, obj kates.Object) (string, error) {
var actions workloadActions
ok, err := getAnnotation(obj, &actions)
	if err != nil {
		return "", err
	}
if !ok {
return "", install.ObjErrorf(obj, "agent is not installed")
}
if err = actions.Undo(obj); err != nil {
return "", err
}
annotations := obj.GetAnnotations()
delete(annotations, annTelepresenceActions)
if len(annotations) == 0 {
obj.SetAnnotations(nil)
}
explainUndo(c, &actions, obj)
return actions.ReferencedService, nil
}
func (ki *installer) undoServiceMods(c context.Context, svc *kates.Service) error {
if err := undoServiceMods(c, svc); err != nil {
return err
}
return ki.client.Update(c, svc, svc)
}
func undoServiceMods(c context.Context, svc *kates.Service) error {
var actions svcActions
ok, err := getAnnotation(svc, &actions)
if !ok {
return err
}
if err = actions.Undo(svc); err != nil {
return err
}
delete(svc.Annotations, annTelepresenceActions)
if len(svc.Annotations) == 0 {
svc.Annotations = nil
}
explainUndo(c, &actions, svc)
return nil
}
// addAgentToWorkload takes a given workload object and a service and
// determines which container + port to use for an intercept. It also
// prepares and performs modifications to the obj and/or service.
func addAgentToWorkload(
c context.Context,
portNameOrNumber string,
agentImageName string,
object kates.Object, matchingService *kates.Service,
) (
kates.Object,
*kates.Service,
error,
) {
podTemplate, err := install.GetPodTemplateFromObject(object)
if err != nil {
return nil, nil, err
}
cns := podTemplate.Spec.Containers
servicePort, container, containerPortIndex, err := install.FindMatchingPort(cns, portNameOrNumber, matchingService)
if err != nil {
return nil, nil, install.ObjErrorf(object, err.Error())
}
if matchingService.Spec.ClusterIP == "None" {
dlog.Debugf(c,
"Intercepts of headless service: %s likely won't work as expected "+
"see https://github.com/telepresenceio/telepresence/issues/1632",
matchingService.Name)
}
dlog.Debugf(c, "using service %q port %q when intercepting %s %q",
matchingService.Name,
func() string {
if servicePort.Name != "" {
return servicePort.Name
}
return strconv.Itoa(int(servicePort.Port))
}(),
object.GetObjectKind().GroupVersionKind().Kind,
object.GetName())
version := client.Semver().String()
// Try to detect the container port we'll be taking over.
var containerPort struct {
Name string // If the existing container port doesn't have a name, we'll make one up.
Number uint16
Protocol corev1.Protocol
}
// Start by filling from the servicePort; if these are the zero values, that's OK.
svcHasTargetPort := true
if servicePort.TargetPort.Type == intstr.Int {
if servicePort.TargetPort.IntVal == 0 {
containerPort.Number = uint16(servicePort.Port)
svcHasTargetPort = false
} else {
containerPort.Number = uint16(servicePort.TargetPort.IntVal)
}
} else {
containerPort.Name = servicePort.TargetPort.StrVal
}
containerPort.Protocol = servicePort.Protocol
// Now fill from the Deployment's containerPort.
usedContainerName := false
if containerPortIndex >= 0 {
if containerPort.Name == "" {
containerPort.Name = container.Ports[containerPortIndex].Name
if containerPort.Name != "" {
usedContainerName = true
}
}
if containerPort.Number == 0 {
containerPort.Number = uint16(container.Ports[containerPortIndex].ContainerPort)
}
if containerPort.Protocol == "" {
containerPort.Protocol = container.Ports[containerPortIndex].Protocol
}
}
if containerPort.Number == 0 {
return nil, nil, install.ObjErrorf(object, "unable to add: the container port cannot be determined")
}
if containerPort.Name == "" {
containerPort.Name = fmt.Sprintf("tx-%d", containerPort.Number)
}
// Figure what modifications we need to make.
workloadMod := &workloadActions{
Version: version,
ReferencedService: matchingService.Name,
ReferencedServicePort: strconv.Itoa(int(servicePort.Port)),
ReferencedServicePortName: servicePort.Name,
AddTrafficAgent: &addTrafficAgentAction{
containerName: container.Name,
ContainerPortName: containerPort.Name,
ContainerPortProto: containerPort.Protocol,
ContainerPortNumber: containerPort.Number,
ImageName: agentImageName,
},
}
// Depending on whether the Service refers to the port by name or by number, we either need
// to patch the names in the deployment, or the number in the service.
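	// For example (illustrative): a Service declaring "targetPort: 8080" has that port
	// rewritten to a symbolic name below, whereas one declaring "targetPort: http" is
	// left alone and the named port is hijacked on the workload instead.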
var serviceMod *svcActions
if servicePort.TargetPort.Type == intstr.Int {
// Change the port number that the Service refers to.
serviceMod = &svcActions{Version: version}
if svcHasTargetPort {
serviceMod.MakePortSymbolic = &makePortSymbolicAction{
PortName: servicePort.Name,
TargetPort: containerPort.Number,
SymbolicName: containerPort.Name,
}
} else {
serviceMod.AddSymbolicPort = &addSymbolicPortAction{
makePortSymbolicAction{
PortName: servicePort.Name,
TargetPort: containerPort.Number,
SymbolicName: containerPort.Name,
},
}
}
// Since we are updating the service to use the containerPort.Name
// if that value came from the container, then we need to hide it
// since the service is using the targetPort's int.
if usedContainerName {
workloadMod.HideContainerPort = &hideContainerPortAction{
ContainerName: container.Name,
PortName: containerPort.Name,
ordinal: 0,
}
}
} else {
// Hijack the port name in the Deployment.
workloadMod.HideContainerPort = &hideContainerPortAction{
ContainerName: container.Name,
PortName: containerPort.Name,
ordinal: 0,
}
}
// Apply the actions on the workload.
if err = workloadMod.Do(object); err != nil {
return nil, nil, err
}
annotations := object.GetAnnotations()
if object.GetAnnotations() == nil {
annotations = make(map[string]string)
}
annotations[annTelepresenceActions], err = workloadMod.MarshalAnnotation()
if err != nil {
return nil, nil, err
}
object.SetAnnotations(annotations)
explainDo(c, workloadMod, object)
// Apply the actions on the Service.
if serviceMod != nil {
if err = serviceMod.Do(matchingService); err != nil {
return nil, nil, err
}
if matchingService.Annotations == nil {
matchingService.Annotations = make(map[string]string)
}
matchingService.Annotations[annTelepresenceActions], err = serviceMod.MarshalAnnotation()
if err != nil {
return nil, nil, err
}
explainDo(c, serviceMod, matchingService)
} else {
matchingService = nil
}
return object, matchingService, nil
}
func (ki *installer) ensureManager(c context.Context, env *client.Env) error {
return resource.EnsureTrafficManager(c, ki.client, managerNamespace, ki.getClusterId(c), env)
}
| [
"\"TELEPRESENCE_MANAGER_NAMESPACE\""
] | [] | [
"TELEPRESENCE_MANAGER_NAMESPACE"
] | [] | ["TELEPRESENCE_MANAGER_NAMESPACE"] | go | 1 | 0 | |
src/main/java/com/lambdaschool/bookstore/config/AuthorizationServerConfig.java | package com.lambdaschool.bookstore.config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.oauth2.config.annotation.configurers.ClientDetailsServiceConfigurer;
import org.springframework.security.oauth2.config.annotation.web.configuration.AuthorizationServerConfigurerAdapter;
import org.springframework.security.oauth2.config.annotation.web.configuration.EnableAuthorizationServer;
import org.springframework.security.oauth2.config.annotation.web.configurers.AuthorizationServerEndpointsConfigurer;
import org.springframework.security.oauth2.provider.token.TokenStore;
@Configuration
@EnableAuthorizationServer
public class AuthorizationServerConfig extends AuthorizationServerConfigurerAdapter
{
// static final String CLIENT_ID = System.getenv("OAUTHCLIENTID"); // read from environment variable
// static final String CLIENT_SECRET = System.getenv("OAUTHCLIENTSECRET"); // read from environment variable
static final String CLIENT_ID = "lambda-client";
static final String CLIENT_SECRET = "lambda-secret";
static final String GRANT_TYPE_PASSWORD = "password";
static final String AUTHORIZATION_CODE = "authorization_code";
static final String REFRESH_TOKEN = "refresh_token";
static final String IMPLICIT = "implicit";
static final String SCOPE_READ = "read";
static final String SCOPE_WRITE = "write";
static final String TRUST = "trust";
static final int ACCESS_TOKEN_VALIDITY_SECONDS = 1 * 60 * 60;
static final int REFRESH_TOKEN_VALIDITY_SECONDS = 6 * 60 * 60;
@Autowired
private TokenStore tokenStore;
@Autowired
private AuthenticationManager authenticationManager;
@Autowired
private PasswordEncoder encoder;
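// Registers a single in-memory OAuth2 client with the configured grant types,
// scopes, and token lifetimes; the client secret is stored encoded.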
@Override
public void configure(ClientDetailsServiceConfigurer configurer) throws Exception
{
// .authorizedGrantTypes(GRANT_TYPE_PASSWORD, AUTHORIZATION_CODE, REFRESH_TOKEN, IMPLICIT)
configurer.inMemory()
.withClient(CLIENT_ID)
.secret(encoder.encode(CLIENT_SECRET))
.authorizedGrantTypes(GRANT_TYPE_PASSWORD, AUTHORIZATION_CODE, IMPLICIT)
.scopes(SCOPE_READ, SCOPE_WRITE, TRUST)
.accessTokenValiditySeconds(ACCESS_TOKEN_VALIDITY_SECONDS)
.refreshTokenValiditySeconds(REFRESH_TOKEN_VALIDITY_SECONDS);
}
@Override
public void configure(AuthorizationServerEndpointsConfigurer endpoints) throws Exception
{
endpoints.tokenStore(tokenStore).authenticationManager(authenticationManager);
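// Expose the token endpoint at /login instead of the default /oauth/token.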
endpoints.pathMapping("/oauth/token", "/login");
}
} | [
"\"OAUTHCLIENTID\"",
"\"OAUTHCLIENTSECRET\""
] | [] | [
"OAUTHCLIENTID",
"OAUTHCLIENTSECRET"
] | [] | ["OAUTHCLIENTID", "OAUTHCLIENTSECRET"] | java | 2 | 0 | |
executor/executor_test.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"context"
"flag"
"fmt"
"math"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/golang/protobuf/proto"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/rowcodec"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
"github.com/pingcap/tidb/util/timeutil"
"github.com/pingcap/tipb/go-tipb"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
autoid.SetStep(5000)
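// Raise the slow-query log threshold so test statements are not reported as slow.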
old := config.GetGlobalConfig()
new := *old
new.Log.SlowThreshold = 30000 // 30s
config.StoreGlobalConfig(&new)
testleak.BeforeTest()
TestingT(t)
testleak.AfterTestT(t)()
}
var _ = Suite(&testSuite{&baseTestSuite{}})
var _ = Suite(&testSuiteP1{&baseTestSuite{}})
var _ = Suite(&testSuiteP2{&baseTestSuite{}})
var _ = Suite(&testSuite1{})
var _ = Suite(&testSuite2{&baseTestSuite{}})
var _ = Suite(&testSuite3{&baseTestSuite{}})
var _ = Suite(&testSuite4{&baseTestSuite{}})
var _ = Suite(&testSuite5{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin1{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin2{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin3{&baseTestSuite{}})
var _ = Suite(&testSuiteAgg{baseTestSuite: &baseTestSuite{}})
var _ = Suite(&testSuite6{&baseTestSuite{}})
var _ = Suite(&testSuite7{&baseTestSuite{}})
var _ = Suite(&testSuite8{&baseTestSuite{}})
var _ = SerialSuites(&testShowStatsSuite{&baseTestSuite{}})
var _ = Suite(&testBypassSuite{})
var _ = Suite(&testUpdateSuite{})
var _ = Suite(&testPointGetSuite{})
var _ = Suite(&testBatchPointGetSuite{})
var _ = SerialSuites(&testRecoverTable{})
var _ = Suite(&testClusterReaderSuite{})
var _ = Suite(&testFlushSuite{})
var _ = SerialSuites(&testAutoRandomSuite{&baseTestSuite{}})
type testSuite struct{ *baseTestSuite }
type testSuiteP1 struct{ *baseTestSuite }
type testSuiteP2 struct{ *baseTestSuite }
type baseTestSuite struct {
cluster *mocktikv.Cluster
mvccStore mocktikv.MVCCStore
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context
}
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
func (s *baseTestSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
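// By default the suites run against an in-process mock TiKV store.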
if useMockTikv {
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.mvccStore = mocktikv.MustNewMVCCStore()
store, err := mockstore.NewMockTikvStore(
mockstore.WithCluster(s.cluster),
mockstore.WithMVCCStore(s.mvccStore),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *baseTestSuite) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *testSuiteP1) TestPessimisticSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, a int)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("begin PESSIMISTIC")
tk.MustQuery("select a from t where id=1 for update").Check(testkit.Rows("1"))
tk.MustExec("update t set a=a+1 where id=1")
tk.MustExec("commit")
tk.MustQuery("select a from t where id=1").Check(testkit.Rows("2"))
}
func (s *testSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuiteP1) TestBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists testbind")
tk.MustExec("create table testbind(i int, s varchar(20))")
tk.MustExec("create index index_t on testbind(i,s)")
tk.MustExec("create global binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show global bindings").Rows()), Equals, 1)
tk.MustExec("create session binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show session bindings").Rows()), Equals, 1)
tk.MustExec("drop session binding for select * from testbind")
}
func (s *testSuiteP1) TestChange(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("alter table t change a b int")
tk.MustExec("alter table t change b c bigint")
c.Assert(tk.ExecToErr("alter table t change c d varchar(100)"), NotNil)
}
func (s *testSuiteP1) TestChangePumpAndDrainer(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Changing a pump or drainer's state needs to connect to etcd,
// so it will meet the error "URL scheme must be http, https, unix, or unixs: /tmp/tidb"
err := tk.ExecToErr("change pump to node_state ='paused' for node_id 'pump1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
err = tk.ExecToErr("change drainer to node_state ='paused' for node_id 'drainer1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
}
func (s *testSuiteP1) TestLoadStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
c.Assert(tk.ExecToErr("load stats"), NotNil)
c.Assert(tk.ExecToErr("load stats ./xxx.json"), NotNil)
}
func (s *testSuiteP1) TestShow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test_show;")
tk.MustExec("use test_show")
tk.MustQuery("show engines")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
c.Assert(len(tk.MustQuery("show index in t").Rows()), Equals, 1)
c.Assert(len(tk.MustQuery("show index from t").Rows()), Equals, 1)
tk.MustQuery("show charset").Check(testkit.Rows(
"utf8 UTF-8 Unicode utf8_bin 3",
"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
"ascii US ASCII ascii_bin 1",
"latin1 Latin1 latin1_bin 1",
"binary binary binary 1"))
c.Assert(len(tk.MustQuery("show master status").Rows()), Equals, 1)
tk.MustQuery("show create database test_show").Check(testkit.Rows("test_show CREATE DATABASE `test_show` /*!40100 DEFAULT CHARACTER SET utf8mb4 */"))
tk.MustQuery("show privileges").Check(testkit.Rows("Alter Tables To alter the table",
"Alter Tables To alter the table",
"Alter routine Functions,Procedures To alter or drop stored functions/procedures",
"Create Databases,Tables,Indexes To create new databases and tables",
"Create routine Databases To use CREATE FUNCTION/PROCEDURE",
"Create temporary tables Databases To use CREATE TEMPORARY TABLE",
"Create view Tables To create new views",
"Create user Server Admin To create new users",
"Delete Tables To delete existing rows",
"Drop Databases,Tables To drop databases, tables, and views",
"Event Server Admin To create, alter, drop and execute events",
"Execute Functions,Procedures To execute stored routines",
"File File access on server To read and write files on the server",
"Grant option Databases,Tables,Functions,Procedures To give to other users those privileges you possess",
"Index Tables To create or drop indexes",
"Insert Tables To insert data into tables",
"Lock tables Databases To use LOCK TABLES (together with SELECT privilege)",
"Process Server Admin To view the plain text of currently executing queries",
"Proxy Server Admin To make proxy user possible",
"References Databases,Tables To have references on tables",
"Reload Server Admin To reload or refresh tables, logs and privileges",
"Replication client Server Admin To ask where the slave or master servers are",
"Replication slave Server Admin To read binary log events from the master",
"Select Tables To retrieve rows from table",
"Show databases Server Admin To see all databases with SHOW DATABASES",
"Show view Tables To see views with SHOW CREATE VIEW",
"Shutdown Server Admin To shut down the server",
"Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc.",
"Trigger Tables To use triggers",
"Create tablespace Server Admin To create/alter/drop tablespaces",
"Update Tables To update existing rows",
"Usage Server Admin No privileges - allow connect only"))
c.Assert(len(tk.MustQuery("show table status").Rows()), Equals, 1)
}
func (s *testSuite3) TestAdmin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1))")
tk.MustExec("insert admin_test (c1) values (1),(2),(NULL)")
ctx := context.Background()
// cancel DDL jobs test
r, err := tk.Exec("admin cancel ddl jobs 1")
c.Assert(err, IsNil, Commentf("err %v", err))
req := r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row := req.GetRow(0)
c.Assert(row.Len(), Equals, 2)
c.Assert(row.GetString(0), Equals, "1")
c.Assert(row.GetString(1), Matches, "*DDL Job:1 not found")
// show ddl test;
r, err = tk.Exec("admin show ddl")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 6)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
ddlInfo, err := admin.GetDDLInfo(txn)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, ddlInfo.SchemaVer)
// TODO: Pass this test.
// rowOwnerInfos := strings.Split(row.Data[1].GetString(), ",")
// ownerInfos := strings.Split(ddlInfo.Owner.String(), ",")
// c.Assert(rowOwnerInfos[0], Equals, ownerInfos[0])
serverInfo, err := infosync.GetServerInfoByID(ctx, row.GetString(1))
c.Assert(err, IsNil)
c.Assert(row.GetString(2), Equals, serverInfo.IP+":"+
strconv.FormatUint(uint64(serverInfo.Port), 10))
c.Assert(row.GetString(3), Equals, "")
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
c.Assert(req.NumRows() == 0, IsTrue)
err = txn.Rollback()
c.Assert(err, IsNil)
// show DDL jobs test
r, err = tk.Exec("admin show ddl jobs")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
c.Assert(len(historyJobs), Greater, 1)
c.Assert(len(row.GetString(1)), Greater, 0)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
r, err = tk.Exec("admin show ddl jobs 20")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
// show DDL job queries test
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test2")
tk.MustExec("create table admin_test2 (c1 int, c2 int, c3 int default 1, index (c1))")
result := tk.MustQuery(`admin show ddl job queries 1, 1, 1`)
result.Check(testkit.Rows())
result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`)
result.Check(testkit.Rows())
historyJobs, err = admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJobs[0].ID))
result.Check(testkit.Rows(historyJobs[0].Query))
c.Assert(err, IsNil)
// check table test
tk.MustExec("create table admin_test1 (c1 int, c2 int default 1, index (c1))")
tk.MustExec("insert admin_test1 (c1) values (21),(22)")
r, err = tk.Exec("admin check table admin_test, admin_test1")
c.Assert(err, IsNil)
c.Assert(r, IsNil)
// error table name
err = tk.ExecToErr("admin check table admin_test_error")
c.Assert(err, NotNil)
// different index values
sctx := tk.Se.(sessionctx.Context)
dom := domain.GetDomain(sctx)
is := dom.InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test"))
c.Assert(err, IsNil)
c.Assert(tb.Indices(), HasLen, 1)
_, err = tb.Indices()[0].Create(mock.NewContext(), txn, types.MakeDatums(int64(10)), 1)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
errAdmin := tk.ExecToErr("admin check table admin_test")
c.Assert(errAdmin, NotNil)
if config.CheckTableBeforeDrop {
err = tk.ExecToErr("drop table admin_test")
c.Assert(err.Error(), Equals, errAdmin.Error())
// Drop inconsistency index.
tk.MustExec("alter table admin_test drop index c1")
tk.MustExec("admin check table admin_test")
}
// checksum table test
tk.MustExec("create table checksum_with_index (id int, count int, PRIMARY KEY(id), KEY(count))")
tk.MustExec("create table checksum_without_index (id int, count int, PRIMARY KEY(id))")
r, err = tk.Exec("admin checksum table checksum_with_index, checksum_without_index")
c.Assert(err, IsNil)
res := tk.ResultSetToResult(r, Commentf("admin checksum table"))
// Mocktikv returns 1 for every table/index scan, then we will xor the checksums of a table.
// For "checksum_with_index", we have two checksums, so the result will be 1^1 = 0.
// For "checksum_without_index", we only have one checksum, so the result will be 1.
res.Sort().Check(testkit.Rows("test checksum_with_index 0 2 2", "test checksum_without_index 1 1 1"))
tk.MustExec("drop table if exists t1;")
tk.MustExec("CREATE TABLE t1 (c2 BOOL, PRIMARY KEY (c2));")
tk.MustExec("INSERT INTO t1 SET c2 = '0';")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c3 DATETIME NULL DEFAULT '2668-02-03 17:19:31';")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx2 (c3);")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c4 bit(10) default 127;")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx3 (c4);")
tk.MustExec("admin check table t1;")
// Test admin show ddl jobs table name after table has been dropped.
tk.MustExec("drop table if exists t1;")
re := tk.MustQuery("admin show ddl jobs 1")
rows := re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][2], Equals, "t1")
// Test reverse scan to get history DDL jobs when the DDL history job queue has multiple regions.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err = admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
// Split region for history ddl job queues.
m := meta.NewMeta(txn)
startKey := meta.DDLJobHistoryKey(m, 0)
endKey := meta.DDLJobHistoryKey(m, historyJobs[0].ID)
s.cluster.SplitKeys(s.mvccStore, startKey, endKey, int(historyJobs[0].ID/5))
historyJobs2, err := admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
c.Assert(historyJobs, DeepEquals, historyJobs2)
}
func (s *testSuiteP2) TestAdminShowDDLJobs(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_admin_show_ddl_jobs")
tk.MustExec("use test_admin_show_ddl_jobs")
tk.MustExec("create table t (a int);")
re := tk.MustQuery("admin show ddl jobs 1")
row := re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
jobID, err := strconv.Atoi(row[0].(string))
c.Assert(err, IsNil)
c.Assert(tk.Se.NewTxn(context.Background()), IsNil)
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
t := meta.NewMeta(txn)
job, err := t.GetHistoryDDLJob(int64(jobID))
c.Assert(err, IsNil)
c.Assert(job, NotNil)
// Test for compatibility. Old TiDB versions don't have the SchemaName field, and the BinlogInfo may be nil.
// See PR: 11561.
job.BinlogInfo = nil
job.SchemaName = ""
err = t.AddHistoryDDLJob(job, true)
c.Assert(err, IsNil)
err = tk.Se.CommitTxn(context.Background())
c.Assert(err, IsNil)
re = tk.MustQuery("admin show ddl jobs 1")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
re = tk.MustQuery("admin show ddl jobs 1 where job_type='create table'")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
}
func (s *testSuiteP2) TestAdminChecksumOfPartitionedTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test;")
tk.MustExec("DROP TABLE IF EXISTS admin_checksum_partition_test;")
tk.MustExec("CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;")
tk.MustExec("INSERT INTO admin_checksum_partition_test VALUES (1), (2);")
r := tk.MustQuery("ADMIN CHECKSUM TABLE admin_checksum_partition_test;")
r.Check(testkit.Rows("test admin_checksum_partition_test 1 5 5"))
}
func (s *baseTestSuite) fillData(tk *testkit.TestKit, table string) {
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table))
// insert data
tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table))
tk.CheckExecResult(1, 0)
tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table))
tk.CheckExecResult(1, 0)
}
type testCase struct {
data1 []byte
data2 []byte
expected []string
restData []byte
expectedMsg string
}
func checkCases(tests []testCase, ld *executor.LoadDataInfo,
c *C, tk *testkit.TestKit, ctx sessionctx.Context, selectSQL, deleteSQL string) {
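// checkCases feeds each test case through LoadDataInfo, commits the statement,
// and verifies the inserted rows, any remaining data, and the reported message.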
origin := ld.IgnoreLines
for _, tt := range tests {
ld.IgnoreLines = origin
c.Assert(ctx.NewTxn(context.Background()), IsNil)
ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
ctx.GetSessionVars().StmtCtx.BadNullAsWarning = true
ctx.GetSessionVars().StmtCtx.InLoadDataStmt = true
ctx.GetSessionVars().StmtCtx.InDeleteStmt = false
data, reachLimit, err1 := ld.InsertData(context.Background(), tt.data1, tt.data2)
c.Assert(err1, IsNil)
c.Assert(reachLimit, IsFalse)
err1 = ld.CheckAndInsertOneBatch(context.Background(), ld.GetRows(), ld.GetCurBatchCnt())
c.Assert(err1, IsNil)
ld.SetMaxRowsInBatch(20000)
if tt.restData == nil {
c.Assert(data, HasLen, 0,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
} else {
c.Assert(data, DeepEquals, tt.restData,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
}
ld.SetMessage()
tk.CheckLastMessage(tt.expectedMsg)
err := ctx.StmtCommit(nil)
c.Assert(err, IsNil)
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
r := tk.MustQuery(selectSQL)
r.Check(testutil.RowsWithSep("|", tt.expected...))
tk.MustExec(deleteSQL)
}
}
func (s *testSuiteP1) TestSelectWithoutFrom(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("select 1 + 2*3;")
r.Check(testkit.Rows("7"))
r = tk.MustQuery(`select _utf8"string";`)
r.Check(testkit.Rows("string"))
r = tk.MustQuery("select 1 order by 1;")
r.Check(testkit.Rows("1"))
}
// TestSelectBackslashN Issue 3685.
func (s *testSuiteP1) TestSelectBackslashN(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select \N;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "NULL")
sql = `select "\N";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
tk.MustExec("use test;")
tk.MustExec("create table test (`\\N` int);")
tk.MustExec("insert into test values (1);")
tk.CheckExecResult(1, 0)
sql = "select * from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = `select \N from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select (\N) from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = "select `\\N` from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = "select (`\\N`) from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = `select '\N' from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
sql = `select ('\N') from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
}
// TestSelectNull Issue #4053.
func (s *testSuiteP1) TestSelectNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select nUll;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select (null);`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select null+NULL;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `null+NULL`)
}
// TestSelectStringLiteral Issue #3686.
func (s *testSuiteP1) TestSelectStringLiteral(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select 'abc';`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
sql = `select (('abc'));`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
sql = `select 'abc'+'def';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("0"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `'abc'+'def'`)
// Below checks whether leading invalid chars are trimmed.
sql = "select '\n';"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("\n"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "")
sql = "select '\t col';" // Lowercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "col")
sql = "select '\t Col';" // Uppercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "Col")
sql = "select '\n\t 中文 col';" // Chinese char is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "中文 col")
sql = "select ' \r\n .col';" // Punctuation is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, ".col")
sql = "select ' 😆col';" // Emoji is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "😆col")
// Below checks whether trailing invalid chars are preserved.
sql = `select 'abc ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc ")
sql = `select ' abc 123 ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc 123 ")
// Issue #4239.
sql = `select 'a' ' ' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
sql = `select 'a' " " "string";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
sql = `select 'string' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("stringstring"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "string")
sql = `select "ss" "a";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssab"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" ' ' "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" ' ' "b" ' ' "d";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b d"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
}
func (s *testSuiteP1) TestSelectLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_limit")
tk.MustExec("insert INTO select_limit VALUES (3, \"hello\");")
tk.CheckExecResult(1, 0)
tk.MustExec("insert INTO select_limit VALUES (4, \"hello\");")
tk.CheckExecResult(1, 0)
r := tk.MustQuery("select * from select_limit limit 1;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from (select * from select_limit limit 1) k where id != 1;")
r.Check(testkit.Rows())
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 1;")
r.Check(testkit.Rows("2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 3;")
r.Check(testkit.Rows("4 hello"))
err := tk.ExecToErr("select * from select_limit limit 18446744073709551616 offset 3;")
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestSelectOrderBy(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_order_test")
// Test star field
r := tk.MustQuery("select * from select_order_test where id = 1 order by id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from select_order_test order by id desc limit 1 ")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from select_order_test order by id + 1 desc limit 1 ")
r.Check(testkit.Rows("2"))
// Test limit
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit
r = tk.MustQuery("select id as c1, name from select_order_test order by 2, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 100 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello"))
// Test offset overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 100;")
r.Check(testkit.Rows())
// Test limit exceeds int range.
r = tk.MustQuery("select id from select_order_test order by name, id limit 18446744073709551615;")
r.Check(testkit.Rows("1", "2"))
// Test multiple field
r = tk.MustQuery("select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit + order by
for i := 3; i <= 10; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (10086, \"hi\");")
for i := 11; i <= 20; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"hh\");", i))
}
for i := 21; i <= 30; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (1501, \"aa\");")
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 3;")
r.Check(testkit.Rows("11 hh"))
tk.MustExec("drop table select_order_test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (1, 3)")
r = tk.MustQuery("select 1-d as d from t order by d;")
r.Check(testkit.Rows("-2", "-1", "0"))
r = tk.MustQuery("select 1-d as d from t order by d + 1;")
r.Check(testkit.Rows("0", "-1", "-2"))
r = tk.MustQuery("select t.d from t order by d;")
r.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, c int)")
tk.MustExec("insert t values (1, 2, 3)")
r = tk.MustQuery("select b from (select a,b from t order by a,c) t")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select b from (select a,b from t order by a,c limit 1) t")
r.Check(testkit.Rows("2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(1, 1), (2, 2)")
tk.MustQuery("select * from t where 1 order by b").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select * from t where a between 1 and 2 order by a desc").Check(testkit.Rows("2 2", "1 1"))
// Test double read where topN is pushed down to the first read plan.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values(1, 3, 1)")
tk.MustExec("insert into t values(2, 2, 2)")
tk.MustExec("insert into t values(3, 1, 3)")
tk.MustQuery("select * from t use index(idx) order by a desc limit 1").Check(testkit.Rows("3 1 3"))
// Test double read which needs to keep order.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key b (b))")
tk.Se.GetSessionVars().IndexLookupSize = 3
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, 10-i))
}
tk.MustQuery("select a from t use index(b) order by b").Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
}
func (s *testSuiteP1) TestOrderBy(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 int, c2 int, c3 varchar(20))")
tk.MustExec("insert into t values (1, 2, 'abc'), (2, 1, 'bcd')")
// Fix issue https://github.com/pingcap/tidb/issues/337
tk.MustQuery("select c1 as a, c1 as b from t order by c1").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select c1 as a, t.c1 as a from t order by a desc").Check(testkit.Rows("2 2", "1 1"))
tk.MustQuery("select c1 as c2 from t order by c2").Check(testkit.Rows("1", "2"))
tk.MustQuery("select sum(c1) from t order by sum(c1)").Check(testkit.Rows("3"))
tk.MustQuery("select c1 as c2 from t order by c2 + 1").Check(testkit.Rows("2", "1"))
// Order by position.
tk.MustQuery("select * from t order by 1").Check(testkit.Rows("1 2 abc", "2 1 bcd"))
tk.MustQuery("select * from t order by 2").Check(testkit.Rows("2 1 bcd", "1 2 abc"))
// Order by binary.
tk.MustQuery("select c1, c3 from t order by binary c1 desc").Check(testkit.Rows("2 bcd", "1 abc"))
tk.MustQuery("select c1, c2 from t order by binary c3").Check(testkit.Rows("1 2", "2 1"))
}
func (s *testSuiteP1) TestSelectErrorRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := tk.ExecToErr("select row(1, 1) from test")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select (select 1, 1) from test;")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having (select 1, 1);")
c.Assert(err, NotNil)
}
// TestIssue2612 is related to https://github.com/pingcap/tidb/issues/2612
func (s *testSuiteP1) TestIssue2612(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (
create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00',
finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00');`)
tk.MustExec(`insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22');`)
rs, err := tk.Exec(`select timediff(finish_at, create_at) from t;`)
c.Assert(err, IsNil)
req := rs.NewChunk()
err = rs.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(req.GetRow(0).GetDuration(0, 0).String(), Equals, "-46:09:02")
rs.Close()
}
// TestIssue345 is related to https://github.com/pingcap/tidb/issues/345
func (s *testSuiteP1) TestIssue345(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (c1 int);`)
tk.MustExec(`create table t2 (c2 int);`)
tk.MustExec(`insert into t1 values (1);`)
tk.MustExec(`insert into t2 values (2);`)
tk.MustExec(`update t1, t2 set t1.c1 = 2, t2.c2 = 1;`)
tk.MustExec(`update t1, t2 set c1 = 2, c2 = 1;`)
tk.MustExec(`update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1;`)
// Check t1 content
r := tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("2"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("1"))
tk.MustExec(`update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2;`)
// Check t1 content
r = tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("1"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("2"))
_, err := tk.Exec(`update t1 as a, t2 set t1.c1 = 10;`)
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestIssue5055(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (a int);`)
tk.MustExec(`create table t2 (a int);`)
tk.MustExec(`insert into t1 values(1);`)
tk.MustExec(`insert into t2 values(1);`)
result := tk.MustQuery("select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;")
result.Check(testkit.Rows("1 1"))
}
func (s *testSuiteP2) TestUnion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
testSQL := `drop table if exists union_test; create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `drop table if exists union_test;`
tk.MustExec(testSQL)
testSQL = `create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `insert union_test values (1),(2)`
tk.MustExec(testSQL)
testSQL = `select * from (select id from union_test union select id from union_test) t order by id;`
r := tk.MustQuery(testSQL)
r.Check(testkit.Rows("1", "2"))
r = tk.MustQuery("select 1 union all select 1")
r.Check(testkit.Rows("1", "1"))
r = tk.MustQuery("select 1 union all select 1 union select 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1, 1")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from union_test union all (select 1) order by id desc")
r.Check(testkit.Rows("2", "1", "1"))
r = tk.MustQuery("select id as a from union_test union (select 1) order by a desc")
r.Check(testkit.Rows("2", "1"))
r = tk.MustQuery(`select null as a union (select "abc") order by a`)
r.Check(testkit.Rows("<nil>", "abc"))
r = tk.MustQuery(`select "abc" as a union (select 1) order by a`)
r.Check(testkit.Rows("1", "abc"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (c int, d int)")
tk.MustExec("insert t1 values (NULL, 1)")
tk.MustExec("insert t1 values (1, 1)")
tk.MustExec("insert t1 values (1, 2)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (1, 3)")
tk.MustExec("insert t2 values (1, 1)")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (c int, d int)")
tk.MustExec("insert t3 values (3, 2)")
tk.MustExec("insert t3 values (4, 3)")
r = tk.MustQuery(`select sum(c1), c2 from (select c c1, d c2 from t1 union all select d c1, c c2 from t2 union all select c c1, d c2 from t3) x group by c2 order by c2`)
r.Check(testkit.Rows("5 1", "4 2", "4 3"))
tk.MustExec("drop table if exists t1, t2, t3")
tk.MustExec("create table t1 (a int primary key)")
tk.MustExec("create table t2 (a int primary key)")
tk.MustExec("create table t3 (a int primary key)")
tk.MustExec("insert t1 values (7), (8)")
tk.MustExec("insert t2 values (1), (9)")
tk.MustExec("insert t3 values (2), (3)")
r = tk.MustQuery("select * from t1 union all select * from t2 union all (select * from t3) order by a limit 2")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert t1 values (2), (1)")
tk.MustExec("insert t2 values (3), (4)")
r = tk.MustQuery("select * from t1 union all (select * from t2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select (select * from t1 where a != t.a union all (select * from t2 where a != t.a) order by a limit 1) from t1 t")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int unsigned primary key auto_increment, c1 int, c2 int, index c1_c2 (c1, c2))")
tk.MustExec("insert into t (c1, c2) values (1, 1)")
tk.MustExec("insert into t (c1, c2) values (1, 2)")
tk.MustExec("insert into t (c1, c2) values (2, 3)")
r = tk.MustQuery("select * from (select * from t where t.c1 = 1 union select * from t where t.id = 1) s order by s.id")
r.Check(testkit.Rows("1 1 1", "2 1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (f1 DATE)")
tk.MustExec("INSERT INTO t VALUES ('1978-11-26')")
r = tk.MustQuery("SELECT f1+0 FROM t UNION SELECT f1+0 FROM t")
r.Check(testkit.Rows("19781126"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int, b int)")
tk.MustExec("INSERT INTO t VALUES ('1', '1')")
r = tk.MustQuery("select b from (SELECT * FROM t UNION ALL SELECT a, b FROM t order by a) t")
r.Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DECIMAL(4,2))")
tk.MustExec("INSERT INTO t VALUE(12.34)")
r = tk.MustQuery("SELECT 1 AS c UNION select a FROM t")
r.Sort().Check(testkit.Rows("1.00", "12.34"))
// #issue3771
r = tk.MustQuery("SELECT 'a' UNION SELECT CONCAT('a', -4)")
r.Sort().Check(testkit.Rows("a", "a-4"))
// test race
tk.MustQuery("SELECT @x:=0 UNION ALL SELECT @x:=0 UNION ALL SELECT @x")
// test field tp
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("CREATE TABLE t1 (a date)")
tk.MustExec("CREATE TABLE t2 (a date)")
tk.MustExec("SELECT a from t1 UNION select a FROM t2")
tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + " `a` date DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Move from session test.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c double);")
tk.MustExec("create table t2 (c double);")
tk.MustExec("insert into t1 value (73);")
tk.MustExec("insert into t2 value (930);")
// If an unspecified column's flen is set to 0, it will cause a bug in union.
// This test is used to prevent the bug from reappearing.
tk.MustQuery("select c from t1 union (select c from t2) order by c").Check(testkit.Rows("73", "930"))
// issue 5703
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a date)")
tk.MustExec("insert into t value ('2017-01-01'), ('2017-01-02')")
r = tk.MustQuery("(select a from t where a < 0) union (select a from t where a > 0) order by a")
r.Check(testkit.Rows("2017-01-01", "2017-01-02"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(0),(0)")
tk.MustQuery("select 1 from (select a from t union all select a from t) tmp").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select 10 as a from dual union select a from t order by a desc limit 1 ").Check(testkit.Rows("10"))
tk.MustQuery("select -10 as a from dual union select a from t order by a limit 1 ").Check(testkit.Rows("-10"))
tk.MustQuery("select count(1) from (select a from t union all select a from t) tmp").Check(testkit.Rows("4"))
err := tk.ExecToErr("select 1 from (select a from t limit 1 union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage))
err = tk.ExecToErr("select 1 from (select a from t order by a union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage))
_, err = tk.Exec("(select a from t order by a) union all select a from t limit 1 union all select a from t limit 1")
c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongUsage), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("(select a from t limit 1) union all select a from t limit 1")
c.Assert(err, IsNil)
_, err = tk.Exec("(select a from t order by a) union all select a from t order by a")
c.Assert(err, IsNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(1),(2),(3)")
tk.MustQuery("(select a from t order by a limit 2) union all (select a from t order by a desc limit 2) order by a desc limit 1,2").Check(testkit.Rows("2", "2"))
tk.MustQuery("select a from t union all select a from t order by a desc limit 5").Check(testkit.Rows("3", "3", "2", "2", "1"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select a from t group by a order by a").Check(testkit.Rows("1", "2", "2", "3", "3"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select 33 as a order by a desc limit 2").Check(testkit.Rows("33", "3"))
tk.MustQuery("select 1 union select 1 union all select 1").Check(testkit.Rows("1", "1"))
tk.MustQuery("select 1 union all select 1 union select 1").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec(`create table t1(a bigint, b bigint);`)
tk.MustExec(`create table t2(a bigint, b bigint);`)
tk.MustExec(`insert into t1 values(1, 1);`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t2 values(1, 1);`)
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustExec(`set @@sql_mode="";`)
tk.MustQuery(`select count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("128"))
tk.MustQuery(`select tmp.a, count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("1 128"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t value(1 ,2)")
tk.MustQuery("select a, b from (select a, 0 as d, b from t union all select a, 0 as d, b from t) test;").Check(testkit.Rows("1 2", "1 2"))
// #issue 8141
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("insert into t1 value(1,2),(1,1),(2,2),(2,2),(3,2),(3,2)")
tk.MustExec("set @@tidb_init_chunk_size=2;")
tk.MustQuery("select count(*) from (select a as c, a as d from t1 union all select a, b from t1) t;").Check(testkit.Rows("12"))
// #issue 8189 and #issue 8199
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustQuery("select a from t1 union select a from t1 order by (select a+1);").Check(testkit.Rows("1", "2", "3"))
// #issue 8201
for i := 0; i < 4; i++ {
tk.MustQuery("SELECT(SELECT 0 AS a FROM dual UNION SELECT 1 AS a FROM dual ORDER BY a ASC LIMIT 1) AS dev").Check(testkit.Rows("0"))
}
// #issue 8231
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE t1 (uid int(1))")
tk.MustExec("INSERT INTO t1 SELECT 150")
tk.MustQuery("SELECT 'a' UNION SELECT uid FROM t1 order by 1 desc;").Check(testkit.Rows("a", "150"))
// #issue 8196
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(3,'c'),(4,'d'),(5,'f'),(6,'e')")
tk.MustExec("analyze table t1")
tk.MustExec("analyze table t2")
_, err = tk.Exec("(select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by t1.b")
c.Assert(err.Error(), Equals, "[planner:1250]Table 't1' from one of the SELECTs cannot be used in global ORDER clause")
// #issue 9900
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b decimal(6, 3))")
tk.MustExec("insert into t values(1, 1.000)")
tk.MustQuery("select count(distinct a), sum(distinct a), avg(distinct a) from (select a from t union all select b from t) tmp;").Check(testkit.Rows("1 1.000 1.0000000"))
}
func (s *testSuiteP1) TestNeighbouringProj(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("create table t2(a int, b int)")
tk.MustExec("insert into t1 value(1, 1), (2, 2)")
tk.MustExec("insert into t2 value(1, 1), (2, 2)")
tk.MustQuery("select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t;").Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint, b bigint, c bigint);")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3);")
rs := tk.MustQuery("select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10;")
rs.Check(testkit.Rows("1 1 1", "1 2 2", "1 3 3"))
}
func (s *testSuiteP1) TestIn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (c1 int primary key, c2 int, key c (c2));`)
for i := 0; i <= 200; i++ {
tk.MustExec(fmt.Sprintf("insert t values(%d, %d)", i, i))
}
queryStr := `select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2`
r := tk.MustQuery(queryStr)
r.Check(testkit.Rows("7", "9", "10", "17", "18", "98", "100", "106", "111", "112"))
queryStr = `select c2 from t where c1 in ('7a')`
tk.MustQuery(queryStr).Check(testkit.Rows("7"))
}
func (s *testSuiteP1) TestTablePKisHandleScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int PRIMARY KEY AUTO_INCREMENT)")
tk.MustExec("insert t values (),()")
tk.MustExec("insert t values (-100),(0)")
tests := []struct {
sql string
result [][]interface{}
}{
{
"select * from t",
testkit.Rows("-100", "1", "2", "3"),
},
{
"select * from t where a = 1",
testkit.Rows("1"),
},
{
"select * from t where a != 1",
testkit.Rows("-100", "2", "3"),
},
{
"select * from t where a >= '1.1'",
testkit.Rows("2", "3"),
},
{
"select * from t where a < '1.1'",
testkit.Rows("-100", "1"),
},
{
"select * from t where a > '-100.1' and a < 2",
testkit.Rows("-100", "1"),
},
{
"select * from t where a is null",
testkit.Rows(),
}, {
"select * from t where a is true",
testkit.Rows("-100", "1", "2", "3"),
}, {
"select * from t where a is false",
testkit.Rows(),
},
{
"select * from t where a in (1, 2)",
testkit.Rows("1", "2"),
},
{
"select * from t where a between 1 and 2",
testkit.Rows("1", "2"),
},
}
for _, tt := range tests {
result := tk.MustQuery(tt.sql)
result.Check(tt.result)
}
}
func (s *testSuite8) TestIndexScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (-1), (2), (3), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'")
result.Check(testkit.Rows("-1", "3", "5", "6", "7", "9"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (0)")
result = tk.MustQuery("select NULL from t ")
result.Check(testkit.Rows("<nil>"))
// test for double read
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (5, 0)")
tk.MustExec("insert t values (4, 0)")
tk.MustExec("insert t values (3, 0)")
tk.MustExec("insert t values (2, 0)")
tk.MustExec("insert t values (1, 0)")
tk.MustExec("insert t values (0, 0)")
result = tk.MustQuery("select * from t order by a limit 3")
result.Check(testkit.Rows("0 0", "1 0", "2 0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (0, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (3, 2)")
tk.MustExec("insert t values (4, 1)")
tk.MustExec("insert t values (5, 2)")
result = tk.MustQuery("select * from t where a < 5 and b = 1 limit 2")
result.Check(testkit.Rows("0 1", "2 1"))
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (col0)")
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (col1)")
tk.MustExec("CREATE INDEX idx_tab1_3 on tab1 (col3)")
tk.MustExec("CREATE INDEX idx_tab1_4 on tab1 (col4)")
tk.MustExec("INSERT INTO tab1 VALUES(1,37,20.85,30,10.69)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42)")
result.Check(testkit.Rows())
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (a)")
tk.MustExec("INSERT INTO tab1 VALUES(1,1,1)")
tk.MustExec("INSERT INTO tab1 VALUES(2,2,1)")
tk.MustExec("INSERT INTO tab1 VALUES(3,1,2)")
tk.MustExec("INSERT INTO tab1 VALUES(4,2,2)")
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 3 AND a = 1")
result.Check(testkit.Rows("1 1 1", "3 1 2"))
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2")
result.Check(testkit.Rows("3 1 2"))
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (b, a)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE b > 1")
result.Check(testkit.Rows("3", "4"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a varchar(3), index(a))")
tk.MustExec("insert t values('aaa'), ('aab')")
result = tk.MustQuery("select * from t where a >= 'aaaa' and a < 'aabb'")
result.Check(testkit.Rows("aab"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, index(c))")
tk.MustExec("insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5)")
// Test for double read and top n.
result = tk.MustQuery("select a from t where c >= 2 order by b desc limit 1")
result.Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(50) primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values('aa', 1, 1)")
tk.MustQuery("select * from t use index(idx) where a > 'a'").Check(testkit.Rows("aa 1 1"))
// Test for the fix of issue 9636.
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (a int, KEY (a))")
result = tk.MustQuery(`SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count`)
result.Check(testkit.Rows())
}
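// TestIndexReverseOrder tests descending scans on single-column and composite indexes.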
func (s *testSuiteP1) TestIndexReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int, index idx (b))")
tk.MustExec("insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by b desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
result = tk.MustQuery("select b from t where b <3 or (b >=6 and b < 8) order by b desc")
result.Check(testkit.Rows("7", "6", "2", "1", "0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, index idx (b, a))")
tk.MustExec("insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0)")
result = tk.MustQuery("select b, a from t order by b, a desc")
result.Check(testkit.Rows("0 2", "0 1", "0 0", "1 2", "1 1", "1 0", "2 2", "2 1", "2 0"))
}
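// TestTableReverseOrder tests descending table scans ordered by the primary key.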
func (s *testSuiteP1) TestTableReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int)")
tk.MustExec("insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by a desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1"))
result = tk.MustQuery("select a from t where a <3 or (a >=6 and a < 8) order by a desc")
result.Check(testkit.Rows("7", "6", "2", "1"))
}
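// TestDefaultNull tests column default values, explicit NULLs and auto_increment
// across insert, update and delete.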
func (s *testSuiteP1) TestDefaultNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int default 1, c int)")
tk.MustExec("insert t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("update t set b = NULL where a = 1")
tk.MustQuery("select * from t").Check(testkit.Rows("1 <nil> <nil>"))
tk.MustExec("update t set c = 1")
tk.MustQuery("select * from t ").Check(testkit.Rows("1 <nil> 1"))
tk.MustExec("delete from t where a = 1")
tk.MustExec("insert t (a) values (1)")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
}
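// TestUnsignedPKColumn tests reads and updates on a table with an unsigned primary key
// and a composite secondary index.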
func (s *testSuiteP1) TestUnsignedPKColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a));")
tk.MustExec("insert t values (1, 1, 1)")
result := tk.MustQuery("select * from t;")
result.Check(testkit.Rows("1 1 1"))
tk.MustExec("update t set c=2 where a=1;")
result = tk.MustQuery("select * from t where b=1;")
result.Check(testkit.Rows("1 1 2"))
}
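// TestJSON tests JSON column values, json_type, comparison with primitives,
// CAST ... AS JSON, the -> and ->> operators, and DDL restrictions on JSON/TEXT/BLOB columns.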
func (s *testSuiteP1) TestJSON(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json (id int, a json)")
tk.MustExec(`insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}')`)
tk.MustExec(`insert into test_json (id, a) values (2, "null")`)
tk.MustExec(`insert into test_json (id, a) values (3, null)`)
tk.MustExec(`insert into test_json (id, a) values (4, 'true')`)
tk.MustExec(`insert into test_json (id, a) values (5, '3')`)
tk.MustExec(`insert into test_json (id, a) values (5, '4.0')`)
tk.MustExec(`insert into test_json (id, a) values (6, '"string"')`)
result := tk.MustQuery(`select tj.a from test_json tj order by tj.id`)
result.Check(testkit.Rows(`{"a": [1, "2", {"aa": "bb"}, 4], "b": true}`, "null", "<nil>", "true", "3", "4", `"string"`))
// Check json_type function
result = tk.MustQuery(`select json_type(a) from test_json tj order by tj.id`)
result.Check(testkit.Rows("OBJECT", "NULL", "<nil>", "BOOLEAN", "INTEGER", "DOUBLE", "STRING"))
// Check JSON comparison with primitives.
result = tk.MustQuery(`select a from test_json tj where a = 3`)
result.Check(testkit.Rows("3"))
result = tk.MustQuery(`select a from test_json tj where a = 4.0`)
result.Check(testkit.Rows("4"))
result = tk.MustQuery(`select a from test_json tj where a = true`)
result.Check(testkit.Rows("true"))
result = tk.MustQuery(`select a from test_json tj where a = "string"`)
result.Check(testkit.Rows(`"string"`))
// Check cast(true/false as JSON).
result = tk.MustQuery(`select cast(true as JSON)`)
result.Check(testkit.Rows(`true`))
result = tk.MustQuery(`select cast(false as JSON)`)
result.Check(testkit.Rows(`false`))
// Check the two JSON syntactic sugar operators -> and ->>.
result = tk.MustQuery(`select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`bb true`))
result = tk.MustQuery(`select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`"bb" true`))
// Check some DDL limits for TEXT/BLOB/JSON columns.
var err error
var terr *terror.Error
_, err = tk.Exec(`create table test_bad_json(a json default '{}')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a blob default 'hello')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a text default 'world')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
// Check that JSON fields cannot be used as keys.
_, err = tk.Exec(`create table test_bad_json(id int, a json, key (a))`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrJSONUsedAsKey))
// check CAST AS JSON.
result = tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`)
result.Check(testkit.Rows(`3 {} <nil>`))
// Check casting JSON to decimal.
// NOTE: the commented-out case below hits a known bug and should be re-enabled after the bug is fixed.
// TODO: Fix bug https://github.com/pingcap/tidb/issues/12178
//tk.MustExec("drop table if exists test_json")
//tk.MustExec("create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json );")
//tk.MustExec(`insert into test_json (b) values
// ('{"c": "1267.1"}'),
// ('{"c": "1267.01"}'),
// ('{"c": "1267.1234"}'),
// ('{"c": "1267.3456"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}');`)
//
//tk.MustQuery("select a from test_json;").Check(testkit.Rows("1267.10", "1267.01", "1267.12",
// "1267.35", "1234567890123456789012345678901234567890123456789012345.00",
// "1234567890123456789012345678901234567890123456789012345.12"))
}
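// TestMultiUpdate tests that assignments in UPDATE and INSERT ... ON DUPLICATE KEY UPDATE
// set lists are evaluated left to right, so later assignments see earlier results.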
func (s *testSuiteP1) TestMultiUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_mu (a int primary key, b int, c int)`)
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9)`)
// Test INSERT ... ON DUPLICATE UPDATE set_lists.
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b`)
result := tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 3 3`, `4 5 6`, `7 8 9`))
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 6`, `7 8 9`))
// Test UPDATE ... set_lists.
tk.MustExec(`UPDATE test_mu SET b = 0, c = b WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 0 0`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = 8, b = c WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 8 8`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = b, b = c WHERE a = 7`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 8 8`, `7 8 8`))
}
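// TestGeneratedColumnWrite tests that generated columns cannot be assigned explicitly,
// while inserts and updates that omit them still succeed.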
func (s *testSuiteP1) TestGeneratedColumnWrite(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
_, err := tk.Exec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual)`)
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("c").Error())
tk.MustExec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (b+8) virtual)`)
tk.MustExec(`CREATE TABLE test_gc_write_1 (a int primary key, b int, c int)`)
tests := []struct {
stmt string
err int
}{
// Can't modify generated column by values.
{`insert into test_gc_write (a, b, c) values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
{`insert into test_gc_write values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by select clause.
{`insert into test_gc_write select 1, 1, 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by on duplicate clause.
{`insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by set.
{`insert into test_gc_write set a = 1, b = 1, c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by update clause.
{`update test_gc_write set c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by multi-table update clause.
{`update test_gc_write, test_gc_write_1 set test_gc_write.c = 1`, mysql.ErrBadGeneratedColumn},
// Can insert without generated columns.
{`insert into test_gc_write (a, b) values (1, 1)`, 0},
{`insert into test_gc_write set a = 2, b = 2`, 0},
{`insert into test_gc_write (b) select c from test_gc_write`, 0},
// Can update without generated columns.
{`update test_gc_write set b = 2 where a = 2`, 0},
{`update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4`, 0},
// But we can't do the following, the same as in MySQL 5.7:
{`insert into test_gc_write values (1, 1)`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write select 1, 1`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (c) select a, b from test_gc_write`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (b, c) select a, b from test_gc_write`, mysql.ErrBadGeneratedColumn},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil, Commentf("sql is `%v`", tt.stmt))
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err), Commentf("sql is %v", tt.stmt))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnRead tests select generated columns from table.
// They should be calculated from their generation expressions.
func (s *testSuiteP1) TestGeneratedColumnRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored, e int as (c*2))`)
result := tk.MustQuery(`SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd'`)
result.Check(testkit.Rows("`a` * `b`"))
// Insert only columns a and b, leaving the generated columns to be calculated from them.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read SET a = 5, b = 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 10 15 50 30`))
tk.MustExec(`REPLACE INTO test_gc_read (a, b) VALUES (5, 6)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 6 11 30 22`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 9 14 45 28`))
// Test selecting only generated columns without their dependencies.
result = tk.MustQuery(`SELECT c, d FROM test_gc_read`)
result.Check(testkit.Rows(`<nil> <nil>`, `3 2`, `7 12`, `14 45`))
// Test select only virtual generated column that refers to other virtual generated columns.
result = tk.MustQuery(`SELECT e FROM test_gc_read`)
result.Check(testkit.Rows(`<nil>`, `6`, `14`, `28`))
// Test order of on duplicate key update list.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `6 6 12 36 24`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test where-conditions on virtual/stored generated columns.
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 7`)
result.Check(testkit.Rows(`3 4 7 12 14`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 64`)
result.Check(testkit.Rows(`8 8 16 64 32`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE e = 6`)
result.Check(testkit.Rows(`1 2 3 2 6`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read SET a = a + 100 WHERE c = 7`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 107`)
result.Check(testkit.Rows(`103 4 107 412 214`))
// Test update with a table alias, with where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 207`)
result.Check(testkit.Rows(`203 4 207 812 414`))
tk.MustExec(`UPDATE test_gc_read SET a = a - 200 WHERE d = 812`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 12`)
result.Check(testkit.Rows(`3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read set a = 4, b = d + 1`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`,
`4 <nil> <nil> <nil> <nil>`, `8 8 16 64 32`))
tk.MustExec(`DELETE FROM test_gc_read where a = 4`)
// Test on-conditions on virtual/stored generated columns.
tk.MustExec(`CREATE TABLE test_gc_help(a int primary key, b int, c int, d int, e int)`)
tk.MustExec(`INSERT INTO test_gc_help(a, b, c, d, e) SELECT * FROM test_gc_read`)
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.e = t2.e ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test generated column in subqueries.
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`))
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b`)
result.Check(testkit.Rows(`2`, `4`, `8`))
// Test aggregation on virtual/stored generated columns.
result = tk.MustQuery(`SELECT c, sum(a) aa, max(d) dd, sum(e) ee FROM test_gc_read GROUP BY c ORDER BY aa`)
result.Check(testkit.Rows(`<nil> 0 <nil> <nil>`, `3 1 2 6`, `7 3 12 14`, `16 8 64 32`))
result = tk.MustQuery(`SELECT a, sum(c), sum(d), sum(e) FROM test_gc_read GROUP BY a ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 3 2 6`, `3 7 12 14`, `8 16 64 32`))
// Test multi-update on generated columns.
tk.MustExec(`UPDATE test_gc_read m, test_gc_read n SET m.a = m.a + 10, n.a = n.a + 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`10 <nil> <nil> <nil> <nil>`, `11 2 13 22 26`, `13 4 17 52 34`, `18 8 26 144 52`))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(18)")
tk.MustExec("update test_gc_read set a = a+1 where a in (select a from t)")
result = tk.MustQuery("select * from test_gc_read order by a")
result.Check(testkit.Rows(`10 <nil> <nil> <nil> <nil>`, `11 2 13 22 26`, `13 4 17 52 34`, `19 8 27 152 54`))
// Test different types between generation expression and generated column.
tk.MustExec(`CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED)`)
tk.MustExec(`INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a')`)
result = tk.MustQuery(`SELECT c, d FROM test_gc_read_cast`)
result.Check(testkit.Rows(`3 3`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b))))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_1`)
result.Check(testkit.Rows(`yellow`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a'))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}')`)
result = tk.MustQuery(`SELECT b FROM test_gc_read_cast_2`)
result.Check(testkit.Rows(`{"key": "测"}`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_3( a JSON, b JSON AS (a->>'$.a'), c INT AS (b * 3.14) )`)
tk.MustExec(`INSERT INTO test_gc_read_cast_3(a) VALUES ('{"a": "5"}')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_3`)
result.Check(testkit.Rows(`16`))
_, err := tk.Exec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a')`)
c.Assert(err, NotNil)
// Test reading generated columns after dropping an irrelevant column.
tk.MustExec(`DROP TABLE IF EXISTS test_gc_read_m`)
tk.MustExec(`CREATE TABLE test_gc_read_m (a int primary key, b int, c int as (a+1), d int as (c*2))`)
tk.MustExec(`INSERT INTO test_gc_read_m(a) values (1), (2)`)
tk.MustExec(`ALTER TABLE test_gc_read_m DROP b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read_m`)
result.Check(testkit.Rows(`1 2 4`, `2 3 6`))
// Test not null generated columns.
tk.MustExec(`CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored)`)
tk.MustExec(`CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null)`)
tests := []struct {
stmt string
err int
}{
// Can't insert these records, because generated columns are not null.
{`insert into test_gc_read_1(a, b) values (1, null)`, mysql.ErrBadNull},
{`insert into test_gc_read_2(a, b) values (1, null)`, mysql.ErrBadNull},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err))
} else {
c.Assert(err, IsNil)
}
}
}
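// TestToPBExpr tests filter expressions (comparisons, LIKE, NOT, IN) on decimal, varchar
// and int columns, which exercises converting expressions to protobuf for pushdown.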
func (s *testSuiteP2) TestToPBExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.4, 2.4)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a < 2.399999")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where a <= 1.1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b >= 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where not (b = 1)")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b&1 = a|1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b != 2 and b <=> 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b in (3)")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b not in (1, 2)")
result.Check(testkit.Rows("3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a varchar(255), b int)")
tk.MustExec("insert t values ('abc123', 1)")
tk.MustExec("insert t values ('ab123', 2)")
result = tk.MustQuery("select * from t where a like 'ab%'")
result.Check(testkit.Rows("abc123 1", "ab123 2"))
result = tk.MustQuery("select * from t where a like 'ab_12'")
result.Check(nil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustExec("insert t values (1)")
tk.MustExec("insert t values (2)")
result = tk.MustQuery("select * from t where not (a = 1)")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select * from t where not(not (a = 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select * from t where not(a != 1 and a != 2)")
result.Check(testkit.Rows("1", "2"))
}
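// TestDatumXAPI tests range conditions on decimal and time columns through both
// table scans and index scans.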
func (s *testSuiteP2) TestDatumXAPI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.2, 2.2)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a time(3), b time, index idx_a (a))")
tk.MustExec("insert t values ('11:11:11', '11:11:11')")
tk.MustExec("insert t values ('11:11:12', '11:11:12')")
tk.MustExec("insert t values ('11:11:13', '11:11:13')")
result = tk.MustQuery("select * from t where a > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
result = tk.MustQuery("select * from t where b > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
}
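// TestSQLMode tests strict and non-strict SQL mode behavior for NOT NULL columns,
// out-of-range values and truncation, including switching the global sql_mode.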
func (s *testSuiteP2) TestSQLMode(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a tinyint not null)")
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert t values ()")
c.Check(err, NotNil)
_, err = tk.Exec("insert t values ('1000')")
c.Check(err, NotNil)
tk.MustExec("create table if not exists tdouble (a double(3,2))")
_, err = tk.Exec("insert tdouble values (10.23)")
c.Check(err, NotNil)
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values ()")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1364 Field 'a' doesn't have a default value"))
_, err = tk.Exec("insert t values (null)")
c.Check(err, NotNil)
tk.MustExec("insert ignore t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t select null")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t values (1000)")
tk.MustQuery("select * from t order by a").Check(testkit.Rows("0", "0", "0", "127"))
tk.MustExec("insert tdouble values (10.23)")
tk.MustQuery("select * from tdouble").Check(testkit.Rows("9.99"))
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
tk.MustExec("set @@global.sql_mode = ''")
// Disable the global variable cache so that loading global session variables takes effect immediately.
s.domain.GetGlobalVarsCache().Disable()
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("drop table if exists t2")
tk2.MustExec("create table t2 (a varchar(3))")
tk2.MustExec("insert t2 values ('abcd')")
tk2.MustQuery("select * from t2").Check(testkit.Rows("abc"))
// The first session (tk) is still in strict mode.
_, err = tk.Exec("insert t2 values ('abcd')")
c.Check(err, NotNil)
// Restore original global strict mode.
tk.MustExec("set @@global.sql_mode = 'STRICT_TRANS_TABLES'")
}
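// TestTableDual tests queries without a table (dual) and a join that is reduced to dual.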
func (s *testSuiteP2) TestTableDual(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
result := tk.MustQuery("Select 1")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select count(*) from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual where 1")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
tk.MustQuery("select t1.* from t t1, t t2 where t1.a=t2.a and 1=0").Check(testkit.Rows())
}
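// TestTableScan tests scanning the memory tables in information_schema.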
func (s *testSuiteP2) TestTableScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use information_schema")
result := tk.MustQuery("select * from schemata")
// There must be at least these schemas: information_schema, mysql, performance_schema and test.
c.Assert(len(result.Rows()), GreaterEqual, 4)
tk.MustExec("use test")
tk.MustExec("create database mytest")
rowStr1 := fmt.Sprintf("%s %s %s %s %v", "def", "mysql", "utf8mb4", "utf8mb4_bin", nil)
rowStr2 := fmt.Sprintf("%s %s %s %s %v", "def", "mytest", "utf8mb4", "utf8mb4_bin", nil)
tk.MustExec("use information_schema")
result = tk.MustQuery("select * from schemata where schema_name = 'mysql'")
result.Check(testkit.Rows(rowStr1))
result = tk.MustQuery("select * from schemata where schema_name like 'my%'")
result.Check(testkit.Rows(rowStr1, rowStr2))
result = tk.MustQuery("select 1 from tables limit 1")
result.Check(testkit.Rows("1"))
}
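// TestAdapterStatement tests that a compiled statement keeps its original text.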
func (s *testSuiteP2) TestAdapterStatement(c *C) {
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
se.GetSessionVars().TxnCtx.InfoSchema = domain.GetDomain(se).InfoSchema()
compiler := &executor.Compiler{Ctx: se}
stmtNode, err := s.ParseOneStmt("select 1", "", "")
c.Check(err, IsNil)
stmt, err := compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "select 1")
stmtNode, err = s.ParseOneStmt("create table test.t (a int)", "", "")
c.Check(err, IsNil)
stmt, err = compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "create table test.t (a int)")
}
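// TestIsPointGet tests IsPointGetWithPKOrUniqueKeyByAutoCommit for queries on mysql.help_topic.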
func (s *testSuiteP2) TestIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use mysql")
ctx := tk.Se.(sessionctx.Context)
tests := map[string]bool{
"select * from help_topic where name='aaa'": false,
"select 1 from help_topic where name='aaa'": true,
"select * from help_topic where help_topic_id=1": true,
"select * from help_topic where help_category_id=1": false,
}
infoSchema := infoschema.GetInfoSchema(ctx)
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
err = plannercore.Preprocess(ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
p, _, err := planner.Optimize(context.TODO(), ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
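// TestPointGetRepeatableRead tests that a point get through a unique index reads a consistent
// snapshot: the failpoints pause the reader between its index read and row fetch while another
// session updates the row, and the old value must still be returned.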
func (s *testSuiteP2) TestPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table point_get (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into point_get values (1, 1, 1)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "pointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from point_get where b = 1")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait for the point get executor to finish its first `get`.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
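// TestBatchPointGetRepeatableRead is the batch point get variant of the repeatable read test above.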
func (s *testSuiteP2) TestBatchPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table batch_point_get (a int, b int, c int, unique key k_b(a, b, c))`)
tk1.MustExec("insert into batch_point_get values (1, 1, 1), (2, 3, 4), (3, 4, 5)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "batchPointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from batch_point_get where (a, b, c) in ((1, 1, 1))")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait for the batch point get executor to finish its first `get`.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update batch_point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
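// TestSplitRegionTimeout tests SPLIT TABLE behavior when region splitting or scattering
// times out, using failpoints to inject the timeouts.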
func (s *testSuite7) TestSplitRegionTimeout(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/MockSplitRegionTimeout", `return(true)`), IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
tk.MustExec(`set @@tidb_wait_split_region_timeout=1`)
// The result "0 0" means that 0 regions were split and 0 regions finished scattering before the timeout.
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("0 0"))
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/MockSplitRegionTimeout"), IsNil)
// Test scatter regions timeout.
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/MockScatterRegionTimeout", `return(true)`), IsNil)
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/MockScatterRegionTimeout"), IsNil)
}
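// TestRow tests row constructor comparisons in filters, subqueries and standalone expressions.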
func (s *testSuiteP2) TestRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 3)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (2, 3)")
result := tk.MustQuery("select * from t where (c, d) < (2,2)")
result.Check(testkit.Rows("1 1", "1 3", "2 1"))
result = tk.MustQuery("select * from t where (1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where row(1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where (c, d) = (select * from t where (c,d) = (1,1))")
result.Check(testkit.Rows("1 1"))
result = tk.MustQuery("select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d))")
result.Check(testkit.Rows("1 1", "1 3", "2 1", "2 3"))
result = tk.MustQuery("select (1, 2, 3) < (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 3)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 1, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) >= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) = (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) != (2, 3, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (row(1, 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 0) in (row(1, 1))")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (select 1, 1)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > row(1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > (select 1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select 1 > (select 1)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (select 1)")
result.Check(testkit.Rows("1"))
}
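// TestColumnName tests the column name, alias, table and database attributes of result fields.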
func (s *testSuiteP2) TestColumnName(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
// Disable ONLY_FULL_GROUP_BY.
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
rs, err := tk.Exec("select 1 + c, count(*) from t")
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "1 + c")
c.Check(fields[0].ColumnAsName.L, Equals, "1 + c")
c.Check(fields[1].Column.Name.L, Equals, "count(*)")
c.Check(fields[1].ColumnAsName.L, Equals, "count(*)")
rs.Close()
rs, err = tk.Exec("select (c) > all (select c from t) from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.L, Equals, "(c) > all (select c from t)")
c.Check(fields[0].ColumnAsName.L, Equals, "(c) > all (select c from t)")
rs.Close()
tk.MustExec("begin")
tk.MustExec("insert t values(1,1)")
rs, err = tk.Exec("select c d, d c from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "d")
c.Check(fields[1].Column.Name.L, Equals, "d")
c.Check(fields[1].ColumnAsName.L, Equals, "c")
rs.Close()
// Test case for querying a column of a table.
// In this case, all attributes have values.
rs, err = tk.Exec("select c as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "t")
c.Check(fields[0].TableAsName.L, Equals, "t2")
c.Check(fields[0].DBName.L, Equals, "test")
rs.Close()
// Test case for querying an expression that uses only constant inputs.
// In this case, the table, org_table and database attributes will all be empty.
rs, err = tk.Exec("select hour(1) as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "a")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "")
c.Check(fields[0].TableAsName.L, Equals, "")
c.Check(fields[0].DBName.L, Equals, "")
rs.Close()
// Test case for querying a column wrapped in parentheses and unary plus.
// In this case, the column name should be its original name.
rs, err = tk.Exec("select (c), (+c), +(c), +(+(c)), ++c from t")
c.Check(err, IsNil)
fields = rs.Fields()
for i := 0; i < 5; i++ {
c.Check(fields[i].Column.Name.L, Equals, "c")
c.Check(fields[i].ColumnAsName.L, Equals, "c")
}
rs.Close()
// Test issue https://github.com/pingcap/tidb/issues/9639 .
// Both window function and expression appear in final result field.
tk.MustExec("set @@tidb_enable_window_function = 1")
rs, err = tk.Exec("select 1+1, row_number() over() num from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "1+1")
c.Assert(fields[0].ColumnAsName.L, Equals, "1+1")
c.Assert(fields[1].Column.Name.L, Equals, "num")
c.Assert(fields[1].ColumnAsName.L, Equals, "num")
tk.MustExec("set @@tidb_enable_window_function = 0")
rs.Close()
rs, err = tk.Exec("select if(1,c,c) from t;")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "if(1,c,c)")
// This is a compatibility issue; ColumnAsName should be empty instead.
c.Assert(fields[0].ColumnAsName.L, Equals, "if(1,c,c)")
}
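// TestSelectVar tests selecting and assigning user variables, and the
// SQL_BIG_RESULT/SQL_SMALL_RESULT/SQL_BUFFER_RESULT modifiers.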
func (s *testSuiteP2) TestSelectVar(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (d int)")
tk.MustExec("insert into t values(1), (2), (1)")
// This behavior is different from MySQL.
result := tk.MustQuery("select @a, @a := d+1 from t")
result.Check(testkit.Rows("<nil> 2", "2 3", "3 2"))
// Test for PR #10658.
tk.MustExec("select SQL_BIG_RESULT d from t group by d")
tk.MustExec("select SQL_SMALL_RESULT d from t group by d")
tk.MustExec("select SQL_BUFFER_RESULT d from t group by d")
}
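// TestHistoryRead tests reading historical data through tidb_snapshot, including the safe point
// check, read-only enforcement under a snapshot, and reading across a schema change.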
func (s *testSuiteP2) TestHistoryRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists history_read")
tk.MustExec("create table history_read (a int)")
tk.MustExec("insert history_read values (1)")
// For mocktikv, the safe point is not initialized, so we insert it manually for snapshot reads to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20060102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
// Setting the snapshot to a time before the safe point should fail.
_, err := tk.Exec("set @@tidb_snapshot = '2006-01-01 15:04:05.999999'")
c.Assert(terror.ErrorEqual(err, variable.ErrSnapshotTooOld), IsTrue, Commentf("err %v", err))
// SnapshotTS is not updated if the check fails.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
curVer1, _ := s.store.CurrentVersion()
time.Sleep(time.Millisecond)
snapshotTime := time.Now()
time.Sleep(time.Millisecond)
curVer2, _ := s.store.CurrentVersion()
tk.MustExec("insert history_read values (2)")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
ctx := tk.Se.(sessionctx.Context)
snapshotTS := ctx.GetSessionVars().SnapshotTS
c.Assert(snapshotTS, Greater, curVer1.Ver)
c.Assert(snapshotTS, Less, curVer2.Ver)
tk.MustQuery("select * from history_read").Check(testkit.Rows("1"))
_, err = tk.Exec("insert history_read values (2)")
c.Assert(err, NotNil)
_, err = tk.Exec("update history_read set a = 3 where a = 1")
c.Assert(err, NotNil)
_, err = tk.Exec("delete from history_read where a = 1")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("insert history_read values (3)")
tk.MustExec("update history_read set a = 4 where a = 3")
tk.MustExec("delete from history_read where a = 1")
time.Sleep(time.Millisecond)
snapshotTime = time.Now()
time.Sleep(time.Millisecond)
tk.MustExec("alter table history_read add column b int")
tk.MustExec("insert history_read values (8, 8), (9, 9)")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tsoStr := strconv.FormatUint(oracle.EncodeTSO(snapshotTime.UnixNano()/int64(time.Millisecond)), 10)
tk.MustExec("set @@tidb_snapshot = '" + tsoStr + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
}
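// TestLowResolutionTSORead tests that tidb_low_resolution_tso makes reads use a stale,
// low-resolution timestamp and rejects writes while it is enabled.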
func (s *testSuite2) TestLowResolutionTSORead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@autocommit=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists low_resolution_tso")
tk.MustExec("create table low_resolution_tso(a int)")
tk.MustExec("insert low_resolution_tso values (1)")
// Enable low resolution TSO.
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsFalse)
tk.Exec("set @@tidb_low_resolution_tso = 'on'")
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsTrue)
time.Sleep(3 * time.Second)
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("1"))
_, err := tk.Exec("update low_resolution_tso set a = 2")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_low_resolution_tso = 'off'")
tk.MustExec("update low_resolution_tso set a = 2")
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("2"))
}
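// TestScanControlSelection tests a correlated aggregate subquery over an indexed column.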
func (s *testSuite) TestScanControlSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx_b(b))")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select (select count(1) k from t s where s.b = t1.c) from t t1").Sort().Check(testkit.Rows("0", "1", "3", "3"))
}
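// TestSimpleDAG tests basic distributed execution: table scans, filters, limit, topN,
// aggregation and index reads.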
func (s *testSuite) TestSimpleDAG(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int)")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t where a = 4").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select a from t limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select a from t order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustQuery("select a from t order by a desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t order by b desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where a < 3").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where b > 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where b > 1 and a < 3").Check(testkit.Rows())
tk.MustQuery("select count(*) from t where b > 1 and a < 3").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t").Check(testkit.Rows("4"))
tk.MustQuery("select count(*), c from t group by c order by c").Check(testkit.Rows("2 1", "1 2", "1 3"))
tk.MustQuery("select sum(c) as s from t group by b order by s").Check(testkit.Rows("3", "4"))
tk.MustQuery("select avg(a) as s from t group by b order by s").Check(testkit.Rows("2.0000", "4.0000"))
tk.MustQuery("select sum(distinct c) from t group by b").Check(testkit.Rows("3", "3"))
tk.MustExec("create index i on t(c,b)")
tk.MustQuery("select a from t where c = 1").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where c = 1 and a < 2").Check(testkit.Rows("1"))
tk.MustQuery("select a from t where c = 1 order by a limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from t where c = 1 ").Check(testkit.Rows("2"))
tk.MustExec("create index i1 on t(b)")
tk.MustQuery("select c from t where b = 2").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 2").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select count(*) from t where b = 1").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 1 and a > 1 limit 1").Check(testkit.Rows("2 1 1"))
// Test time push down.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, c1 datetime);")
tk.MustExec("insert into t values (1, '2015-06-07 12:12:12')")
tk.MustQuery("select id from t where c1 = '2015-06-07 12:12:12'").Check(testkit.Rows("1"))
}
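// TestTimestampTimeZone tests that timestamp values follow the time_zone session variable
// for table reads, index reads and index lookups.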
func (s *testSuite) TestTimestampTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (ts timestamp)")
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t values ('2017-04-27 22:40:42')")
// The timestamp gets a different value when the time_zone session variable changes.
tests := []struct {
timezone string
expect string
}{
{"+10:00", "2017-04-28 08:40:42"},
{"-6:00", "2017-04-27 16:40:42"},
}
for _, tt := range tests {
tk.MustExec(fmt.Sprintf("set time_zone = '%s'", tt.timezone))
tk.MustQuery("select * from t").Check(testkit.Rows(tt.expect))
}
// For issue https://github.com/pingcap/tidb/issues/3467
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
uid int(11) DEFAULT NULL,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
ip varchar(128) DEFAULT NULL,
PRIMARY KEY (id),
KEY i_datetime (datetime),
KEY i_userid (uid)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");`)
r := tk.MustQuery("select datetime from t1;") // Cover TableReaderExec
r.Check(testkit.Rows("2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10")) // Cover IndexReaderExec
r = tk.MustQuery("select * from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("123381351 1734 2014-03-31 08:57:10 127.0.0.1")) // Cover IndexLookupExec
// For issue https://github.com/pingcap/tidb/issues/3485
tk.MustExec("set time_zone = 'Asia/Shanghai'")
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
tk.MustExec(`alter table t1 add key i_datetime (datetime);`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery(`select * from t1;`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10"))
}
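// TestTimestampDefaultValueTimeZone tests that timestamp default values are displayed according
// to the current time_zone, including zero defaults and ADD COLUMN ... DEFAULT CURRENT_TIMESTAMP.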
func (s *testSuite) TestTimestampDefaultValueTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "2019-01-17 14:46:14")`)
tk.MustExec("insert into t set a=1")
r := tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 14:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 06:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14"))
// Test the case where the column's version is greater than ColumnInfoVersion1.
sctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(sctx).InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tb.Cols()[1].Version = model.ColumnInfoVersion1 + 1
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14", "3 2019-01-17 06:46:14"))
tk.MustExec("delete from t where a=3")
// Change time zone back.
tk.MustExec("set time_zone = '+08:00'")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 14:46:14", "2 2019-01-17 14:46:14"))
tk.MustExec("set time_zone = '-08:00'")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-16 22:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Test the zero default value in multiple time zones.
defer tk.MustExec(fmt.Sprintf("set @@sql_mode='%s'", tk.MustQuery("select @@sql_mode").Rows()[0][0]))
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION';")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "0000-00-00 00")`)
tk.MustExec("insert into t set a=1")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '-08:00'")
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 0000-00-00 00:00:00", "2 0000-00-00 00:00:00", "3 0000-00-00 00:00:00"))
// Test adding a timestamp column with DEFAULT CURRENT_TIMESTAMP.
tk.MustExec(`drop table if exists t`)
tk.MustExec(`set time_zone = 'Asia/Shanghai'`)
tk.MustExec(`create table t (a int)`)
tk.MustExec(`insert into t set a=1`)
tk.MustExec(`alter table t add column b timestamp not null default current_timestamp;`)
timeIn8 := tk.MustQuery("select b from t").Rows()[0][0]
tk.MustExec(`set time_zone = '+00:00'`)
timeIn0 := tk.MustQuery("select b from t").Rows()[0][0]
c.Assert(timeIn8 != timeIn0, IsTrue, Commentf("%v == %v", timeIn8, timeIn0))
datumTimeIn8, err := expression.GetTimeValue(tk.Se, timeIn8, mysql.TypeTimestamp, 0)
c.Assert(err, IsNil)
tIn8To0 := datumTimeIn8.GetMysqlTime()
timeZoneIn8, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
err = tIn8To0.ConvertTimeZone(timeZoneIn8, time.UTC)
c.Assert(err, IsNil)
c.Assert(timeIn0 == tIn8To0.String(), IsTrue, Commentf("%v != %v", timeIn0, tIn8To0.String()))
// Test adding an index.
tk.MustExec(`alter table t add index(b);`)
tk.MustExec("admin check table t")
tk.MustExec(`set time_zone = '+05:00'`)
tk.MustExec("admin check table t")
}
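// TestTiDBCurrentTS tests that @@tidb_current_ts reflects the start TS of the current
// transaction and is read-only.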
func (s *testSuite) TestTiDBCurrentTS(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
tk.MustExec("begin")
rows := tk.MustQuery("select @@tidb_current_ts").Rows()
tsStr := rows[0][0].(string)
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(tsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
tk.MustExec("begin")
rows = tk.MustQuery("select @@tidb_current_ts").Rows()
newTsStr := rows[0][0].(string)
txn, err = tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(newTsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
c.Assert(newTsStr, Not(Equals), tsStr)
tk.MustExec("commit")
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
_, err = tk.Exec("set @@tidb_current_ts = '1'")
c.Assert(terror.ErrorEqual(err, variable.ErrReadOnly), IsTrue, Commentf("err %v", err))
}
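// TestSelectForUpdate tests write conflicts between SELECT ... FOR UPDATE and concurrent
// updates from other sessions.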
func (s *testSuite) TestSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
txn, err := tk.Se.Txn(true)
c.Assert(kv.ErrInvalidTxn.Equal(err), IsTrue)
c.Assert(txn.Valid(), IsFalse)
tk.MustExec("create table t (c1 int, c2 int, c3 int)")
tk.MustExec("insert t values (11, 2, 3)")
tk.MustExec("insert t values (12, 2, 3)")
tk.MustExec("insert t values (13, 2, 3)")
tk.MustExec("create table t1 (c1 int)")
tk.MustExec("insert t1 values (11)")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
// no conflict for subquery.
tk1.MustExec("begin")
tk1.MustQuery("select * from t where exists(select null from t1 where t1.c1=t.c1) for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=22 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict, autocommit
tk1.MustExec("set @@autocommit=1;")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
tk1.MustExec("commit")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from (select * from t for update) t join t1 for update")
tk2.MustExec("begin")
tk2.MustExec("update t1 set c1 = 13")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
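// TestEmptyEnum tests inserting invalid enum values in strict and non-strict SQL mode.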
func (s *testSuite) TestEmptyEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (e enum('Y', 'N'))")
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert into t values (0)")
c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("insert into t values ('abc')")
c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err))
tk.MustExec("set sql_mode=''")
tk.MustExec("insert into t values (0)")
tk.MustQuery("select * from t").Check(testkit.Rows(""))
tk.MustExec("insert into t values ('abc')")
tk.MustQuery("select * from t").Check(testkit.Rows("", ""))
tk.MustExec("insert into t values (null)")
tk.MustQuery("select * from t").Check(testkit.Rows("", "", "<nil>"))
}
// TestIssue4024 This tests https://github.com/pingcap/tidb/issues/4024
func (s *testSuite) TestIssue4024(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test2")
tk.MustExec("use test2")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("update t, test2.t set test2.t.a=2")
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
tk.MustExec("update test.t, test2.t set test.t.a=3")
tk.MustQuery("select * from t").Check(testkit.Rows("3"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
}
const (
checkRequestOff = iota
checkRequestSyncLog
checkDDLAddIndexPriority
)
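// checkRequestClient wraps a tikv.Client. Depending on checkFlags it verifies the SyncLog flag
// on prewrite/commit requests, or verifies the priority of scan requests and counts
// low-priority prewrite requests.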
type checkRequestClient struct {
tikv.Client
priority pb.CommandPri
lowPriorityCnt uint32
mu struct {
sync.RWMutex
checkFlags uint32
syncLog bool
}
}
func (c *checkRequestClient) setCheckPriority(priority pb.CommandPri) {
atomic.StoreInt32((*int32)(&c.priority), int32(priority))
}
func (c *checkRequestClient) getCheckPriority() pb.CommandPri {
return (pb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority)))
}
func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
resp, err := c.Client.SendRequest(ctx, addr, req, timeout)
c.mu.RLock()
checkFlags := c.mu.checkFlags
c.mu.RUnlock()
if checkFlags == checkRequestSyncLog {
switch req.Type {
case tikvrpc.CmdPrewrite, tikvrpc.CmdCommit:
c.mu.RLock()
syncLog := c.mu.syncLog
c.mu.RUnlock()
if syncLog != req.SyncLog {
return nil, errors.New("fail to set sync log")
}
}
} else if checkFlags == checkDDLAddIndexPriority {
if req.Type == tikvrpc.CmdScan {
if c.getCheckPriority() != req.Priority {
return nil, errors.New("fail to set priority")
}
} else if req.Type == tikvrpc.CmdPrewrite {
if c.getCheckPriority() == pb.CommandPri_Low {
atomic.AddUint32(&c.lowPriorityCnt, 1)
}
}
}
return resp, err
}
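// testSuite1 runs executor tests against a mock TiKV store whose client is wrapped by
// checkRequestClient, so tests such as TestSyncLog can assert on the requests sent to the store.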
type testSuite1 struct {
store kv.Storage
dom *domain.Domain
cli *checkRequestClient
}
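// SetUpSuite creates a mock TiKV store with the hijacked client, disables the stats lease,
// and bootstraps a session domain on top of the store.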
func (s *testSuite1) SetUpSuite(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
)
c.Assert(err, IsNil)
session.SetStatsLease(0)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.dom.SetStatsUpdating(true)
}
func (s *testSuite1) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testSuite1) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuite2) TestAddIndexPriority(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
store, err := mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
)
c.Assert(err, IsNil)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer func() {
dom.Close()
store.Close()
}()
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (id int, v int)")
// Insert some data to make sure the planner builds an IndexLookup plan for t1.
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
}
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Low)
tk.MustExec("alter table t1 add index t1_index (id);")
c.Assert(atomic.LoadUint32(&cli.lowPriorityCnt) > 0, IsTrue)
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_NORMAL'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Normal)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_HIGH'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_High)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite1) TestAlterTableComment(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_1")
tk.MustExec("create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table';")
tk.MustExec("alter table `t_1` comment 'this is table comment';")
result := tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("this is table comment"))
tk.MustExec("alter table `t_1` comment 'table t comment';")
result = tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("table t comment"))
}
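// TestTimezonePushDown verifies that the session time zone name is encoded into the
// DAG request that is pushed down to the coprocessor.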
func (s *testSuite) TestTimezonePushDown(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (ts timestamp)")
defer tk.MustExec("drop table t")
tk.MustExec(`insert into t values ("2018-09-13 10:02:06")`)
systemTZ := timeutil.SystemLocation()
c.Assert(systemTZ.String(), Not(Equals), "System")
c.Assert(systemTZ.String(), Not(Equals), "Local")
ctx := context.Background()
count := 0
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count += 1
dagReq := new(tipb.DAGRequest)
err := proto.Unmarshal(req.Data, dagReq)
c.Assert(err, IsNil)
c.Assert(dagReq.GetTimeZoneName(), Equals, systemTZ.String())
})
tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
tk.MustExec(`set time_zone="System"`)
tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(count, Equals, 2) // Make sure the hook function is called.
}
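// TestNotFillCacheFlag verifies that the SQL_NO_CACHE / SQL_CACHE hints are translated
// into the NotFillCache flag on coprocessor requests.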
func (s *testSuite) TestNotFillCacheFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int primary key)")
defer tk.MustExec("drop table t")
tk.MustExec("insert into t values (1)")
tests := []struct {
sql string
expect bool
}{
{"select SQL_NO_CACHE * from t", true},
{"select SQL_CACHE * from t", false},
{"select * from t", false},
}
count := 0
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count++
if req.NotFillCache != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.NotFillCache)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
c.Assert(count, Equals, len(tests)) // Make sure the hook function is called.
}
func (s *testSuite1) TestSyncLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
cli := s.cli
cli.mu.Lock()
cli.mu.checkFlags = checkRequestSyncLog
cli.mu.syncLog = true
cli.mu.Unlock()
tk.MustExec("create table t (id int primary key)")
cli.mu.Lock()
cli.mu.syncLog = false
cli.mu.Unlock()
tk.MustExec("insert into t values (1)")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite) TestHandleTransfer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, index idx(a))")
tk.MustExec("insert into t values(1), (2), (4)")
tk.MustExec("begin")
tk.MustExec("update t set a = 3 where a = 4")
// Test a table-scan read whose result needs handles.
tk.MustQuery("select * from t ignore index(idx)").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("insert into t values(4)")
// Test a single (index-only) read whose result needs handles.
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t use index(idx) order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustExec("update t set a = 5 where a = 3")
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "4", "5"))
tk.MustExec("commit")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(3, 3), (1, 1), (2, 2)")
// Then test a double read (index lookup).
tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1", "2 2", "3 3"))
}
func (s *testSuite) TestBit(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(2))")
tk.MustExec("insert into t values (0), (1), (2), (3)")
_, err := tk.Exec("insert into t values (4)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values ('a')")
c.Assert(err, NotNil)
r, err := tk.Exec("select * from t where c1 = 2")
c.Assert(err, IsNil)
req := r.NewChunk()
err = r.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(types.BinaryLiteral(req.GetRow(0).GetBytes(0)), DeepEquals, types.NewBinaryLiteralFromUint(2, -1))
r.Close()
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(31))")
tk.MustExec("insert into t values (0x7fffffff)")
_, err = tk.Exec("insert into t values (0x80000000)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values (0xffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('123')")
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345)")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(62))")
tk.MustExec("insert into t values ('12345678')")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(61))")
_, err = tk.Exec("insert into t values ('12345678')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(32))")
tk.MustExec("insert into t values (0x7fffffff)")
tk.MustExec("insert into t values (0xffffffff)")
_, err = tk.Exec("insert into t values (0x1ffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
_, err = tk.Exec("insert into t values ('123456789')")
c.Assert(err, NotNil)
}
func (s *testSuite) TestEnum(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c enum('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
}
func (s *testSuite) TestSet(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c set('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select * from t where c = 'a,b'").Check(testkit.Rows("a,b", "a,b"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
}
func (s *testSuite) TestSubqueryInValues(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, name varchar(20))")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (gid int)")
tk.MustExec("insert into t1 (gid) value (1)")
tk.MustExec("insert into t (id, name) value ((select gid from t1) ,'asd')")
tk.MustQuery("select * from t").Check(testkit.Rows("1 asd"))
}
func (s *testSuite) TestEnhancedRangeAccess(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b int)")
tk.MustExec("insert into t values(1, 2), (2, 1)")
tk.MustQuery("select * from t where (a = 1 and b = 2) or (a = 2 and b = 1)").Check(testkit.Rows("1 2", "2 1"))
tk.MustQuery("select * from t where (a = 1 and b = 1) or (a = 2 and b = 2)").Check(nil)
}
// TestMaxInt64Handle covers issue #4810.
func (s *testSuite) TestMaxInt64Handle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint, PRIMARY KEY (id))")
tk.MustExec("insert into t values(9223372036854775807)")
tk.MustExec("select * from t where id = 9223372036854775807")
tk.MustQuery("select * from t where id = 9223372036854775807;").Check(testkit.Rows("9223372036854775807"))
tk.MustQuery("select * from t").Check(testkit.Rows("9223372036854775807"))
_, err := tk.Exec("insert into t values(9223372036854775807)")
c.Assert(err, NotNil)
tk.MustExec("delete from t where id = 9223372036854775807")
tk.MustQuery("select * from t").Check(nil)
}
func (s *testSuite) TestTableScanWithPointRanges(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int, PRIMARY KEY (id))")
tk.MustExec("insert into t values(1), (5), (10)")
tk.MustQuery("select * from t where id in(1, 2, 10)").Check(testkit.Rows("1", "10"))
}
func (s *testSuite) TestUnsignedPk(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint unsigned primary key)")
var num1, num2 uint64 = math.MaxInt64 + 1, math.MaxInt64 + 2
tk.MustExec(fmt.Sprintf("insert into t values(%v), (%v), (1), (2)", num1, num2))
num1Str := strconv.FormatUint(num1, 10)
num2Str := strconv.FormatUint(num2, 10)
tk.MustQuery("select * from t order by id").Check(testkit.Rows("1", "2", num1Str, num2Str))
tk.MustQuery("select * from t where id not in (2)").Check(testkit.Rows(num1Str, num2Str, "1"))
tk.MustExec("drop table t")
tk.MustExec("create table t(a bigint unsigned primary key, b int, index idx(b))")
tk.MustExec("insert into t values(9223372036854775808, 1), (1, 1)")
tk.MustQuery("select * from t use index(idx) where b = 1 and a < 2").Check(testkit.Rows("1 1"))
tk.MustQuery("select * from t use index(idx) where b = 1 order by b, a").Check(testkit.Rows("1 1", "9223372036854775808 1"))
}
func (s *testSuite) TestIssue5666(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@profiling=1")
tk.MustQuery("SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID;").Check(testkit.Rows("0 0"))
}
func (s *testSuite) TestIssue5341(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop table if exists test.t")
tk.MustExec("create table test.t(a char)")
tk.MustExec("insert into test.t value('a')")
tk.MustQuery("select * from test.t where a < 1 order by a limit 0;").Check(testkit.Rows())
}
func (s *testSuite) TestContainDotColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test.t1")
tk.MustExec("create table test.t1(t1.a char)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a char, t2.b int)")
tk.MustExec("drop table if exists t3")
_, err := tk.Exec("create table t3(s.a char);")
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongTableName))
}
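// TestCheckIndex injects inconsistent index and row KV entries by hand and verifies
// that `admin check index` reports each kind of mismatch.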
func (s *testSuite) TestCheckIndex(c *C) {
s.ctx = mock.NewContext()
s.ctx.Store = s.store
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
defer se.Close()
_, err = se.Execute(context.Background(), "create database test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "create table t (pk int primary key, c int default 1, c1 int default 1, unique key c(c))")
c.Assert(err, IsNil)
is := s.domain.InfoSchema()
db := model.NewCIStr("test_admin")
dbInfo, ok := is.SchemaByName(db)
c.Assert(ok, IsTrue)
tblName := model.NewCIStr("t")
tbl, err := is.TableByName(db, tblName)
c.Assert(err, IsNil)
tbInfo := tbl.Meta()
alloc := autoid.NewAllocator(s.store, dbInfo.ID, false, autoid.RowIDAllocType)
tb, err := tables.TableFromMeta(autoid.NewAllocators(alloc), tbInfo)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t C")
c.Assert(err, IsNil)
// set data to:
// index data (handle, data): (1, 10), (2, 20)
// table data (handle, data): (1, 10), (2, 20)
recordVal1 := types.MakeDatums(int64(1), int64(10), int64(11))
recordVal2 := types.MakeDatums(int64(2), int64(20), int64(21))
c.Assert(s.ctx.NewTxn(context.Background()), IsNil)
_, err = tb.AddRecord(s.ctx, recordVal1)
c.Assert(err, IsNil)
_, err = tb.AddRecord(s.ctx, recordVal2)
c.Assert(err, IsNil)
txn, err := s.ctx.Txn(true)
c.Assert(err, IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
mockCtx := mock.NewContext()
idx := tb.Indices()[0]
sc := &stmtctx.StatementContext{TimeZone: time.Local}
_, err = se.Execute(context.Background(), "admin check index t idx_inexistent")
c.Assert(strings.Contains(err.Error(), "not exist"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(30)), 3)
c.Assert(err, IsNil)
key := tablecodec.EncodeRowKey(tb.Meta().ID, codec.EncodeInt(nil, 4))
setColValue(c, txn, key, types.NewDatum(int64(40)))
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "handle 3, index:types.Datum{k:0x1, collation:0x0, decimal:0x0, length:0x0, i:30, b:[]uint8(nil), x:interface {}(nil)} != record:<nil>")
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(40)), 4)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 4"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(30)), 3)
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(20)), 2)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 2"), IsTrue)
// TODO: pass the case below:
// set data to:
// index data (handle, data): (1, 10), (4, 40), (2, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
}
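// setColValue writes an encoded row containing v (column ID 2) and a zero/null datum
// (column ID 3) at the given record key, letting TestCheckIndex fabricate table data
// that disagrees with the index.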
func setColValue(c *C, txn kv.Transaction, key kv.Key, v types.Datum) {
row := []types.Datum{v, {}}
colIDs := []int64{2, 3}
sc := &stmtctx.StatementContext{TimeZone: time.Local}
rd := rowcodec.Encoder{Enable: true}
value, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil, &rd)
c.Assert(err, IsNil)
err = txn.Set(key, value)
c.Assert(err, IsNil)
}
func (s *testSuite) TestCheckTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Test 'admin check table' when the table has a unique index with null values.
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL);")
tk.MustExec("admin check table admin_test;")
}
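// TestCoprocessorStreamingFlag checks, for several plan shapes, whether the coprocessor
// request is sent with the Streaming flag set (plain scans and filters stream; Limit,
// Aggregate and TopN do not).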
func (s *testSuite) TestCoprocessorStreamingFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, value int, index idx(id))")
// Add some data to make statistics work.
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i))
}
tests := []struct {
sql string
expect bool
}{
{"select * from t", true}, // TableReader
{"select * from t where id = 5", true}, // IndexLookup
{"select * from t where id > 5", true}, // Filter
{"select * from t limit 3", false}, // Limit
{"select avg(id) from t", false}, // Aggregate
{"select * from t order by value limit 3", false}, // TopN
}
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
if req.Streaming != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.Streaming)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
}
func (s *testSuite) TestIncorrectLimitArg(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint);`)
tk.MustExec(`prepare stmt1 from 'select * from t limit ?';`)
tk.MustExec(`prepare stmt2 from 'select * from t limit ?, ?';`)
tk.MustExec(`set @a = -1;`)
tk.MustExec(`set @b = 1;`)
var err error
_, err = tk.Se.Execute(context.TODO(), `execute stmt1 using @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
_, err = tk.Se.Execute(context.TODO(), `execute stmt2 using @b, @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
}
func (s *testSuite) TestLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint, b bigint);`)
tk.MustExec(`insert into t values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6);`)
tk.MustQuery(`select * from t order by a limit 1, 1;`).Check(testkit.Rows(
"2 2",
))
tk.MustQuery(`select * from t order by a limit 1, 2;`).Check(testkit.Rows(
"2 2",
"3 3",
))
tk.MustQuery(`select * from t order by a limit 1, 3;`).Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
))
tk.MustQuery(`select * from t order by a limit 1, 4;`).Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
"5 5",
))
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustQuery(`select * from t order by a limit 2, 1;`).Check(testkit.Rows(
"3 3",
))
tk.MustQuery(`select * from t order by a limit 2, 2;`).Check(testkit.Rows(
"3 3",
"4 4",
))
tk.MustQuery(`select * from t order by a limit 2, 3;`).Check(testkit.Rows(
"3 3",
"4 4",
"5 5",
))
tk.MustQuery(`select * from t order by a limit 2, 4;`).Check(testkit.Rows(
"3 3",
"4 4",
"5 5",
"6 6",
))
}
func (s *testSuite) TestCoprocessorStreamingWarning(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a double)")
tk.MustExec("insert into t value(1.2)")
tk.MustExec("set @@session.tidb_enable_streaming = 1")
result := tk.MustQuery("select * from t where a/0 > 1")
result.Check(testkit.Rows())
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1105|Division by 0"))
}
func (s *testSuite3) TestYearTypeDeleteIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a YEAR, PRIMARY KEY(a));")
tk.MustExec("insert into t set a = '2151';")
tk.MustExec("delete from t;")
tk.MustExec("admin check table t")
}
func (s *testSuite3) TestForSelectScopeInUnion(c *C) {
// In "A union B for update", the "for update" option belongs to the union statement, so
// it should work on both A and B.
tk1 := testkit.NewTestKit(c, s.store)
tk2 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t")
tk1.MustExec("create table t(a int)")
tk1.MustExec("insert into t values (1)")
tk1.MustExec("begin")
// 'For update' would act on the second select.
tk1.MustQuery("select 1 as a union select a from t for update")
tk2.MustExec("use test")
tk2.MustExec("update t set a = a + 1")
// Because tk1 selected 'for update', its commit should detect the conflict and fail.
_, err := tk1.Exec("commit")
c.Assert(err, NotNil)
tk1.MustExec("begin")
// 'For update' would be ignored if 'order by' or 'limit' exists.
tk1.MustQuery("select 1 as a union select a from t limit 5 for update")
tk1.MustQuery("select 1 as a union select a from t order by a for update")
tk2.MustExec("update t set a = a + 1")
_, err = tk1.Exec("commit")
c.Assert(err, IsNil)
}
func (s *testSuite3) TestUnsignedDecimalOverflow(c *C) {
tests := []struct {
input interface{}
hasErr bool
err string
}{{
-1,
true,
"Out of range value for column",
}, {
"-1.1e-1",
true,
"Out of range value for column",
}, {
-1.1,
true,
"Out of range value for column",
}, {
-0,
false,
"",
},
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(10,2) unsigned)")
for _, t := range tests {
res, err := tk.Exec("insert into t values (?)", t.input)
if t.hasErr {
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), t.err), IsTrue)
} else {
c.Assert(err, IsNil)
}
// Close the result set once, right here, instead of also deferring a second Close inside the loop.
if res != nil {
res.Close()
}
}
tk.MustExec("set sql_mode=''")
tk.MustExec("delete from t")
tk.MustExec("insert into t values (?)", -1)
r := tk.MustQuery("select a from t limit 1")
r.Check(testkit.Rows("0.00"))
}
func (s *testSuite3) TestIndexJoinTableDualPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists a")
tk.MustExec("create table a (f1 int, f2 varchar(32), primary key (f1))")
tk.MustExec("insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c')")
tk.MustQuery("select a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1;").
Check(testkit.Rows("1 a"))
}
func (s *testSuiteP1) TestUnionAutoSignedCast(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1,t2")
tk.MustExec("create table t1 (id int, i int, b bigint, d double, dd decimal)")
tk.MustExec("create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned)")
tk.MustExec("insert into t1 values(1, -1, -1, -1.1, -1)")
tk.MustExec("insert into t2 values(2, 1, 1, 1.1, 1)")
tk.MustQuery("select * from t1 union select * from t2 order by id").
Check(testkit.Rows("1 -1 -1 -1.1 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id").
Check(testkit.Rows("1 0 0 0 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id").
Check(testkit.Rows("1 18446744073709551615", "2 1"))
tk.MustQuery("select dd from t2 union all select dd from t2").
Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t3,t4")
tk.MustExec("create table t3 (id int, v int)")
tk.MustExec("create table t4 (id int, v double unsigned)")
tk.MustExec("insert into t3 values (1, -1)")
tk.MustExec("insert into t4 values (2, 1)")
tk.MustQuery("select id, v from t3 union select id, v from t4 order by id").
Check(testkit.Rows("1 -1", "2 1"))
tk.MustQuery("select id, v from t4 union select id, v from t3 order by id").
Check(testkit.Rows("1 0", "2 1"))
tk.MustExec("drop table if exists t5,t6,t7")
tk.MustExec("create table t5 (id int, v bigint unsigned)")
tk.MustExec("create table t6 (id int, v decimal)")
tk.MustExec("create table t7 (id int, v bigint)")
tk.MustExec("insert into t5 values (1, 1)")
tk.MustExec("insert into t6 values (2, -1)")
tk.MustExec("insert into t7 values (3, -1)")
tk.MustQuery("select id, v from t5 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1"))
tk.MustQuery("select id, v from t5 union select id, v from t7 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1", "3 -1"))
}
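// TestUpdateJoin covers multi-table UPDATE through left and right joins, including
// assignments to the side of the join that has no matching rows.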
func (s *testSuite6) TestUpdateJoin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7")
tk.MustExec("create table t1(k int, v int)")
tk.MustExec("create table t2(k int, v int)")
tk.MustExec("create table t3(id int auto_increment, k int, v int, primary key(id))")
tk.MustExec("create table t4(k int, v int)")
tk.MustExec("create table t5(v int, k int, primary key(k))")
tk.MustExec("insert into t1 values (1, 1)")
tk.MustExec("insert into t4 values (3, 3)")
tk.MustExec("create table t6 (id int, v longtext)")
tk.MustExec("create table t7 (x int, id int, v longtext, primary key(id))")
// Test the normal case: update one row in a single table.
tk.MustExec("update t1 set v = 0 where k = 1")
tk.MustQuery("select k, v from t1 where k = 1").Check(testkit.Rows("1 0"))
// Test the case where a table with auto_increment or non-null columns is the right table of a left join.
tk.MustExec("update t1 left join t3 on t1.k = t3.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 1"))
tk.MustQuery("select id, k, v from t3").Check(testkit.Rows())
// Test a left join where the right table has no matching record but its columns are still assigned.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// Test the case where the left-table assignment reads a right-table column while that right-table column is also modified in the same statement.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// Test a right join where the left table has no matching record but its columns are still assigned.
tk.MustExec("update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// Test right join and left join used in the same statement.
tk.MustExec("update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
tk.MustQuery("select k, v from t4").Check(testkit.Rows("3 4"))
// Test a normal left join where the right table has matching rows.
tk.MustExec("insert t2 values (1, 10)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 11")
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// Test joining the same table repeatedly and updating the unmatched records.
tk.MustExec("update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 111"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// Test a left join where the left table has records but their values are all null.
tk.MustExec("delete from t1")
tk.MustExec("delete from t2")
tk.MustExec("insert into t1 values (null, null)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 1"))
// Test the case where the right table of the left join has a primary key.
tk.MustExec("insert t5 values(0, 0)")
tk.MustExec("update t1 left join t5 on t1.k = t5.k set t1.v = 2")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 2"))
tk.MustQuery("select k, v from t5").Check(testkit.Rows("0 0"))
tk.MustExec("insert into t6 values (1, NULL)")
tk.MustExec("insert into t7 values (5, 1, 'a')")
tk.MustExec("update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5")
tk.MustQuery("select v from t6").Check(testkit.Rows("a"))
}
func (s *testSuite3) TestMaxOneRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`drop table if exists t2`)
tk.MustExec(`create table t1(a double, b double);`)
tk.MustExec(`create table t2(a double, b double);`)
tk.MustExec(`insert into t1 values(1, 1), (2, 2), (3, 3);`)
tk.MustExec(`insert into t2 values(0, 0);`)
tk.MustExec(`set @@tidb_init_chunk_size=1;`)
rs, err := tk.Exec(`select (select t1.a from t1 where t1.a > t2.a) as a from t2;`)
c.Assert(err, IsNil)
err = rs.Next(context.TODO(), rs.NewChunk())
c.Assert(err.Error(), Equals, "subquery returns more than 1 row")
err = rs.Close()
c.Assert(err, IsNil)
}
func (s *testSuiteP2) TestCurrentTimestampValueSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (id int, t0 timestamp null default current_timestamp, t1 timestamp(1) null default current_timestamp(1), t2 timestamp(2) null default current_timestamp(2) on update current_timestamp(2))")
tk.MustExec("insert into t (id) values (1)")
rs := tk.MustQuery("select t0, t1, t2 from t where id = 1")
t0 := rs.Rows()[0][0].(string)
t1 := rs.Rows()[0][1].(string)
t2 := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(t0, ".")), Equals, 1)
c.Assert(len(strings.Split(t1, ".")[1]), Equals, 1)
c.Assert(len(strings.Split(t2, ".")[1]), Equals, 2)
tk.MustQuery("select id from t where t0 = ?", t0).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t1 = ?", t1).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t2 = ?", t2).Check(testkit.Rows("1"))
time.Sleep(time.Second)
tk.MustExec("update t set t0 = now() where id = 1")
rs = tk.MustQuery("select t2 from t where id = 1")
newT2 := rs.Rows()[0][0].(string)
c.Assert(newT2 != t2, IsTrue)
tk.MustExec("create table t1 (id int, a timestamp, b timestamp(2), c timestamp(3))")
tk.MustExec("insert into t1 (id, a, b, c) values (1, current_timestamp(2), current_timestamp, current_timestamp(3))")
rs = tk.MustQuery("select a, b, c from t1 where id = 1")
a := rs.Rows()[0][0].(string)
b := rs.Rows()[0][1].(string)
d := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(a, ".")), Equals, 1)
c.Assert(strings.Split(b, ".")[1], Equals, "00")
c.Assert(len(strings.Split(d, ".")[1]), Equals, 3)
}
func (s *testSuite3) TestRowID(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustQuery(`select b, _tidb_rowid from t use index(idx) where a = 'a';`).Check(testkit.Rows(
`b 1`,
`b 2`,
))
tk.MustExec(`begin;`)
tk.MustExec(`select * from t for update`)
tk.MustQuery(`select distinct b from t use index(idx) where a = 'a';`).Check(testkit.Rows(`b`))
tk.MustExec(`commit;`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(5) primary key)`)
tk.MustExec(`insert into t values('a')`)
tk.MustQuery("select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1").Check(testkit.Rows("a 1"))
}
func (s *testSuite3) TestDoSubquery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
_, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
tk.MustExec(`insert into t values(1)`)
r, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
c.Assert(r, IsNil, Commentf("result of Do not empty"))
}
func (s *testSuite3) TestTSOFail(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockGetTSFail", "return"), IsNil)
ctx := failpoint.WithHook(context.Background(), func(ctx context.Context, fpname string) bool {
return fpname == "github.com/pingcap/tidb/session/mockGetTSFail"
})
_, err := tk.Se.Execute(ctx, `select * from t`)
c.Assert(err, NotNil)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockGetTSFail"), IsNil)
}
func (s *testSuite3) TestSelectHashPartitionTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th`)
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
defer tk.MustExec(`drop table if exists th`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustQuery("select b from th order by a").Check(testkit.Rows("-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5", "6", "7", "8"))
tk.MustQuery(" select * from th where a=-2;").Check(testkit.Rows("-2 -2"))
tk.MustQuery(" select * from th where a=5;").Check(testkit.Rows("5 5"))
}
func (s *testSuiteP1) TestSelectPartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th, tr`)
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
tk.MustExec(`create table tr (a int, b int)
partition by range (a) (
partition r0 values less than (4),
partition r1 values less than (7),
partition r3 values less than maxvalue)`)
defer tk.MustExec(`drop table if exists th, tr`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustExec(`insert into tr values (-3,-3),(3,3),(4,4),(7,7),(8,8);`)
// Select a single partition.
tk.MustQuery("select b from th partition (p0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from th partition (p0,P0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0,R0,r0) order by a").Check(testkit.Rows("-3", "3"))
// Select multiple partitions.
tk.MustQuery("select b from th partition (P2,p0) order by a").Check(testkit.Rows("-8", "-6", "-5", "-3", "-2", "0", "2", "3", "5", "6", "8"))
tk.MustQuery("select b from tr partition (r1,R3) order by a").Check(testkit.Rows("4", "7", "8"))
// Selecting an unknown partition should return an error.
err := tk.ExecToErr("select b from th partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'th'")
err = tk.ExecToErr("select b from tr partition (r1,r4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'r4' in table 'tr'")
// Test selecting from a partitioned table inside a transaction.
tk.MustExec("begin")
tk.MustExec("insert into th values (10,10),(11,11)")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
tk.MustExec("commit")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
}
func (s *testSuite) TestSelectView(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table view_t (a int,b int)")
tk.MustExec("insert into view_t values(1,2)")
tk.MustExec("create definer='root'@'localhost' view view1 as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view2(c,d) as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view3(c,d) as select a,b from view_t")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(c int,d int)")
err := tk.ExecToErr("select * from view1")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view2")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view3")
c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view3").Error())
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(a int,b int,c int)")
tk.MustExec("insert into view_t values(1,2,3)")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustExec("alter table view_t drop column a")
tk.MustExec("alter table view_t add column a int after b")
tk.MustExec("update view_t set a=1;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("drop view view1,view2,view3;")
tk.MustExec("set @@tidb_enable_window_function = 1")
defer func() {
tk.MustExec("set @@tidb_enable_window_function = 0")
}()
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t values (1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t")
result := tk.MustQuery("select * from v")
result.Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2"))
tk.MustExec("drop view v;")
}
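// testSuite2 through testSuite8 below are thin shells around baseTestSuite with identical
// TearDownTest logic; presumably they exist only so gocheck can split the executor tests
// into independently runnable groups.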
type testSuite2 struct {
*baseTestSuite
}
func (s *testSuite2) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite3 struct {
*baseTestSuite
}
func (s *testSuite3) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite4 struct {
*baseTestSuite
}
func (s *testSuite4) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite5 struct {
*baseTestSuite
}
func (s *testSuite5) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite6 struct {
*baseTestSuite
}
func (s *testSuite6) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite7 struct {
*baseTestSuite
}
func (s *testSuite7) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite8 struct {
*baseTestSuite
}
func (s *testSuite8) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
func (s *testSuiteP2) TestStrToDateBuiltin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%!') from dual`).Check(testkit.Rows("2019-01-01"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%f') from dual`).Check(testkit.Rows("2019-01-01 00:00:00.000000"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%H%i%s') from dual`).Check(testkit.Rows("2019-01-01 00:00:00"))
tk.MustQuery(`select str_to_date('18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("0018-10-22"))
tk.MustQuery(`select str_to_date('2018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%Y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('20188/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018510522','%Y5%m5%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018^10^22','%Y^%m^%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018@10@22','%Y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018%10%22','%Y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018(10(22','%Y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018\10\22','%Y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018=10=22','%Y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018+10+22','%Y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018_10_22','%Y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('69510522','%y5%m5%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('69^10^22','%y^%m^%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('18@10@22','%y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18%10%22','%y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18(10(22','%y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18\10\22','%y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18+10+22','%y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18=10=22','%y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18_10_22','%y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
}
func (s *testSuiteP2) TestReadPartitionedTable(c *C) {
// Test the three readers (table reader, index reader, index lookup) on a partitioned table.
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists pt")
tk.MustExec("create table pt (a int, b int, index i_b(b)) partition by range (a) (partition p1 values less than (2), partition p2 values less than (4), partition p3 values less than (6))")
for i := 0; i < 6; i++ {
tk.MustExec(fmt.Sprintf("insert into pt values(%d, %d)", i, i))
}
// Table reader
tk.MustQuery("select * from pt order by a").Check(testkit.Rows("0 0", "1 1", "2 2", "3 3", "4 4", "5 5"))
// Index reader
tk.MustQuery("select b from pt where b = 3").Check(testkit.Rows("3"))
// Index lookup
tk.MustQuery("select a from pt where b = 3").Check(testkit.Rows("3"))
}
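// TestSplitRegion exercises the SPLIT TABLE / SPLIT REGION syntax: argument validation
// errors for index and table splits, batch splits that span multiple requests, and
// splits of partitioned tables.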
func (s *testSuiteP2) TestSplitRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
_, err := tk.Exec(`split table t index idx1 by ("abcd");`)
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.WarnDataTruncated))
// Test splitting index regions.
// Check that a lower value greater than the upper value is rejected.
tk.MustExec(`split table t index idx1 between (0) and (1000000000) regions 10`)
_, err = tk.Exec(`split table t index idx1 between (2,'a') and (1,'c') regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region lower value (2,a) should less than the upper value (1,c)")
// Check that an empty lower value is rejected.
_, err = tk.Exec(`split table t index idx1 between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region lower value count should more than 0")
// Check that an empty upper value is rejected.
_, err = tk.Exec(`split table t index idx1 between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region upper value count should more than 0")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t index idx1 between ("aa") and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column 'b'")
// Test for split table region.
tk.MustExec(`split table t between (0) and (1000000000) regions 10`)
// Check that a lower value greater than the upper value is rejected.
_, err = tk.Exec(`split table t between (2) and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table `t` region lower value 2 should less than the upper value 1")
// Check that an empty lower value is rejected.
_, err = tk.Exec(`split table t between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region lower value count should be 1")
// Check that an empty upper value is rejected.
_, err = tk.Exec(`split table t between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region upper value count should be 1")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t between ("aa") and (1000000000) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column '_tidb_rowid'")
// Test split table region step is too small.
_, err = tk.Exec(`split table t between (0) and (100) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table `t` region step value should more than 1000, step 10 is invalid")
// Test split region by syntax.
tk.MustExec(`split table t by (0),(1000),(1000000)`)
// Test split region twice to test for multiple batch split region requests.
tk.MustExec("create table t1(a int, b int)")
tk.MustQuery("split table t1 between(0) and (10000) regions 10;").Check(testkit.Rows("9 1"))
tk.MustQuery("split table t1 between(10) and (10010) regions 5;").Check(testkit.Rows("4 1"))
// Test split region for partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (1000000) regions 5;").Check(testkit.Rows("20 1"))
// Test the `split region for partition table` syntax.
tk.MustQuery("split region for partition table t between (1000000) and (100000000) regions 10;").Check(testkit.Rows("45 1"))
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p1,p2) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
// Test the `split region for partition table` syntax with specified partitions.
tk.MustQuery("split region for partition table t partition (p3,p4) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
}
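// TestShowTableRegion checks the output of `show table ... regions`. Region start keys
// follow TiDB's key encoding: t_<tableID>_r_<rowID> for record ranges and
// t_<tableID>_i_<indexID>_... for index ranges, which is what the assertions below match.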
func (s *testSuiteP2) TestShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_regions")
tk.MustExec("create table t_regions (a int key, b int, c int, index idx(b), index idx2(c))")
// Test show table regions.
tk.MustQuery(`split table t_regions between (-10000) and (10000) regions 4;`).Check(testkit.Rows("4 1"))
re := tk.MustQuery("show table t_regions regions")
rows := re.Rows()
// Table t_regions should have 5 regions now.
// 4 regions to store record data.
// 1 region to store index data.
c.Assert(len(rows), Equals, 5)
c.Assert(len(rows[0]), Equals, 11)
tbl := testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx between (-1000) and (1000) regions 4;`).Check(testkit.Rows("5 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 9 regions now.
// 4 regions to store record data.
// 4 regions to store index idx data.
// 1 region to store index idx2 data.
c.Assert(len(rows), Equals, 9)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[7][2], Equals, fmt.Sprintf("t_%d_i_2_", tbl.Meta().ID))
c.Assert(rows[8][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test unsigned primary key and wait scatter finish.
tk.MustExec("drop table if exists t_regions")
tk.MustExec("create table t_regions (a int unsigned key, b int, index idx(b))")
// Test show table regions.
tk.MustExec(`set @@session.tidb_wait_split_region_finish=1;`)
tk.MustQuery(`split table t_regions by (2500),(5000),(7500);`).Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Matches, "t_.*")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2500", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_7500", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx by (250),(500),(750);`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
// Test show table regions for partition table when disable split region when create table.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Matches, "t_.*")
// Test show table regions for partition table when enable split region when create table.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set @@global.tidb_scatter_region=1;")
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 3)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
// Test pre-split table region when create table.
tk.MustExec("drop table if exists t_pre")
tk.MustExec("create table t_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2;")
re = tk.MustQuery("show table t_pre regions")
rows = re.Rows()
// Table t_pre should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_pre")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID))
defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
// Test split partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("15 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 20)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i, p := range tbl.Meta().GetPartitionInfo().Definitions {
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) between (1000000) and (2000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 24)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_1200000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_1400000", p.ID))
c.Assert(rows[i*4+4][1], Equals, fmt.Sprintf("t_%d_r_1600000", p.ID))
c.Assert(rows[i*4+5][1], Equals, fmt.Sprintf("t_%d_r_1800000", p.ID))
c.Assert(rows[i*4+6][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+7][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test split partition table index.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int,index idx(a)) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
tk.MustQuery("split table t index idx between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 40)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_i_1_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
// Test split index region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) index idx between (0) and (1000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 44)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_i_1_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_i_1_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+8][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+9][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+10][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+11][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
}
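// testGetTableByName reloads the domain's schema and returns the table with the given database and table name.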
func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table {
dom := domain.GetDomain(ctx)
// Make sure the table schema is the new schema.
err := dom.Reload()
c.Assert(err, IsNil)
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table))
c.Assert(err, IsNil)
return tbl
}
func (s *testSuiteP2) TestIssue10435(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(i int, j int, k int)")
tk.MustExec("insert into t1 VALUES (1,1,1),(2,2,2),(3,3,3),(4,4,4)")
tk.MustExec("INSERT INTO t1 SELECT 10*i,j,5*j FROM t1 UNION SELECT 20*i,j,5*j FROM t1 UNION SELECT 30*i,j,5*j FROM t1")
tk.MustExec("set @@session.tidb_enable_window_function=1")
tk.MustQuery("SELECT SUM(i) OVER W FROM t1 WINDOW w AS (PARTITION BY j ORDER BY i) ORDER BY 1+SUM(i) OVER w").Check(
testkit.Rows("1", "2", "3", "4", "11", "22", "31", "33", "44", "61", "62", "93", "122", "124", "183", "244"),
)
}
func (s *testSuiteP2) TestUnsignedFeedback(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1,1),(2,2)")
tk.MustExec("analyze table t")
tk.MustQuery("select count(distinct b) from t").Check(testkit.Rows("2"))
result := tk.MustQuery("explain analyze select count(distinct b) from t")
c.Assert(result.Rows()[2][3], Equals, "table:t, range:[0,+inf], keep order:false")
}
func (s *testSuite) TestOOMPanicAction(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b double);")
tk.MustExec("insert into t values (1,1)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
orgAction := config.GetGlobalConfig().OOMAction
setOOMAction(config.OOMActionCancel)
defer func() {
setOOMAction(orgAction)
}()
tk.MustExec("set @@tidb_mem_quota_query=1;")
err := tk.QueryToErr("select sum(b) from t group by a;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
// Test insert from select oom panic.
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (a bigint);")
tk.MustExec("create table t1 (a bigint);")
tk.MustExec("set @@tidb_mem_quota_query=200;")
_, err = tk.Exec("insert into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t1 values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=200;")
_, err = tk.Exec("insert into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
// Set the memory quota to 244 to make this SQL panic during the DeleteExec
// instead of the TableReaderExec.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete from t")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000;")
tk.MustExec("delete from t1")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete t, t1 from t join t1 on t.a = t1.a")
tk.MustExec("set @@tidb_mem_quota_query=100000;")
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(1),(2),(3)")
// Set the memory quota to 244 to make the SQL panic during the UpdateExec
// instead of the TableReaderExec.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("update t set a = 4")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
}
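// setOOMAction replaces the global config with a copy whose OOMAction is set to the given action.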
func setOOMAction(action string) {
old := config.GetGlobalConfig()
newConf := *old
newConf.OOMAction = action
config.StoreGlobalConfig(&newConf)
}
type testRecoverTable struct {
store kv.Storage
dom *domain.Domain
cluster *mocktikv.Cluster
cli *regionProperityClient
}
func (s *testRecoverTable) SetUpSuite(c *C) {
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.store, err = mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
mockstore.WithCluster(s.cluster),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testRecoverTable) TearDownSuite(c *C) {
s.store.Close()
s.dom.Close()
}
func (s *testRecoverTable) TestRecoverTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange")
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover, t_recover2")
tk.MustExec("create table t_recover (a int);")
defer func(originGC bool) {
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}(ddl.IsEmulatorGCEnable())
// Disable emulator GC.
// Otherwise the emulator GC will delete the table records as soon as possible after the drop table DDL is executed.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - time.Duration(48*60*60*time.Second)).Format(gcTimeFormat)
timeAfterDrop := time.Now().Add(time.Duration(48 * 60 * 60 * time.Second)).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
// If the GC safe point does not exist in mysql.tidb, recover should fail.
_, err := tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "can not get 'tikv_gc_safe_point'")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If tikv_gc_enable does not exist in mysql.tidb, recover should fail.
_, err = tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:-1]can not get 'tikv_gc_enable'")
err = gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// The recover snapshot is older than the GC safe point, so recover should fail.
tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop))
_, err = tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), "snapshot is older than GC safe point"), Equals, true)
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If there is already a new table with the same name, recover should fail.
tk.MustExec("create table t_recover (a int);")
_, err = tk.Exec("recover table t_recover")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error())
// drop the new table with the same name, then recover table.
tk.MustExec("rename table t_recover to t_recover2")
// do recover table.
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_recover;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Recover table by a non-existent job ID.
_, err = tk.Exec(fmt.Sprintf("recover table by job %d", 10000000))
c.Assert(err, NotNil)
// Disable GC manually first; after recovering the table, the GC enable status should remain disabled.
err = gcutil.DisableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("delete from t_recover where a > 1")
tk.MustExec("drop table t_recover")
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (7),(8),(9)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9"))
gcEnable, err := gcutil.CheckGCEnable(tk.Se)
c.Assert(err, IsNil)
c.Assert(gcEnable, Equals, false)
}
func (s *testRecoverTable) TestFlashbackTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_flashback")
tk.MustExec("use test_flashback")
tk.MustExec("drop table if exists t_flashback")
tk.MustExec("create table t_flashback (a int);")
defer func(originGC bool) {
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}(ddl.IsEmulatorGCEnable())
// Disable emulator GC.
// Otherwise the emulator GC will delete the table records as soon as possible after the drop table DDL is executed.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - time.Duration(48*60*60*time.Second)).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
// Clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
// Set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Set GC enable.
err := gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("insert into t_flashback values (1),(2),(3)")
tk.MustExec("drop table t_flashback")
// Test flashback table with a wrong timestamp.
_, err = tk.Exec(fmt.Sprintf("flashback table t_flashback until timestamp '%v'", time.Now().String()))
c.Assert(err.Error(), Equals, "Can't find dropped table: t_flashback in ddl history jobs")
// Test that flashback table fails when a new table with the same name already exists.
ts := getDDLJobStartTime(tk, "test_flashback", "t_flashback")
// If there is a new table with the same name, flashback should fail.
tk.MustExec("create table t_flashback (a int);")
_, err = tk.Exec(fmt.Sprintf("flashback table t_flashback until timestamp '%v'", ts))
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_flashback").Error())
// Drop the new table with the same name, then flashback table.
tk.MustExec("drop table t_flashback")
// Test for flashback table.
tk.MustExec(fmt.Sprintf("flashback table t_flashback until timestamp '%v'", ts))
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback values (4),(5),(6)")
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Test for flashback to new table.
tk.MustExec("drop table t_flashback")
ts = getDDLJobStartTime(tk, "test_flashback", "t_flashback")
tk.MustExec("create table t_flashback (a int);")
tk.MustExec(fmt.Sprintf("flashback table t_flashback until timestamp '%v' to t_flashback2", ts))
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback2 values (7),(8),(9)")
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback2;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003"))
// Test flashing back the same table multiple times.
_, err = tk.Exec(fmt.Sprintf("flashback table t_flashback until timestamp '%v' to t_flashback4", ts))
c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue)
// Test for flashback truncated table to new table.
tk.MustExec("truncate table t_flashback2")
ts = getDDLJobStartTime(tk, "test_flashback", "t_flashback2")
tk.MustExec(fmt.Sprintf("flashback table t_flashback2 until timestamp '%v' to t_flashback3", ts))
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback3 values (10),(11)")
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback3;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003", "10 15001", "11 15002"))
// Test for flashback drop partition table.
tk.MustExec("drop table if exists t_p_flashback")
tk.MustExec("create table t_p_flashback (a int) partition by hash(a) partitions 4;")
tk.MustExec("insert into t_p_flashback values (1),(2),(3)")
tk.MustExec("drop table t_p_flashback")
ts = getDDLJobStartTime(tk, "test_flashback", "t_p_flashback")
tk.MustExec(fmt.Sprintf("flashback table t_p_flashback until timestamp '%v'", ts))
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback order by a;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback values (4),(5)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002"))
// Test for flashback truncate partition table.
tk.MustExec("truncate table t_p_flashback")
ts = getDDLJobStartTime(tk, "test_flashback", "t_p_flashback")
tk.MustExec(fmt.Sprintf("flashback table t_p_flashback until timestamp '%v' to t_p_flashback1", ts))
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback1 order by a;").Check(testkit.Rows("1", "2", "3", "4", "5"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback1 values (6)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback1 order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 10001"))
}
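// getDDLJobStartTime scans `admin show ddl jobs 100` for a drop/truncate job on the given table and returns its start time, or an empty string if none is found.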
func getDDLJobStartTime(tk *testkit.TestKit, dbName, tblName string) string {
re := tk.MustQuery("admin show ddl jobs 100")
rows := re.Rows()
for _, row := range rows {
if row[1] == dbName && row[2] == tblName && (row[3] == "drop table" || row[3] == "truncate table") {
return row[8].(string)
}
}
return ""
}
func (s *testSuiteP2) TestPointGetPreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
pspk2Id, _, _, err := tk1.Se.PrepareStmt("select * from t where ? = a ")
c.Assert(err, IsNil)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
// unique index
psuk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where b = ? ")
c.Assert(err, IsNil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// test schema changed, cached plan should be invalidated
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec(`insert into t values(4, 3, 3, 11)`)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10", "4 3 3 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec("delete from t where a = 4")
tk1.MustExec("alter table t add index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// use pk again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
}
func (s *testSuiteP2) TestPointGetPreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point-get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use ps_text")
tk2.MustExec("update t set c = c + 10 where c = 1")
// try to point get again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// try to update in session 1
tk1.MustExec("update t set c = c + 10 where c = 1")
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 11"))
}
func (s *testSuiteP2) TestPointUpdatePreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test")
defer tk1.MustExec("drop database if exists pu_test")
tk1.MustExec("create database pu_test")
tk1.MustExec("use pu_test")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
updateID1, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
c.Assert(pc, Equals, 1)
updateID2, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 2 where ? = a`)
c.Assert(err, IsNil)
c.Assert(pc, Equals, 1)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// updateID2
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 8"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
// unique index
updUkID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 10 where b = ?`)
c.Assert(err, IsNil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 20"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 30"))
// test schema changed, cached plan should be invalidated
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 31 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 32 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 42 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 52 10"))
tk1.MustExec("alter table t add unique index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 62 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 72 10"))
tk1.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1 10"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2 10"))
}
func (s *testSuiteP2) TestPointUpdatePreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test2")
defer tk1.MustExec("drop database if exists pu_test2")
tk1.MustExec("create database pu_test2")
tk1.MustExec("use pu_test2")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
ctx := context.Background()
updateID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point-get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use pu_test2")
tk2.MustExec(`prepare pu2 from "update t set c = c + 2 where ? = a "`)
tk2.MustExec("set @p3 = 3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 7"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// try to update in session 1
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// Again, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
tk1.MustExec("commit")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
}
func (s *testSuite1) TestPartitionHashCode(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec(`create table t(c1 bigint, c2 bigint, c3 bigint, primary key(c1))
partition by hash (c1) partitions 4;`)
wg := sync.WaitGroup{}
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
tk1 := testkit.NewTestKitWithInit(c, s.store)
for i := 0; i < 5; i++ {
tk1.MustExec("select * from t")
}
}()
}
wg.Wait()
}
func (s *testSuite1) TestAlterDefaultValue(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(a int, primary key(a))")
tk.MustExec("insert into t(a) values(1)")
tk.MustExec("alter table t add column b int default 1")
tk.MustExec("alter table t alter b set default 2")
tk.MustQuery("select b from t where a = 1").Check(testkit.Rows("1"))
}
| [
"\"log_level\""
] | [] | [
"log_level"
] | [] | ["log_level"] | go | 1 | 0 | |
app/main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"strings"
"github.com/Useurmind/go-greetings/pkg/db"
)
var dbType string = ""
var dataSource string = ""
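// splitOutFirstPathPart splits a path into its first segment and the remainder after the first slash.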
func splitOutFirstPathPart(path string) (string, string) {
parts := strings.SplitN(path, "/", 2)
secondPart := ""
if len(parts) > 1 {
secondPart = parts[1]
}
return parts[0], secondPart
}
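// handler creates a DB context per request, dispatches on the first path segment (health, remember, greet) and maps errors to HTTP 500.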
func handler(w http.ResponseWriter, r *http.Request) {
action, remainder := splitOutFirstPathPart(r.URL.Path[1:])
log.Printf("Handling request to path %s, action is %s, remainder is %s", r.URL.Path, action, remainder)
var err error = nil
ctx, err := db.NewDBContext(dbType, dataSource)
if err != nil {
log.Printf("ERROR while creating db context: %v", err)
w.WriteHeader(500)
return
}
switch action {
case "health":
err = handleHealth(ctx, w, r, remainder)
case "remember":
err = handleRemember(ctx, w, r, remainder)
case "greet":
err = handleGreet(ctx, w, r, remainder)
default:
w.WriteHeader(404)
fmt.Fprintf(w, "Unknown path")
return
}
if err != nil {
log.Printf("ERROR while executing action %s: %s", action, err)
w.WriteHeader(500)
fmt.Fprintf(w, "An error occurred while action %s was performed", action)
}
}
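// handleHealth reports service health with a static 200 response.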
func handleHealth(ctx db.DBContext, w http.ResponseWriter, r *http.Request, remainderPath string) error {
w.WriteHeader(200)
fmt.Fprintf(w, "Working")
return nil
}
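// handleRemember parses the person and greeting from the path and persists the greeting.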
func handleRemember(ctx db.DBContext, w http.ResponseWriter, r *http.Request, remainderPath string) error {
rememberedPerson, greeting := splitOutFirstPathPart(remainderPath)
err := ctx.SaveGreeting(rememberedPerson, greeting)
if err != nil {
return err
}
fmt.Fprintf(w, "Stored greeting for %s: %s", rememberedPerson, greeting)
return nil
}
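// handleGreet looks up the stored greeting for the person in the path and writes it to the response.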
func handleGreet(ctx db.DBContext, w http.ResponseWriter, r *http.Request, remainderPath string) error {
rememberedPerson, _ := splitOutFirstPathPart(remainderPath)
greeting, err := ctx.GetGreeting(rememberedPerson)
if err != nil {
return err
}
// Use Fprint so the stored greeting is not interpreted as a format string.
fmt.Fprint(w, *greeting)
return nil
}
func main() {
port := 8080
dbType = os.Getenv("GOGREETING_DBTYPE")
dataSource = os.Getenv("GOGREETING_DATASOURCE")
if dbType == "" {
panic("No db type specified, set GOGREETING_DBTYPE")
}
if dataSource == "" {
panic("No datasource specified, set GOGREETING_DATASOURCE")
}
fmt.Printf("Using %s\r\n", dbType)
http.HandleFunc("/", handler)
log.Printf("Listening on port %d", port)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
}
| [
"\"GOGREETING_DBTYPE\"",
"\"GOGREETING_DATASOURCE\""
] | [] | [
"GOGREETING_DBTYPE",
"GOGREETING_DATASOURCE"
] | [] | ["GOGREETING_DBTYPE", "GOGREETING_DATASOURCE"] | go | 2 | 0 | |
generator/header.go | package generator
import (
"bytes"
"os"
"strings"
"text/template"
"time"
"go.uber.org/zap"
)
const (
templateHeader = `#!/usr/bin/env bash
#
# PATCHFILES SCRIPT FOR {{.ScriptFor}}
#
# author: {{.Author}}
# version: {{.Version}}
# environment: {{.Environment}}
# built: {{.Built}}
#
#
args=("$@")
category="${args[0]}"
{{ if eq .ScriptFor "PATCHING" }}
if test -f "{{.PatchFilesControlFile}}"; then
echo "System already patched exiting"
exit 0
fi
{{ end }}
{{ if eq .ScriptFor "REVERTING" }}
if test ! -f "{{.PatchFilesControlFile}}"; then
echo "System is not patched. Exiting."
exit 0
fi
{{ end }}
`
)
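// Header holds the values rendered into the generated script header template.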
type Header struct {
ScriptFor string
Author string
Version string
Environment string
Built string
PatchFilesControlFile string
}
// writeHeader renders the script header template and writes it to fd.
func (generator *Generator) writeHeader(fd *os.File, scriptFor string) (err error) {
logger := generator.Log.WithOptions(zap.Fields())
logger.Debug("attempt to write header",
zap.String("scriptFor", scriptFor),
)
built := time.Now().UTC().Format("2006-01-02 15:04:05 -07:00")
author := os.Getenv("AUTHOR")
author = strings.ToLower(author)
author = strings.Trim(author, " ")
version := os.Getenv("VERSION")
version = strings.ToLower(version)
version = strings.Trim(version, " ")
data := Header{
Author: author,
Version: version,
Built: built,
ScriptFor: scriptFor,
Environment: generator.Environment,
PatchFilesControlFile: patchFilesControlFile,
}
var (
buf = new(bytes.Buffer)
)
tpl, err := template.New("template").Parse(templateHeader)
t := template.Must(tpl, err)
err = t.Execute(buf, data)
if err != nil {
return
}
res := buf.String()
res = strings.ReplaceAll(res, "\t", "")
fd.WriteString(res)
fd.Sync()
return
}
| [
"\"AUTHOR\"",
"\"VERSION\""
] | [] | [
"VERSION",
"AUTHOR"
] | [] | ["VERSION", "AUTHOR"] | go | 2 | 0 | |
drivers/node/gke/gke.go | package gke
import (
"os"
"time"
"github.com/sirupsen/logrus"
"github.com/libopenstorage/cloudops"
"github.com/libopenstorage/cloudops/gce"
"github.com/portworx/torpedo/drivers/node"
"github.com/portworx/torpedo/drivers/node/ssh"
)
const (
// DriverName is the name of the gke driver
DriverName = "gke"
)
type gke struct {
ssh.SSH
ops cloudops.Ops
instanceGroup string
}
func (g *gke) String() string {
return DriverName
}
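// Init initializes the embedded SSH driver and the GCE cloudops client; the instance group defaults to "default-pool" unless INSTANCE_GROUP is set.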
func (g *gke) Init(nodeOpts node.InitOptions) error {
g.SSH.Init(nodeOpts)
instanceGroup := os.Getenv("INSTANCE_GROUP")
if len(instanceGroup) != 0 {
g.instanceGroup = instanceGroup
} else {
g.instanceGroup = "default-pool"
}
ops, err := gce.NewClient()
if err != nil {
return err
}
g.ops = ops
return nil
}
func (g *gke) SetASGClusterSize(perZoneCount int64, timeout time.Duration) error {
// GCP SDK requires per zone cluster size
err := g.ops.SetInstanceGroupSize(g.instanceGroup, perZoneCount, timeout)
if err != nil {
logrus.Errorf("failed to set size of node pool %s. Error: %v", g.instanceGroup, err)
return err
}
return nil
}
func (g *gke) GetASGClusterSize() (int64, error) {
nodeCount, err := g.ops.GetInstanceGroupSize(g.instanceGroup)
if err != nil {
logrus.Errorf("failed to get size of node pool %s. Error: %v", g.instanceGroup, err)
return 0, err
}
return nodeCount, nil
}
func (g *gke) SetClusterVersion(version string, timeout time.Duration) error {
err := g.ops.SetClusterVersion(version, timeout)
if err != nil {
logrus.Errorf("failed to set version for cluster. Error: %v", err)
return err
}
logrus.Infof("Cluster version set successfully. Setting up node group version now ...")
err = g.ops.SetInstanceGroupVersion(g.instanceGroup, version, timeout)
if err != nil {
logrus.Errorf("failed to set version for instance group %s. Error: %v", g.instanceGroup, err)
return err
}
logrus.Infof("Node group version set successfully.")
return nil
}
func (g *gke) DeleteNode(node node.Node, timeout time.Duration) error {
err := g.ops.DeleteInstance(node.Name, node.Zone, timeout)
if err != nil {
return err
}
return nil
}
func (g *gke) GetZones() ([]string, error) {
asgInfo, err := g.ops.InspectInstanceGroupForInstance(g.ops.InstanceID())
if err != nil {
return []string{}, err
}
return asgInfo.Zones, nil
}
func init() {
g := &gke{
SSH: ssh.SSH{},
}
node.Register(DriverName, g)
}
| [
"\"INSTANCE_GROUP\""
] | [] | [
"INSTANCE_GROUP"
] | [] | ["INSTANCE_GROUP"] | go | 1 | 0 | |
snowflake/muon-app/src/main/java/muon/app/ui/components/session/terminal/ssh/SshTtyConnector.java | package muon.app.ui.components.session.terminal.ssh;
import java.awt.Dimension;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import com.jediterm.terminal.Questioner;
import muon.app.App;
import muon.app.ssh.SshClient2;
import muon.app.ui.components.session.SessionContentPanel;
import muon.app.ui.components.session.SessionInfo;
import net.schmizz.sshj.connection.ConnectionException;
import net.schmizz.sshj.connection.channel.direct.PTYMode;
import net.schmizz.sshj.connection.channel.direct.Session;
import net.schmizz.sshj.connection.channel.direct.Session.Shell;
import net.schmizz.sshj.connection.channel.direct.SessionChannel;
import net.schmizz.sshj.transport.TransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import util.CollectionHelper;
public class SshTtyConnector implements DisposableTtyConnector {
public static final Logger log = LoggerFactory.getLogger(SshTtyConnector.class);
private InputStreamReader myInputStreamReader;
private InputStream myInputStream = null;
private OutputStream myOutputStream = null;
private SessionChannel shell;
private Session channel;
private AtomicBoolean isInitiated = new AtomicBoolean(false);
private AtomicBoolean isCancelled = new AtomicBoolean(false);
private AtomicBoolean stopFlag = new AtomicBoolean(false);
private Dimension myPendingTermSize;
private Dimension myPendingPixelSize;
private SshClient2 wr;
private String initialCommand;
private SessionInfo info;
private SessionContentPanel sessionContentPanel;
public SshTtyConnector(SessionInfo info, String initialCommand, SessionContentPanel sessionContentPanel) {
this.initialCommand = initialCommand;
this.info = info;
this.sessionContentPanel = sessionContentPanel;
}
public SshTtyConnector(SessionInfo info, SessionContentPanel sessionContentPanel) {
this(info, null, sessionContentPanel);
}
@Override
public boolean init(Questioner q) {
try {
this.wr = new SshClient2(this.info, App.getInputBlocker(), sessionContentPanel);
this.wr.connect();
this.channel = wr.openSession();
this.channel.setAutoExpand(true);
//Map<PTYMode, Integer>ptyMode=new CollectionHelper.Dict<PTYMode, Integer>().putItem(PTYMode., v)
this.channel.allocatePTY(App.getGlobalSettings().getTerminalType(), App.getGlobalSettings().getTermWidth(),
App.getGlobalSettings().getTermHeight(), 0, 0, Collections.<PTYMode, Integer>emptyMap());
this.channel.setEnvVar("LANG", "en_US.UTF-8");
this.shell = (SessionChannel) this.channel.startShell();
// String lang = System.getenv().get("LANG");
// this.channel.setEnvVar("LANG", lang != null ? lang :
// "en_US.UTF-8");
// this.channel.
// channel.setEnv("LANG", lang != null ? lang : "en_US.UTF-8");
// channel.setPtyType(App.getGlobalSettings().getTerminalType());
// channel.setPtyType("xterm-256color");
// PipedOutputStream pout1 = new PipedOutputStream();
// PipedInputStream pin1 = new PipedInputStream();
// channel.setOutputStream(pout1);
//
// PipedOutputStream pout2 = new PipedOutputStream();
// PipedInputStream pin2 = new PipedInputStream(pout2);
// channel.setInputStream(pin2);
myInputStream = shell.getInputStream();// channel.getInputStream();
myOutputStream = shell.getOutputStream();// channel.getOutputStream();
myInputStreamReader = new InputStreamReader(myInputStream, "utf-8");
// channel.connect();
resizeImmediately();
log.info("Initiated");
if (initialCommand != null) {
myOutputStream.write((initialCommand + "\n").getBytes("utf-8"));
myOutputStream.flush();
}
// resize(termSize, pixelSize);
isInitiated.set(true);
return true;
} catch (Exception e) {
e.printStackTrace();
isInitiated.set(false);
isCancelled.set(true);
return false;
}
}
@Override
public void close() {
try {
stopFlag.set(true);
log.info("Terminal wrapper disconnecting");
wr.disconnect();
} catch (Exception e) {
}
}
@Override
public void resize(Dimension termSize, Dimension pixelSize) {
myPendingTermSize = termSize;
myPendingPixelSize = pixelSize;
if (channel != null) {
resizeImmediately();
}
// if (channel == null) {
// return;
// }
// log.info("Terminal resized");
// channel.setPtySize(termSize.width, termSize.height, pixelSize.width, pixelSize.height);
}
@Override
public String getName() {
return "Remote";
}
@Override
public int read(char[] buf, int offset, int length) throws IOException {
return myInputStreamReader.read(buf, offset, length);
}
@Override
public void write(byte[] bytes) throws IOException {
myOutputStream.write(bytes);
myOutputStream.flush();
}
@Override
public boolean isConnected() {
return channel != null && channel.isOpen() && isInitiated.get();
}
@Override
public void write(String string) throws IOException {
write(string.getBytes("utf-8"));
}
@Override
public int waitFor() throws InterruptedException {
log.info("Start waiting...");
while (!isInitiated.get() || isRunning()) {
log.info("waiting");
Thread.sleep(100); // TODO: remove busy wait
}
log.info("waiting exit");
try {
shell.join();
} catch (ConnectionException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return shell.getExitStatus();
}
public boolean isRunning() {
return shell != null && shell.isOpen();
}
public boolean isBusy() {
return channel.isOpen();
}
public boolean isCancelled() {
return isCancelled.get();
}
public void stop() {
stopFlag.set(true);
close();
}
public int getExitStatus() {
if (shell != null) {
Integer exit = shell.getExitStatus();
return exit == null ? -1 : exit;
}
return -2;
}
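// Applies any pending terminal and pixel dimensions to the remote PTY and clears the pending values.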
private void resizeImmediately() {
if (myPendingTermSize != null && myPendingPixelSize != null) {
setPtySize(shell, myPendingTermSize.width, myPendingTermSize.height, myPendingPixelSize.width,
myPendingPixelSize.height);
myPendingTermSize = null;
myPendingPixelSize = null;
}
}
private void setPtySize(Shell shell, int col, int row, int wp, int hp) {
log.info("Exec pty resized:- col: " + col + " row: " + row + " wp: " + wp + " hp: " + hp);
if (shell != null) {
try {
shell.changeWindowDimensions(col, row, wp, hp);
} catch (TransportException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
// channel.setPtySize(col, row, wp, hp);
}
@Override
public boolean isInitialized() {
return isInitiated.get();
}
} | [] | [] | [] | [] | [] | java | 0 | 0 | |
routers/router.go | package routers
import (
"dogego/api"
"dogego/auth"
"dogego/middlewares"
"os"
"github.com/gin-gonic/gin"
)
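// NewRouter builds the gin engine, wires the session/CORS/current-user middleware chain and registers the /api/v1 routes.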
func NewRouter() *gin.Engine {
router := gin.Default()
// Middleware; the order must stay exactly like this.
//router.Use(middlewares.SentryReportor())
router.Use(middlewares.Session(os.Getenv("SESSION_SECRET")))
router.Use(middlewares.Cors(os.Getenv("CORS_DOMAIN")))
router.Use(middlewares.CurrentUser())
v1 := router.Group("/api/v1")
{
v1.POST("/ping", api.Ping)
v1.POST("/user/register", api.UserRegister)
v1.POST("/user/login", api.UserLogin)
v1.POST("/task/test", api.TestAsyncTask)
// Requires login authentication (auth.User)
userauthed := v1.Group("")
{
// Use the login authentication middleware
userauthed.Use(middlewares.AuthRequired(auth.User))
}
// Requires login authentication (auth.Admin)
adminauthed := v1.Group("")
{
// Use the login authentication middleware
adminauthed.Use(middlewares.AuthRequired(auth.Admin))
}
// Requires login authentication (auth.All)
authed := v1.Group("")
{
// Use the login authentication middleware
authed.Use(middlewares.AuthRequired(auth.All))
authed.PUT("/user/change_password", api.UserChangePassword)
authed.PUT("/user/update_profile", api.UserUpdateProfile)
authed.GET("/user/me", api.UserMe)
authed.POST("/user/logout", api.UserLogout)
}
}
return router
}
| [
"\"SESSION_SECRET\"",
"\"CORS_DOMAIN\""
] | [] | [
"CORS_DOMAIN",
"SESSION_SECRET"
] | [] | ["CORS_DOMAIN", "SESSION_SECRET"] | go | 2 | 0 | |
utils/fsutils/fsutils.go | package fsutils
import (
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
)
// ResolvePath normalizes a path: an empty path becomes ".", a leading "~" is expanded to usr.HomeDir when usr is provided, and the result is returned as an absolute path.
func ResolvePath(path string, usr *user.User) (string, error) {
// var err error
if path == "" {
path = "."
}
if strings.HasPrefix(path, "~") && usr != nil {
//if usr == nil {
// usr, err = GetCurrentUser()
// if err != nil {
// return path, fmt.Errorf("resolving path [%s] failed due to inability to get user info: %w", path, err)
// }
//}
path = usr.HomeDir + path[1:]
}
return filepath.Abs(path)
}
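// SafeParentResolvePath resolves the path and creates its parent directory with the given permissions if it does not exist.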
func SafeParentResolvePath(path string, usr *user.User, perm os.FileMode) (string, error) {
fullPath, err := ResolvePath(path, usr)
if err != nil {
return path, err
}
err = os.MkdirAll(filepath.Dir(fullPath), perm)
if err != nil {
return path, err
}
return fullPath, nil
}
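// GetCurrentUser returns the invoking user, preferring SUDO_USER so the real user is resolved when running under sudo.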
func GetCurrentUser() (usr *user.User, err error) {
if userName := os.Getenv("SUDO_USER"); userName != "" {
usr, err = user.Lookup(userName)
} else {
usr, err = user.Current()
}
return usr, err
}
// IsDirectory checks whether the path exists and is a directory.
func IsDirectory(path string) (b bool, err error) {
fi, err := os.Stat(path)
if err != nil {
// Return any stat error (including non-existence); otherwise fi could be nil below.
return false, err
}
if !fi.IsDir() {
return false, fmt.Errorf(`not a directory: %v`, path)
}
return true, nil
}
| [
"\"SUDO_USER\""
] | [] | [
"SUDO_USER"
] | [] | ["SUDO_USER"] | go | 1 | 0 | |
setup.py | #!/usr/bin/env python
import os
from distutils.core import setup
from Cython.Build import cythonize
from setuptools.extension import Extension
import numpy as np
# Only compile with OpenMP if user asks for it
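# Note: environment variables are read as strings, so any non-empty value (even "0") counts as asking for it.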
USE_OPENMP = os.environ.get('USE_OPENMP', False)
print("USE_OPENMP", USE_OPENMP)
# Create the extensions. Manually enumerate the required
extensions = []
extensions.append(
Extension('ssm.messages',
extra_compile_args=[],
extra_link_args=[],
language="c++",
sources=["ssm/messages.pyx"],
)
)
extensions.append(
Extension('ssm.cstats',
extra_compile_args=["-fopenmp"] if USE_OPENMP else [],
extra_link_args=["-fopenmp"] if USE_OPENMP else [],
language="c++",
sources=["ssm/cstats.pyx"],
)
)
extensions = cythonize(extensions)
setup(name='ssm',
version='0.0.1',
description='Bayesian learning and inference for a variety of state space models',
author='Scott Linderman',
author_email='[email protected]',
url='https://github.com/slinderman/ssm',
install_requires=['future', 'numpy', 'scipy', 'matplotlib', 'joblib', 'scikit-learn', 'tqdm', 'autograd'],
packages=['ssm'],
ext_modules=extensions,
include_dirs=[np.get_include(),],
)
| [] | [] | [
"USE_OPENMP"
] | [] | ["USE_OPENMP"] | python | 1 | 0 | |
weatherProj/asgi.py | """
ASGI config for weatherProj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weatherProj.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
web/static.go | package web
import (
"blockexchange/types"
"html/template"
"net/http"
"os"
"github.com/gorilla/mux"
)
const template_str = `
<!DOCTYPE HTML>
<html>
<head>
<meta name="og:title" content="{{.Schema.Name}} by {{.Username}}"/>
<meta name="og:type" content="Schematic"/>
<meta name="og:url" content="{{.BaseURL}}/#/schema/{{.Username}}/{{.Schema.Name}}"/>
<meta name="og:image" content="{{.BaseURL}}/api/schema/{{.Schema.ID}}/screenshot/{{.Screenshot.ID}}"/>
<meta name="og:site_name" content="Block exchange"/>
<meta name="og:description" content="{{.Schema.Description}}"/>
<meta http-equiv="refresh" content="0; url={{.BaseURL}}/#/schema/{{.Username}}/{{.Schema.Name}}" />
</head>
<body>
</body>
</html>
`
var tmpl = template.Must(template.New("main").Parse(template_str))
type TemplateData struct {
Schema *types.SchemaSearchResult
Screenshot *types.SchemaScreenshot
Username string
BaseURL string
}
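// GetStaticView renders a minimal HTML page whose OpenGraph meta tags describe the
// schema and its first screenshot, then meta-refreshes the browser to the SPA route
// under BASE_URL.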
func (api *Api) GetStaticView(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
schema_name := vars["schema_name"]
user_name := vars["user_name"]
schema, err := api.SchemaSearchRepo.FindByUsernameAndSchemaname(schema_name, user_name)
if err != nil {
SendError(w, 500, err.Error())
return
}
if schema == nil {
SendError(w, 404, "Not found")
return
}
screenshots, err := api.SchemaScreenshotRepo.GetBySchemaID(schema.ID)
if err != nil {
SendError(w, 500, err.Error())
return
}
if screenshots == nil || len(screenshots) < 1 {
SendError(w, 500, "no screenshots found")
return
}
data := TemplateData{
Schema: schema,
Screenshot: &screenshots[0],
Username: user_name,
BaseURL: os.Getenv("BASE_URL"),
}
w.Header().Set("Content-Type", "text/html")
tmpl.Execute(w, data)
}
| [
"\"BASE_URL\""
] | [] | [
"BASE_URL"
] | [] | ["BASE_URL"] | go | 1 | 0 | |
tests/fixtures/noqa.py | # -*- coding: utf-8 -*-
"""
This file contains all possible violations.
It is used for e2e tests.
"""
from __future__ import print_function # noqa: WPS422
import os.path # noqa: WPS301
import sys as sys # noqa: WPS113
from some import _protected # noqa: WPS436
from .version import get_version # noqa: WPS300
full_name = u'Nikita Sobolev' # noqa: WPS302
phone_number = 555_123_999 # noqa: WPS303
partial_number = .05 # noqa: WPS304
formatted_string = f'Hi, {full_name}' # noqa: WPS305
def __getattr__(): # noqa: WPS413
# See:
# https://github.com/wemake-services/wemake-python-styleguide/issues/461
anti_z444 = 1
def foo_func():
# See:
# https://github.com/wemake-services/wemake-python-styleguide/issues/601
yield (1, 2, 3, 4, 5, 6) # noqa: WPS227
print(x > 2 > y > 4) # noqa: WPS228
try: # noqa: WPS229
print(1)
print(2)
print(3)
except AnyError:
print('nope')
class TooManyPublicAtts(object): # noqa: WPS230
def __init__(self):
self.first = 1
self.second = 2
self.third = 3
self.fourth = 4
self.fifth = 5
self.sixth = 6
self.boom = 7
def function_name(
value: int = 0, # noqa: WPS110
):
# See:
# https://github.com/wemake-services/wemake-python-styleguide/issues/392
anti_z444 = 1
def some(): # noqa: WPS110
from my_module import some_import # noqa: WPS433
class Nested(object): # noqa: WPS431
... # noqa: WPS428, WPS604
def nested(): # noqa: WPS430
anti_z444 = 1
raise NotImplemented # noqa: WPS423
del {'a': 1}['a'] # noqa: WPS420
hasattr(object, 'some') # noqa: WPS421
value = 1 # noqa: WPS110
x = 2 # noqa: WPS111
__private = 3 # noqa: WPS112
star_wars_episode_7 = 'the worst episode ever after 8' # noqa: WPS114
consecutive__underscores = 4 # noqa: WPS116
cls = 5 # noqa: WPS117
__author__ = 'Nikita Sobolev' # noqa: WPS410
extremely_long_name_that_needs_to_be_shortened_to_work_fine = 2 # noqa: WPS118
привет_по_русски = 'Hello, world!' # noqa: WPS119
wrong_alias_ = 'some fake builtin alias' # noqa: WPS120
def some_function():
_should_not_be_used = 1 # noqa: WPS122
print(_should_not_be_used) # noqa: WPS121
used, __ = 1, 2 # noqa: WPS123
some._execute() # noqa: WPS437
def many_locals(): # noqa: WPS210
arg1, arg2, arg3, arg4, arg5, arg6 = range(6)
def many_arguments(_arg1, _arg2, _arg3, _arg4, _arg5, _arg6): # noqa: WPS211
anti_z444 = 1
def many_returns(xy): # noqa: WPS212
if xy > 1:
return 1
if xy > 2:
return 2
if xy > 3:
return 3
if xy > 4:
return 4
if xy > 5:
return 5
return 6
def many_expressions(xy): # noqa: WPS213
print(xy)
print(xy)
print(xy)
print(xy)
print(xy)
print(xy)
print(xy)
print(xy)
print(xy)
print(xy)
class ManyParents(First, Second, Third, Exception): # noqa: WPS215
anti_z444 = 1
async def too_many_awaits(): # noqa: WPS217
await test_function(1)
await test_function(2)
await test_function(3)
await test_function(4)
await test_function(5)
await test_function(6)
await test_function(7)
async def too_many_asserts(): # noqa: WPS218
assert test_function(1)
assert test_function(2)
assert test_function(3)
assert test_function(4)
assert test_function(5)
assert test_function(6)
deep_access = some.other[0].field.type.boom # noqa: WPS219
def test_function(): # noqa: WPS231
if xy > 1:
if xy > 2:
if xy > 3:
if xy > 4:
if xy > 5:
test(5) # noqa: WPS220
line = some.call(7 * 2, 3 / 4) / some.run(5 / some, 8 - 2 + 6) # noqa: WPS221
if line and line > 2 and line > 3 and line > 4 and line > 5: # noqa: WPS221,WPS222
anti_z444 = 1
if line: # noqa: WPS223
anti_z444 = 1
elif line > 1:
anti_z444 = 1
elif line > 2:
anti_z444 = 1
elif line > 3:
anti_z444 = 1
elif line > 4:
anti_z444 = 1
try: # noqa: WPS225
do_some_bad()
except ValueError:
print('value')
except KeyError:
print('key')
except IndexError as exc:
print('index', exc)
except TypeError:
print('type')
class BadClass: # noqa: WPS306
UPPER_CASE_ATTRIBUTE = 12 # noqa: WPS115
@staticmethod # noqa: WPS602
def some_static(arg1):
return [
target # noqa: WPS224
for assignment in range(hex_number)
for target in range(assignment)
for _ in range(10)
if isinstance(target, int)
]
@staticmethod # noqa: WPS602
async def some_async_static(arg1):
return [
node for node in 'ab' if node != 'a' if node != 'b' # noqa: WPS307
]
def __del__(self, *_args, **_kwargs): # noqa: WPS603
anti_z444 = 1 # noqa: WPS442
class Nested: # noqa: WPS306,WPS431
anti_z444 = 1
async def __eq__(self, other): # noqa: WPS610
anti_z444 = 3 # noqa: WPS442
magic_numbers = 13.2 + 50 # noqa: WPS432
assert 1 < 1 < hex_number # noqa: WPS308
assert 2 > octal_number # noqa: WPS309
hex_number = 0XFF # noqa: WPS310
octal_number = 0O11 # noqa: WPS310
binary_number = 0B1001 # noqa: WPS310
number_with_scientific_notation = 1.5E-10 # noqa: WPS310
number_with_useless_plus = +5 # noqa: WPS330
if '6' in nodes in '6': # noqa: WPS311, WPS525
anti_z444 = 1
assert hex_number == hex_number # noqa: WPS312
async def test_async_function():
return(123, 33) # noqa: WPS313
if True: # noqa: WPS314
anti_z444 = 1
class SomeTestClass(FirstParent, SecondParent, object): # noqa: WPS315
anti_z444 = 1
class SomeClass(FirstParent, # noqa: WPS317
SecondParent, # noqa: WPS318
ThirdParent): # noqa: WPS319
anti_z444 = 1
if SomeClass:
print(SomeClass) # noqa: WPS318
print(
1,
2) # noqa: WPS319
def function( # noqa: WPS320
arg: Optional[ # noqa: WPS320
str,
]
) -> Optional[
str,
]:
some_set = {1
} # noqa: WPS318
string_modifier = R'(s)' # noqa: WPS321
multiline_string = """abc""" # noqa: WPS322
def function_with_wrong_return():
if some:
print(some)
return # noqa: WPS324
def function_with_wrong_yield():
if some:
yield # noqa: WPS325
yield 1
bad_concatenation = 'a' 'b' # noqa: WPS326
for literal in bad_concatenation: # noqa: WPS327, WPS328
continue
with open(bad_concatenation): # noqa: WPS328
pass # noqa: WPS420
try:
anti_z444 = 1
except Exception as ex: # noqa: WPS329
raise ex
def some_other_function():
some_value = 1
return some_value # noqa: WPS331
some_cond = cond() and 1 or None # noqa: WPS332
print(one > two and two > three) # noqa: WPS333
print(biggesst > middle >= smallest) # noqa: WPS334
for index in [1, 2]: # noqa: WPS335
print(index)
string_concat = 'a' + 'b' # noqa: WPS336
print(one == 'a' or one == 'b') # noqa: WPS514
file_obj = open('filaname.py') # noqa: WPS515
print(type(file_obj) == int) # noqa: WPS516
print(*[], **{'@': 1}) # noqa: WPS517, WPS445
pi = 3.14 # noqa: WPS446
print(lambda: 0) # noqa: WPS522
xterm += xterm + 1 # noqa: WPS524
for range_len in range(len(file_obj)): # noqa: WPS518
print(range_len)
sum_container = 0
for sum_item in file_obj: # noqa: WPS519
sum_container += sum_item
print(sum_container == []) # noqa: WPS520
print(sum_container is 0) # noqa: WPS521
try:
anti_z444 = 1
except BaseException: # noqa: WPS424
anti_z444 = 1
call_with_positional_bool(True) # noqa: WPS425
class MyInt(int): # noqa: WPS600
"""My custom int subclass."""
class ShadowsAttribute(object):
"""Redefines attr from class."""
first: int
second = 1
def __init__(self) -> None:
self.first = 1
self.second = 2 # noqa: WPS601
for symbol in 'abc': # noqa: WPS500
anti_z444 = 1
else:
anti_z444 = 1
try: # noqa: WPS501
anti_z444 = 1
finally:
anti_z444 = 1
nodes = nodes # noqa: WPS434
class Example(object):
"""Correct class docstring."""
def __init__(self): # noqa: WPS611
"""Correct function docstring."""
yield 10
def __eq__(self, object_: object) -> bool: # noqa: WPS612
return super().__eq__(object_)
for loop_index in range(6): # noqa: WPS426
print(lambda: loop_index)
async def function_with_unreachable():
await test_function()
raise ValueError()
print(1) # noqa: WPS427
1 + 2 # noqa: WPS428
first = second = 2 # noqa: WPS429
first, nodes[0] = range(2) # noqa: WPS414
try: # noqa: WPS415
anti_z444 = 1
except ValueError:
anti_z444 = 1
except ValueError:
anti_z444 = 1
iters = list((yield letter) for letter in 'ab') # noqa: WPS416
class MyBadException(BaseException): # noqa: WPS418
anti_z444 = 1
some_if_expr = True if some_set else False # noqa: WPS502
if some_if_expr: # noqa: WPS502
some_dict['x'] = True
else:
some_dict['x'] = False
class ClassWithWrongContents((lambda: object)()): # noqa: WPS606
__slots__ = ['a', 'a'] # noqa: WPS607
for _ in range(1): # noqa: WPS604
anti_z444 = 1
def method_with_no_args(): # noqa: WPS605
super(ClassWithWrongContents, self).method_with_no_args() # noqa: WPS608
self.some_set = {1, 1} # noqa: WPS417
def useless_returning_else():
if some_set:
return some_set
else:
return TypeError # noqa: WPS503
def multiple_return_path():
try: # noqa: WPS419
return 1
except Exception:
return 2
else:
return 3
def bad_default_values(
self,
withDoctest='PYFLAKES_DOCTEST' in os.environ, # noqa: WPS404
):
return True
for nodes[0] in (1, 2, 3): # noqa: WPS405
anti_z444 = 1
with open('some') as MyBadException.custom: # noqa: WPS406
anti_z444 = 1
anti_z444.__truediv__(1) # noqa: WPS609
if not some: # noqa: WPS504
print('False')
else:
print('Wrong')
try:
try: # noqa: WPS505
anti_z444 = 1
except ValueError:
raise TypeError('Second')
except TypeError:
print('WTF?')
if some and ( # noqa: WPS337
anti_z444 == 1
):
anti_z444 = 'some text'
class WrongMethodOrder(object): # noqa: WPS338
def _protected(self):
return self
def public(self):
return self
leading_zero = 1.2e01 # noqa: WPS339
positive_exponent = 1.1e+1 # noqa: WPS340
wrong_hex = 0xabc # noqa: WPS341
wrong_escape_raw_string = '\\n' # noqa: WPS342
bad_complex = 1J # noqa: WPS343
zero_div = bad_complex / 0.0 # noqa: WPS344
mult_one = zero_div * 1 # noqa: WPS345
mult_one -= -1 # noqa: WPS346
CONSTANT = [] # noqa: WPS407
numbers = map(lambda string: int(string), ['1']) # noqa: WPS506
if len(numbers) > 0: # noqa: WPS507
print('len!')
if numbers and numbers: # noqa: WPS408
print('duplicate boolop')
if not numbers == [1]: # noqa: WPS508
print('bad compare with not')
if numbers == CONSTANT != [2]: # noqa: WPS409
print(1 + (1 if number else 2)) # noqa: WPS509
print(numbers in []) # noqa: WPS510
print(isinstance(number, int) or isinstance(number, (float, str))) # noqa: 474
print(isinstance(numbers, (int,))) # noqa: WPS512
if numbers:
print('first')
else:
if numbers: # noqa: WPS513
print('other')
def sync_gen():
yield
raise StopIteration # noqa: WPS438
async def async_gen():
yield
raise StopIteration # noqa: WPS438
class CheckStopIteration(object):
def sync_gen(self):
yield
raise StopIteration() # noqa: WPS438
async def async_gen(self):
yield
raise StopIteration() # noqa: WPS438
bad_unicode = b'\u1' # noqa: WPS439
CheckStopIteration = 1 # noqa: WPS440
print(literal) # noqa: WPS441
unhashable = {[]} # noqa: WPS443
assert [] # noqa: WPS444
unhashable = [] * 2 # noqa: WPS435
from json import loads # noqa: WPS347
some_model = (
MyModel.objects.filter(...)
.exclude(...) # noqa: WPS348
)
swap_a = swap_b
swap_b = swap_a # noqa: WPS523
print(constant[0:7]) # noqa: WPS349
var_a = var_a + var_b # noqa: WPS350
class ChildClass(ParentClass):
def some_method(self):
super().some_other_method() # noqa: WPS613
LOWERCASE_ALPH = "abcdefghijklmnopqrstuvwxyz" # noqa: WPS447
int() # noqa: WPS351
for wrong_loop in call( # noqa: WPS352
1, 2, 3,
):
print('bad loop')
if a in {1}: # noqa: WPS525
print('bad!')
def implicit_yield_from():
for wrong_yield in call(): # noqa: WPS526
yield wrong_yield
try: # noqa: WPS448
anti_z444 = 1
except Exception:
anti_z444 = 1
except ValueError:
anti_z444 = 1
| [] | [] | [] | [] | [] | python | 0 | 0 | |
segmentation/models/Ours-novel-model/training.py | #System
import numpy as np
import sys
import os
import random
from glob import glob
from skimage import io
from PIL import Image
import SimpleITK as sitk
#Torch
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Function
import torch
import torchvision.transforms as standard_transforms
#from torchvision.models import resnet18
from UNet_3D import UNet3D
import nibabel as nib
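# Expose only GPU 1 to this process; this must happen before any CUDA context is created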
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
#torch.cuda.set_device(1)
ckpt_path = 'ckpt'
exp_name = 's(c)SE'
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
if not os.path.exists(os.path.join(ckpt_path, exp_name)):
os.makedirs(os.path.join(ckpt_path, exp_name))
args = {
'num_class': 2,
'num_gpus': 1,
'start_epoch': 1,
'num_epoch': 200,
'batch_size': 1 ,
'lr': 0.0001,
'lr_decay': 0.9,
'weight_decay': 1e-4,
'momentum': 0.9,
'snapshot': '',
'opt': 'adam',
'crop_size1': 138,
}
def mat2img(slices):
tmin = np.amin(slices)
tmax = np.amax(slices)
diff = tmax -tmin
if (diff == 0):
return slices
else:
return np.uint8(255 * (slices - tmin) / (diff))
class HEMDataset(Dataset):
def __init__(self, img_dir, transform=None):
self.img_anno_pairs = glob(img_dir)
def __len__(self):
return len(self.img_anno_pairs)
def __getitem__(self, index):
_img = glob(self.img_anno_pairs[index] + '/*__resampled.nii.gz')
_gt = glob(self.img_anno_pairs[index] + '/*seg_resampled.nii.gz')
_img = nib.load(_img[0]).get_data()
_gt = nib.load(_gt[0]).get_data()
_img = _img.transpose(2,0,1)
_gt = _gt.transpose(2,0,1)
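# Crop each volume to a fixed 138 x 186 x 186 (depth, height, width) block around the center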
a0 = _img.shape[0] #138
a1 = _img.shape[2] #186
a0 = (a0 - 138)//2
a1 = (a1 - 186)//2
img = _img[a0:a0+138, a1:a1+186, a1:a1+186]
target = _gt[a0:a0+138, a1:a1+186, a1:a1+186]
img = img/255
img = np.expand_dims(img, axis=0)
hflip = random.random() < 0.5
if hflip:
img = img[:, ::-1, :, :]
target = target[::-1,:, :]
img = torch.from_numpy(np.array(img)).float()
target = torch.from_numpy(np.array(target)).long()
return img, target
class CrossEntropyLoss2d(torch.nn.Module):
def __init__(self, weight=None, size_average=True):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = torch.nn.NLLLoss(weight, size_average)
def forward(self, inputs, targets):
return self.nll_loss(F.log_softmax(inputs), targets)
if __name__ == '__main__':
input_transform = standard_transforms.Compose([standard_transforms.ToTensor()])
img_dir = '/media/mmlab/data/sesh/Data_ICH/Sesh_Segmentation/**'
print(img_dir)
dataset = HEMDataset(img_dir=img_dir)
train_loader = DataLoader(dataset=dataset, batch_size=args['batch_size'], shuffle=True, num_workers=2,drop_last=True)
in_channels = 1
model = UNet3D(in_channels=1, out_channels=2, final_sigmoid=True)
gpu_ids = range(args['num_gpus'])
model = torch.nn.parallel.DataParallel(model, device_ids=gpu_ids)
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args['lr'], weight_decay=0.0001)
criterion = CrossEntropyLoss2d(size_average=True).cuda()
model.train()
epoch_iters = dataset.__len__() / args['batch_size']
max_epoch = 100
print(exp_name)
for epoch in range(max_epoch):
for batch_idx, data in enumerate(train_loader):
inputs, labels = data
inputs = Variable(inputs).cuda()
labels = Variable(labels).cuda()
optimizer.zero_grad()
outputs = model(inputs)
outputs = outputs.view(args['batch_size'], args['num_class'], args['crop_size1'], -1)
labels = labels.view(args['batch_size'], args['crop_size1'], -1)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (batch_idx + 1) % 20 == 0:
print('[epoch %d], [iter %d / %d], [train main loss %.5f], [lr %.10f]' % (
epoch, batch_idx + 1, epoch_iters, loss.item(),
optimizer.param_groups[0]['lr']))
cur_iter = batch_idx + epoch * len(train_loader)
max_iter = len(train_loader) * max_epoch
snapshot_name = 'epoch_' + str(epoch)
torch.save(model.state_dict(), os.path.join(ckpt_path, exp_name, snapshot_name + '.pth.tar'))
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
main.py | import numpy as np
from recommenders.Rec4RecRecommender import Rec4RecRecommender
from recommenders.KNNRecommender import KNNRecommender
from recommenders.RNNRecommender import RNNRecommender
from util import evaluation
from util.make_data import *
from util.metrics import mrr,recall
import os
import argparse
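# Tolerate duplicate OpenMP runtimes (a common workaround for MKL/libomp conflicts, notably on macOS)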
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# def get_test_sequences(test_data, given_k):
# # we can run evaluation only over sequences longer than abs(LAST_K)
# test_sequences = test_data.loc[test_data['sequence'].map(
# len) > abs(given_k), 'sequence'].values
# return test_sequences
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--d', type=int, default=100)
parser.add_argument('--n_iter', type=int, default=10)
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--batch_size', type=int, default=1024)
parser.add_argument('--learning_rate', type=float, default=.5e-2)
parser.add_argument('--l2', type=float, default=3e-3)
parser.add_argument('--neg_samples', type=int, default=3)
parser.add_argument('--sets_of_neg_samples', type=int, default=50)
config = parser.parse_args()
METRICS = {'mrr': mrr}
sequences, test_sequences = make_data_toy_data()
item_count = item_count(sequences, 'sequence')
rec_sknn = KNNRecommender(model='sknn', k=12)
rec_gru4rec = RNNRecommender(session_layers=[
20], batch_size=16, learning_rate=0.1, momentum=0.1, dropout=0.1, epochs=5)
rec_ensemble = [rec_sknn, rec_gru4rec]
for rec in rec_ensemble:
rec.fit(sequences)
ensemble = Rec4RecRecommender(
item_count, 100, rec_ensemble, config, pretrained_embeddings=None)
ensemble.fit(test_sequences,METRICS)
ensemble_eval_score = evaluation.sequential_evaluation(
ensemble, test_sequences=test_sequences, evaluation_functions=METRICS.values(), top_n=10, scroll=False)
| [] | [] | [
"KMP_DUPLICATE_LIB_OK"
] | [] | ["KMP_DUPLICATE_LIB_OK"] | python | 1 | 0 | |
test/functional/test_framework/test_node.py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for cypheriumlited node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a cypheriumlited node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
self.cwd = cwd
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def get_mem_rss_kilobytes(self):
"""Get the memory usage (RSS) per `ps`.
Returns None if `ps` is unavailable.
"""
assert self.running
try:
return int(subprocess.check_output(
["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
stderr=subprocess.DEVNULL).split()[-1])
# Avoid failing on platforms where ps isn't installed.
#
# We could later use something like `psutils` to work across platforms.
except (FileNotFoundError, subprocess.SubprocessError):
self.log.exception("Unable to get memory usage")
return None
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
self.running = True
self.log.debug("cypheriumlited started, waiting for RPC to come up")
if self.start_perf:
self._start_perf()
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the cypheriumlited process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'cypheriumlited exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to cypheriumlited")
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop(wait=wait)
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
@contextlib.contextmanager
def assert_memory_usage_stable(self, *, increase_allowed=0.03):
"""Context manager that allows the user to assert that a node's memory usage (RSS)
hasn't increased beyond some threshold percentage.
Args:
increase_allowed (float): the fractional increase in memory allowed until failure;
e.g. `0.12` for up to 12% increase allowed.
"""
before_memory_usage = self.get_mem_rss_kilobytes()
yield
after_memory_usage = self.get_mem_rss_kilobytes()
if not (before_memory_usage and after_memory_usage):
self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
return
perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
if perc_increase_memory_usage > increase_allowed:
self._raise_assertion_error(
"Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
increase_allowed * 100, before_memory_usage, after_memory_usage,
perc_increase_memory_usage * 100))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name (str): This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
def _start_perf(self, profile_name=None):
"""Start a perf process to profile this node.
Returns the subprocess running perf."""
subp = None
def test_success(cmd):
return subprocess.call(
# shell=True required for pipe use below
cmd, shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
if not sys.platform.startswith('linux'):
self.log.warning("Can't profile with perf; only availabe on Linux platforms")
return None
if not test_success('which perf'):
self.log.warning("Can't profile with perf; must install perf-tools")
return None
if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
self.log.warning(
"perf output won't be very useful without debug symbols compiled into cypheriumlited")
output_path = tempfile.NamedTemporaryFile(
dir=self.datadir,
prefix="{}.perf.data.".format(profile_name or 'test'),
delete=False,
).name
cmd = [
'perf', 'record',
'-g', # Record the callgraph.
'--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
'-F', '101', # Sampling frequency in Hz.
'-p', str(self.process.pid),
'-o', output_path,
]
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.perf_subprocesses[profile_name] = subp
return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to cypheriumlited
expected_msg: regex that stderr should match when cypheriumlited fails
Will throw if cypheriumlited starts without an error.
Will throw if an expected_msg is provided and it does not match cypheriumlited's stdout."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('cypheriumlited failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "cypheriumlited should have exited with an error"
else:
assert_msg = "cypheriumlited should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg)
else:
return str(arg)
class TestNodeCLI():
"""Interface to cypheriumlite-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run cypheriumlite-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same cypheriumlite-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running cypheriumlite-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
| [] | [] | [] | [] | [] | python | 0 | 0 | |
venv/Lib/site-packages/selenium/webdriver/common/service.py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from subprocess import DEVNULL
import errno
import os
import subprocess
from platform import system
from subprocess import PIPE
from time import sleep
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
_HAS_NATIVE_DEVNULL = True
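# Service wraps the lifecycle of a WebDriver helper executable: start the subprocess,
# wait until its port is connectable, and stop it cleanly.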
class Service(object):
def __init__(self, executable, port=0, log_file=DEVNULL, env=None, start_error_message=""):
self.path = executable
self.port = port
if self.port == 0:
self.port = utils.free_port()
if not _HAS_NATIVE_DEVNULL and log_file == DEVNULL:
log_file = open(os.devnull, 'wb')
self.start_error_message = start_error_message
self.log_file = log_file
# Default value for every python subprocess: subprocess.Popen(..., creationflags=0)
self.creationflags = 0
self.env = env or os.environ
@property
def service_url(self):
"""
Gets the url of the Service
"""
return "http://%s" % utils.join_host_port('localhost', self.port)
def command_line_args(self):
raise NotImplementedError("This method needs to be implemented in a sub class")
def start(self):
"""
Starts the Service.
:Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service
"""
try:
cmd = [self.path]
cmd.extend(self.command_line_args())
self.process = subprocess.Popen(cmd, env=self.env,
close_fds=system() != 'Windows',
stdout=self.log_file,
stderr=self.log_file,
stdin=PIPE,
creationflags=self.creationflags)
except TypeError:
raise
except OSError as err:
if err.errno == errno.ENOENT:
raise WebDriverException(
"'%s' executable needs to be in PATH. %s" % (
os.path.basename(self.path), self.start_error_message)
)
elif err.errno == errno.EACCES:
raise WebDriverException(
"'%s' executable may have wrong permissions. %s" % (
os.path.basename(self.path), self.start_error_message)
)
else:
raise
except Exception as e:
raise WebDriverException(
"The executable %s needs to be available in the path. %s\n%s" %
(os.path.basename(self.path), self.start_error_message, str(e)))
count = 0
while True:
self.assert_process_still_running()
if self.is_connectable():
break
count += 1
sleep(0.5)
if count == 60:
raise WebDriverException("Can not connect to the Service %s" % self.path)
def assert_process_still_running(self):
return_code = self.process.poll()
if return_code:
raise WebDriverException(
'Service %s unexpectedly exited. Status code was: %s'
% (self.path, return_code)
)
def is_connectable(self):
return utils.is_connectable(self.port)
def send_remote_shutdown_command(self):
from urllib import request as url_request
URLError = url_request.URLError
try:
url_request.urlopen("%s/shutdown" % self.service_url)
except URLError:
return
for x in range(30):
if not self.is_connectable():
break
else:
sleep(1)
def stop(self):
"""
Stops the service.
"""
if self.log_file != PIPE and not (self.log_file == DEVNULL and _HAS_NATIVE_DEVNULL):
try:
self.log_file.close()
except Exception:
pass
if not self.process:
return
try:
self.send_remote_shutdown_command()
except TypeError:
pass
try:
if self.process:
for stream in [self.process.stdin,
self.process.stdout,
self.process.stderr]:
try:
stream.close()
except AttributeError:
pass
self.process.terminate()
self.process.wait()
self.process.kill()
self.process = None
except OSError:
pass
def __del__(self):
# `subprocess.Popen` doesn't send signal on `__del__`;
# so we attempt to close the launched process when `__del__`
# is triggered.
try:
self.stop()
except Exception:
pass
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cmd/vulcan-nessus/nessus.go | /*
Copyright 2019 Adevinta
*/
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"time"
"github.com/jpillora/backoff"
log "github.com/sirupsen/logrus"
"github.com/adevinta/restuss"
"github.com/adevinta/vulcan-check-sdk/helpers"
checkstate "github.com/adevinta/vulcan-check-sdk/state"
report "github.com/adevinta/vulcan-report"
)
const (
// Default polling interval is 5min.
defPollingInterval = 5 * 60
// Default delay range is 1min.
defDelayRange = 60
)
// Runner executes a Nessus check.
type Runner interface {
Run(ctx context.Context) (err error)
}
type runner struct {
nessusCli *restuss.NessusClient
nessusPersistedScan *restuss.PersistedScan
Delete bool
}
func (r *runner) Run(ctx context.Context, target, assetType, optJSON string, state checkstate.State) (err error) {
var opt options
if optJSON != "" {
if err = json.Unmarshal([]byte(optJSON), &opt); err != nil {
return err
}
}
isReachable, err := helpers.IsReachable(target, assetType, nil)
if err != nil {
logger.Warnf("Can not check asset reachability: %v", err)
}
if !isReachable {
return checkstate.ErrAssetUnreachable
}
p, err := strconv.Atoi(os.Getenv("NESSUS_POLICY_ID"))
if err != nil {
return fmt.Errorf("wrong value for NESSUS_POLICY_ID: %v", err)
}
policyID := int64(p)
basicAuth := opt.BasicAuth
// Default value for delete option is TRUE
r.Delete = true
if opt.Delete != nil {
r.Delete = *opt.Delete
}
pollingInterval := opt.PollingInterval
if pollingInterval <= 0 {
pollingInterval = defPollingInterval
}
// In order to not overload Tenable API
// sleep a random time from within a range
// so we distribute initial spike during
// scans creation process.
delayRange := opt.DelayRange
if delayRange <= 0 {
delayRange = defDelayRange
}
delay := time.Duration(rand.Intn(delayRange)) * time.Second
logger.Infof("Delaying startup for %v", delay)
time.Sleep(delay)
logger = logger.WithFields(log.Fields{
"target": target,
"policy ID": policyID,
})
err = r.auth(basicAuth)
if err != nil {
return err
}
policy, err := r.loadPolicyDetails(ctx, policyID)
if err != nil {
return err
}
scan, err := r.launchScan(ctx, target, policy)
if err != nil {
return err
}
// We need to store in a field the scan info in order to delete it in the clean
// up step.
r.nessusPersistedScan = scan
scanDetail, err := r.waitUntilScanFinishes(ctx, pollingInterval)
if err != nil {
return err
}
vulns, err := r.addVulnerabilities(*scanDetail, target)
if err != nil {
return err
}
state.AddVulnerabilities(vulns...)
return nil
}
func (r *runner) auth(basicAuth bool) error {
var auth restuss.AuthProvider
if basicAuth {
auth = restuss.NewBasicAuthProvider(os.Getenv("NESSUS_USERNAME"), os.Getenv("NESSUS_PASSWORD"))
} else {
auth = restuss.NewKeyAuthProvider(os.Getenv("NESSUS_USERNAME"), os.Getenv("NESSUS_PASSWORD"))
}
cli, err := restuss.NewClient(auth, os.Getenv("NESSUS_ENDPOINT"), false)
if err != nil {
return fmt.Errorf("error creating restuss client: %+v", err)
}
r.nessusCli = cli
return nil
}
func (r *runner) loadPolicyDetails(ctx context.Context, policyID int64) (restuss.Policy, error) {
policyDetails, err := r.nessusCli.GetPolicyByIDContext(ctx, policyID)
if err != nil {
return restuss.Policy{}, fmt.Errorf("error loading policy: %+v", err)
}
if policyDetails == nil {
return restuss.Policy{}, errors.New("Returned policy is nil")
}
return *policyDetails, nil
}
func (r *runner) launchScan(ctx context.Context, target string, policy restuss.Policy) (*restuss.PersistedScan, error) {
scan, err := r.nessusCli.CreateScanContext(ctx,
&restuss.Scan{
TemplateUUID: policy.UUID,
Settings: restuss.ScanSettings{
Enabled: true,
Name: policy.Settings.Name + ": " + target,
Targets: target,
PolicyID: policy.ID,
}})
if err != nil {
return nil, err
}
logger := logger.WithFields(log.Fields{
"scan": fmt.Sprintf("%+v", scan),
})
logger.Debug("Scan Created")
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 60 * time.Second,
Factor: 1.5,
Jitter: true,
}
rand.Seed(time.Now().UnixNano())
// Try 20 times then return an error
for i := 0; i < 20; i++ {
err = r.nessusCli.LaunchScan(scan.ID)
if err == nil {
return scan, nil
}
d := b.Duration()
logger.Debug(fmt.Sprintf("Err when launching scan: %v, trying again in %v", err, d))
time.Sleep(d)
}
return nil, fmt.Errorf("Not possible to launch scan: %v", scan.ID)
}
func (r *runner) deleteScan(ctx context.Context, scanID int64) error {
err := r.nessusCli.DeleteScanContext(ctx, scanID)
if err != nil {
logger.WithFields(log.Fields{
"scan": fmt.Sprintf("%+v", r.nessusPersistedScan), "error": err,
}).Error("error deleting Nessus scan")
return err
}
logger.WithFields(log.Fields{
"scan": fmt.Sprintf("%+v", r.nessusPersistedScan),
}).Debug("Scan deleted from Nessus")
return err
}
func (r *runner) waitUntilScanFinishes(ctx context.Context, pollingInterval int) (*restuss.ScanDetail, error) {
t := time.NewTicker(time.Duration(pollingInterval) * time.Second)
LOOP:
for {
select {
case <-ctx.Done():
logger.Infof("ctx.Done")
t.Stop()
return nil, ctx.Err()
case <-t.C:
scanDetail, err := r.nessusCli.GetScanByID(r.nessusPersistedScan.ID)
if err != nil {
logger.WithFields(log.Fields{
"scan": fmt.Sprintf("%+v", r.nessusPersistedScan),
}).Errorf("Error while retrieving scan details: %v", err)
continue LOOP
}
if scanDetail == nil {
logger.WithFields(log.Fields{
"scan": fmt.Sprintf("%+v", r.nessusPersistedScan),
}).Errorf("Missing Status information when retrieving Nessus scan information. will try again in 30 seconds.")
continue LOOP
}
logger.WithFields(log.Fields{
"nessusScanID": fmt.Sprintf("%+v", r.nessusPersistedScan.ID),
}).Infof("Status: %s", scanDetail.Info.Status)
if scanDetail.Info.Status == "completed" {
t.Stop()
return scanDetail, nil
}
if scanDetail.Info.Status == "canceled" {
t.Stop()
return nil, errors.New("canceled")
}
if scanDetail.Info.Status == "aborted" {
t.Stop()
return nil, errors.New("aborted")
}
}
}
}
// CleanUp is called by the sdk when the check needs to be aborted in order to give the
// opportunity to clean up resources.
func (r *runner) CleanUp(ctx context.Context, target, assetType, opts string) {
l := logger.WithFields(log.Fields{"action": "CleanUp"})
l.Debug("cleaning up nessus scan")
if r.nessusPersistedScan == nil {
l.Debug("no clean up needed")
return
}
id := r.nessusPersistedScan.ID
// Get the last status of the scan.
scanDetail, err := r.nessusCli.GetScanByIDContext(ctx, r.nessusPersistedScan.ID)
if err != nil {
l.Errorf("error cleaning scan %+v", r.nessusPersistedScan)
return
}
if !(scanDetail.Info.Status == "canceled") && !(scanDetail.Info.Status == "completed") {
l.Debug("stopping scan")
err = r.nessusCli.StopScanContext(ctx, id)
if err != nil {
l.WithError(err).Errorf("error trying to stop the scan")
return
}
// We decrease the polling interval here because stopping a scan should take far less time
// than running a scan.
_, err = r.waitUntilScanFinishes(ctx, 2)
if err != nil && err.Error() != "canceled" {
l.WithError(err).Errorf("error while waiting the scan to stop")
return
}
}
if r.Delete {
err = r.deleteScan(ctx, id)
if err != nil {
l.WithError(err).Error("error deleting scan")
}
}
}
func (r *runner) addVulnerabilities(scan restuss.ScanDetail, target string) ([]report.Vulnerability, error) {
if len(scan.Vulnerabilities) <= 0 {
return nil, nil
}
vulns := []report.Vulnerability{}
for _, nessusVulnerability := range scan.Vulnerabilities {
if len(scan.Hosts) == 0 {
logger.Errorf("Hosts array is empty")
continue
}
hostID := scan.Hosts[0].ID
vulcanVulnerabilities, err := r.translateFromNessusToVulcan(hostID, target, nessusVulnerability)
if err != nil {
logger.Errorf("Error reading nessusVulnerability[%v] :%v", nessusVulnerability.PluginName, err)
continue
}
vulns = append(vulns, vulcanVulnerabilities...)
}
return vulns, nil
}
// translateFromNessusToVulcan converts the vulnerabilities reported by Nessus
// into Vulcan vulnerabilities.
//
// The information of a Nessus vulnerability is spread in two different places:
//
// * The Nessus plugin (vulnerability) definition.
//
// * The output or execution context of that plugin against a concrete target.
//
// The plugin definition contains inherent information about the issue, like
// the summary/title, description, score, solution, references, etc. For
// example https://www.tenable.com/plugins/nessus/20007
//
// The output indicates runtime/execution context details, like the part of the
// target where the issue was found (i.e. TCP/UDP ports) and the matching
// information found to report the issue. For example for the `SSL Version 2
// and 3 Protocol Detection` plugin it reports information about the protocols
// and ciphersuites enabled for the target.
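//
// For example, an output entry reported under the port key "25 / tcp / smtp" becomes
// one Vulcan vulnerability whose AffectedResource is "25 / tcp / smtp" and whose
// "Network Resources" table row carries the hostname, port, protocol and service.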
func (r *runner) translateFromNessusToVulcan(hostID int64, target string, nessusVulnerability restuss.Vulnerability) ([]report.Vulnerability, error) {
p, err := r.nessusCli.GetPluginByID(nessusVulnerability.PluginID)
if err != nil {
return nil, err
}
vulcanVulnerability := report.Vulnerability{
Summary: p.Name,
Labels: []string{"nessus"},
}
// There might be more than one attribute with the same name. For example
// "cwe", "solution" or "see_also".
attributesMap := make(map[string][]string)
for _, attr := range p.Attributes {
attributesMap[attr.Name] = append(attributesMap[attr.Name], attr.Value)
}
// Tenable is now using CVSS v3 score as their default scoring system. In
// order to match the score of the vulnerabilities we report with the score
// Tenable reports in the tenable.io UI, we will default to the CVSS v3
// Nessus score if available, falling back to the already used CVSS base
// score otherwise.
scores := attributesMap["cvss3_base_score"]
if len(scores) < 1 {
scores = attributesMap["cvss_base_score"]
}
// There might be the case where Nessus doesn't provide a CVSS score and in
// that case we will use the Severity they report.
if len(scores) > 0 {
score, errParse := strconv.ParseFloat(scores[0], 32)
if errParse != nil {
return nil, errParse
}
vulcanVulnerability.Score = float32(score)
} else {
vulcanVulnerability.Score = report.ScoreSeverity(report.SeverityRank(nessusVulnerability.Severity))
}
// NOTE: even that Nessus plugins might be reporting more than one CWE per
// vulnerability our vulcan-report just supports one value per
// vulnerability, so we are taking just the first one returned by Nessus.
if cwes := attributesMap["cwe"]; len(cwes) > 0 {
cweid, errAtoi := strconv.Atoi(cwes[0])
if errAtoi != nil {
return nil, errAtoi
}
vulcanVulnerability.CWEID = uint32(cweid)
}
if desc := attributesMap["description"]; len(desc) > 0 {
vulcanVulnerability.Description = desc[0]
}
if syn := attributesMap["synopsis"]; len(syn) > 0 {
vulcanVulnerability.ImpactDetails = syn[0]
}
for _, sol := range attributesMap["solution"] {
vulcanVulnerability.Recommendations = append(vulcanVulnerability.Recommendations, sol)
}
for _, ref := range attributesMap["see_also"] {
references := strings.Split(ref, "\n")
vulcanVulnerability.References = append(vulcanVulnerability.References, references...)
}
pluginOutput, err := r.nessusCli.GetPluginOutput(r.nessusPersistedScan.ID, hostID, nessusVulnerability.PluginID)
if err != nil {
return nil, err
}
// In the case Nessus doesn't provide runtime/context information there's
// no much we can state in addition from what the plugin itself describes.
if len(pluginOutput.Output) < 1 {
// As there are no ports specified in the Output, we can't be more
// specific for the affected resource than the whole target.
vulcanVulnerability.AffectedResource = target
if vulcanVulnerability.Score == 0 {
vulcanVulnerability.Labels = append(vulcanVulnerability.Labels, "informational")
} else {
vulcanVulnerability.Labels = append(vulcanVulnerability.Labels, "issue")
}
// As we don't have context information from the Output, at least we
// use the score as a fingerprint.
vulcanVulnerability.Fingerprint = helpers.ComputeFingerprint(vulcanVulnerability.Score)
return []report.Vulnerability{vulcanVulnerability}, nil
}
var vulnerabilities []report.Vulnerability
// Create a new vulnerability per each Output and Port (in case they
// exist). Port format seems to be 'port / protocol / service'. For
// example: '25 / tcp / smtp'. In case the Output is not associated to a
// specific port, Nessus seems to be using '0 / tcp'.
for _, output := range pluginOutput.Output {
v := vulcanVulnerability
v.Details = output.Output
mapPorts, ok := output.Ports.(map[string]interface{})
// Only parse the mapPorts if we get the right type.
if !ok || len(mapPorts) < 1 {
logger.Warnf("unexpected type for Output.Ports: %#v", output.Ports)
// Again, if there are no ports specified we can't be more precise
// than using the target as the affected resource.
v.AffectedResource = target
if v.Score == 0 {
v.Labels = append(v.Labels, "informational")
} else {
v.Labels = append(v.Labels, "issue")
}
// Apart from the score, we can use the Details as a fingerprint,
// that is supposed to give the context of the vulnerability in the
// scanned target.
//
// NOTE: in the examples we analyzed the Details field seemed to be
// stable between executions, but there might be plugins where this
// information changes more often than expected.
v.Fingerprint = helpers.ComputeFingerprint(v.Score, v.Details)
vulnerabilities = append(vulnerabilities, v)
continue
}
for portInformation := range mapPorts {
v := v
parts := strings.Split(portInformation, " / ")
if len(parts) > 2 {
networkResource := map[string]string{
"Hostname": target,
"Port": parts[0],
"Protocol": parts[1],
"Service": parts[2],
}
v.Resources = []report.ResourcesGroup{
report.ResourcesGroup{
Name: "Network Resources",
Header: []string{
"Hostname",
"Port",
"Protocol",
"Service",
},
Rows: []map[string]string{networkResource},
},
}
}
v.AffectedResource = portInformation
if v.Score == 0 {
v.Labels = append(v.Labels, "informational")
} else {
v.Labels = append(v.Labels, "issue")
}
v.Fingerprint = helpers.ComputeFingerprint(v.Score, v.Details, v.Resources)
vulnerabilities = append(vulnerabilities, v)
}
}
return vulnerabilities, nil
}
| [
"\"NESSUS_POLICY_ID\"",
"\"NESSUS_USERNAME\"",
"\"NESSUS_PASSWORD\"",
"\"NESSUS_USERNAME\"",
"\"NESSUS_PASSWORD\"",
"\"NESSUS_ENDPOINT\""
] | [] | [
"NESSUS_POLICY_ID",
"NESSUS_USERNAME",
"NESSUS_PASSWORD",
"NESSUS_ENDPOINT"
] | [] | ["NESSUS_POLICY_ID", "NESSUS_USERNAME", "NESSUS_PASSWORD", "NESSUS_ENDPOINT"] | go | 4 | 0 | |
pkg/dlock/dlock.go | // distributed-lock base etcd
// usage:
// lock, _ := dlock.New(dlock.WithTTL(5), func(){})
// lock.Lock(ctx)
// // do something...
// lock.Unlock()
// // release resource
// lock.Close() // see also lock.UnlockAndClose()
// //see also dlock_test.go
package dlock
import (
"context"
"crypto/tls"
"net/url"
"os"
"strings"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/coreos/etcd/pkg/transport"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// The short keepalive timeout and interval have been chosen to aggressively
// detect a failed etcd server without introducing much overhead.
keepaliveTime = 30 * time.Second
keepaliveTimeout = 10 * time.Second
// default TTL: time to live when the current process exits without unlocking (e.g. on panic)
defaultTTL = 5
)
type DLock struct {
client *clientv3.Client
session *concurrency.Session
mutex *concurrency.Mutex
option *option
lockKey string
normalClose bool
}
type option struct {
ttl int
}
type OpOption func(*option)
func WithTTL(ttl int) OpOption {
return func(op *option) {
op.ttl = ttl
}
}
func applyOptions(ops []OpOption, option *option) error {
for _, op := range ops {
op(option)
}
if option.ttl <= 0 {
return errors.New("illegal ttl value, must greater than 0")
}
return nil
}
func New(lockKey string, locklostCallback func(), ops ...OpOption) (*DLock, error) {
option := &option{ttl: defaultTTL}
if err := applyOptions(ops, option); err != nil {
return nil, err
}
var endpoints []string
env := os.Getenv("ETCD_ENDPOINTS")
if env == "" {
endpoints = []string{"http://127.0.0.1:2379"}
} else {
endpoints = strings.Split(env, ",")
}
var tlsConfig *tls.Config
if len(endpoints) < 1 {
return nil, errors.New("Invalid Etcd endpoints")
}
url, err := url.Parse(endpoints[0])
if err != nil {
return nil, errors.Wrap(err, "Invalid Etcd endpoints")
}
if url.Scheme == "https" {
tlsInfo := transport.TLSInfo{
CertFile: "/certs/etcd-client.pem",
KeyFile: "/certs/etcd-client-key.pem",
TrustedCAFile: "/certs/etcd-ca.pem",
}
tlsConfig, err = tlsInfo.ClientConfig()
if err != nil {
return nil, errors.Wrap(err, "Invalid Etcd TLS config")
}
}
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialKeepAliveTime: keepaliveTime,
DialKeepAliveTimeout: keepaliveTimeout,
TLS: tlsConfig,
})
if err != nil {
return nil, err
}
session, err := concurrency.NewSession(cli, concurrency.WithTTL(option.ttl))
if err != nil {
return nil, err
}
mutex := concurrency.NewMutex(session, lockKey)
l := DLock{
client: cli,
session: session,
mutex: mutex,
lockKey: lockKey,
option: option,
}
go func() {
select {
case <-l.session.Done():
// invoke l.Close() or l.UnlockAndClose()
if l.normalClose {
return
}
if locklostCallback != nil {
locklostCallback()
}
}
}()
return &l, nil
}
// Lock acquires the lock; it is cancelable via the provided context
func (l *DLock) Lock(ctx context.Context) error {
return l.mutex.Lock(ctx)
}
func (l *DLock) Unlock() error {
return l.mutex.Unlock(context.Background())
}
func (l *DLock) Close() error {
l.normalClose = true
var errs []string
if err := l.session.Close(); err != nil {
logrus.Errorf("dlock: failed to close concurrency session, err: %v", err)
errs = append(errs, err.Error())
}
if err := l.client.Close(); err != nil {
logrus.Errorf("dlock: failed to close etcd client, err: %v", err)
errs = append(errs, err.Error())
}
if len(errs) == 0 {
return nil
}
return errors.New(strings.Join(errs, "\n"))
}
func (l *DLock) UnlockAndClose() error {
defer l.Close()
return l.Unlock()
}
// Key returns the locked key belonging to this locker: <lockKey>/<lease-ID>
func (l *DLock) Key() string {
return l.mutex.Key()
}
func (l *DLock) IsOwner() (bool, error) {
r, err := l.client.Txn(context.Background()).If(l.mutex.IsOwner()).Commit()
if err != nil {
return false, err
}
return r.Succeeded, nil
}
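// Illustrative usage sketch (not part of the original package; the key, TTL
// and timeout values are arbitrary). In real code this would live in
// dlock_test.go or in the caller:
//
// lock, err := dlock.New("/dlock/example", func() {
//     // called if the etcd session is lost before Close()/UnlockAndClose()
//     logrus.Warn("dlock: lock lost")
// }, dlock.WithTTL(10))
// if err != nil { /* handle error */ }
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// defer cancel()
// if err := lock.Lock(ctx); err != nil { /* handle error */ }
// // ... critical section ...
// _ = lock.UnlockAndClose()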
| [
"\"ETCD_ENDPOINTS\""
] | [] | [
"ETCD_ENDPOINTS"
] | [] | ["ETCD_ENDPOINTS"] | go | 1 | 0 | |
pxr/usdImaging/usdviewq/stageView.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Module that provides the StageView class.
'''
from __future__ import print_function
from math import tan, floor, ceil, radians as rad, isinf
import os, sys
from time import time
from .qt import QtCore, QtGui, QtWidgets, QtOpenGL
from pxr import Tf
from pxr import Gf
from pxr import Glf
from pxr import Sdf, Usd, UsdGeom
from pxr import UsdImagingGL
from pxr import CameraUtil
from .common import (RenderModes, ColorCorrectionModes, ShadedRenderModes, Timer,
ReportMetricSize, SelectionHighlightModes, DEBUG_CLIPPING,
DefaultFontFamily)
from .rootDataModel import RootDataModel
from .selectionDataModel import ALL_INSTANCES, SelectionDataModel
from .viewSettingsDataModel import ViewSettingsDataModel
from .freeCamera import FreeCamera
# A viewport rectangle to be used for GL must have integer values.
# In order to lose the least amount of precision the viewport
# is centered and adjusted to initially contain the given
# viewport entirely.
# If it turns out that doing so gives more than a pixel width
# or height of error, the viewport is instead inset.
# This does mean that the returned viewport may have a slightly
# different aspect ratio than the given viewport.
def ViewportMakeCenteredIntegral(viewport):
# The values are initially integral and contain the
# given rect
left = int(floor(viewport[0]))
bottom = int(floor(viewport[1]))
right = int(ceil(viewport[0] + viewport[2]))
top = int(ceil(viewport[1] + viewport[3]))
width = right - left
height = top - bottom
# Compare the integral height to the original height
# and do a centered 1 pixel adjustment if more than
# a pixel off.
if (height - viewport[3]) > 1.0:
bottom += 1
height -= 2
# Compare the integral width to the original width
# and do a centered 1 pixel adjustment if more than
# a pixel off.
if (width - viewport[2]) > 1.0:
left += 1
width -= 2
return (left, bottom, width, height)
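# A small worked example (illustrative only): for the fractional viewport
# (10.3, 20.6, 100.2, 50.9) the containing integral rect is (10, 20, 101, 52).
# Its height exceeds the original by 1.1 > 1 pixel, so the rect is inset
# vertically by one pixel on each side, giving
# ViewportMakeCenteredIntegral((10.3, 20.6, 100.2, 50.9)) == (10, 21, 101, 50);
# the width error (0.8 pixels) is within tolerance and is left as-is.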
class GLSLProgram():
def __init__(self, VS3, FS3, VS2, FS2, uniformDict):
from OpenGL import GL
# versionString = <version_number><space><vendor_specific_information>
versionString = GL.glGetString(GL.GL_VERSION).decode()
# <version_number> = <major_number>.<minor_number>[.<release_number>]
versionNumberString = versionString.split()[0]
self._glMajorVersion = int(versionNumberString.split('.')[0])
# requires PyOpenGL 3.0.2 or later for glGenVertexArrays.
self.useVAO = (self._glMajorVersion >= 3 and
hasattr(GL, 'glGenVertexArrays'))
self.useSampleAlphaToCoverage = (self._glMajorVersion >= 4)
self.program = GL.glCreateProgram()
vertexShader = GL.glCreateShader(GL.GL_VERTEX_SHADER)
fragmentShader = GL.glCreateShader(GL.GL_FRAGMENT_SHADER)
if (self._glMajorVersion >= 3):
vsSource = VS3
fsSource = FS3
else:
vsSource = VS2
fsSource = FS2
GL.glShaderSource(vertexShader, vsSource)
GL.glCompileShader(vertexShader)
GL.glShaderSource(fragmentShader, fsSource)
GL.glCompileShader(fragmentShader)
GL.glAttachShader(self.program, vertexShader)
GL.glAttachShader(self.program, fragmentShader)
GL.glLinkProgram(self.program)
if GL.glGetProgramiv(self.program, GL.GL_LINK_STATUS) == GL.GL_FALSE:
print(GL.glGetShaderInfoLog(vertexShader))
print(GL.glGetShaderInfoLog(fragmentShader))
print(GL.glGetProgramInfoLog(self.program))
GL.glDeleteShader(vertexShader)
GL.glDeleteShader(fragmentShader)
GL.glDeleteProgram(self.program)
self.program = 0
GL.glDeleteShader(vertexShader)
GL.glDeleteShader(fragmentShader)
self.uniformLocations = {}
for param in uniformDict:
self.uniformLocations[param] = GL.glGetUniformLocation(self.program, param)
def uniform4f(self, param, x, y, z, w):
from OpenGL import GL
GL.glUniform4f(self.uniformLocations[param], x, y, z, w)
class Rect():
def __init__(self):
self.xywh = [0.0] * 4
@classmethod
def fromXYWH(cls, xywh):
self = cls()
self.xywh[:] = list(map(float, xywh[:4]))
return self
@classmethod
def fromCorners(cls, c0, c1):
self = cls()
self.xywh[0] = float(min(c0[0], c1[0]))
self.xywh[1] = float(min(c0[1], c1[1]))
self.xywh[2] = float(max(c0[0], c1[0])) - self.xywh[0]
self.xywh[3] = float(max(c0[1], c1[1])) - self.xywh[1]
return self
def scaledAndBiased(self, sxy, txy):
ret = self.__class__()
for c in range(2):
ret.xywh[c] = sxy[c] * self.xywh[c] + txy[c]
ret.xywh[c + 2] = sxy[c] * self.xywh[c + 2]
return ret
def _splitAlongY(self, y):
bottom = self.__class__()
top = self.__class__()
bottom.xywh[:] = self.xywh
top.xywh[:] = self.xywh
top.xywh[1] = y
bottom.xywh[3] = top.xywh[1] - bottom.xywh[1]
top.xywh[3] = top.xywh[3] - bottom.xywh[3]
return bottom, top
def _splitAlongX(self, x):
left = self.__class__()
right = self.__class__()
left.xywh[:] = self.xywh
right.xywh[:] = self.xywh
right.xywh[0] = x
left.xywh[2] = right.xywh[0] - left.xywh[0]
right.xywh[2] = right.xywh[2] - left.xywh[2]
return left, right
def difference(self, xywh):
#check x
if xywh[0] > self.xywh[0]:
#keep left, check right
left, right = self._splitAlongX(xywh[0])
return [left] + right.difference(xywh)
if (xywh[0] + xywh[2]) < (self.xywh[0] + self.xywh[2]):
#keep right
left, right = self._splitAlongX(xywh[0] + xywh[2])
return [right]
#check y
if xywh[1] > self.xywh[1]:
#keep bottom, check top
bottom, top = self._splitAlongY(xywh[1])
return [bottom] + top.difference(xywh)
if (xywh[1] + xywh[3]) < (self.xywh[1] + self.xywh[3]):
#keep top
bottom, top = self._splitAlongY(xywh[1] + xywh[3])
return [top]
return []
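# Worked example (illustrative only): this is how Mask uses difference() to
# compute the letterbox regions outside a camera viewport. With
#   r = Rect.fromXYWH((0, 0, 100, 100))
#   r.difference((25, 0, 50, 100))
# the rect is split along x=25 and x=75, returning the two side bars
# (0, 0, 25, 100) and (75, 0, 25, 100) while the covered middle strip is
# dropped.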
class OutlineRect(Rect):
_glslProgram = None
_vbo = 0
_vao = 0
def __init__(self):
Rect.__init__(self)
@classmethod
def compileProgram(self):
if self._glslProgram:
return self._glslProgram
from OpenGL import GL
import ctypes
# prep a quad line vbo
self._vbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._vbo)
st = [0, 0, 1, 0, 1, 1, 0, 1]
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(st)*4,
(ctypes.c_float*len(st))(*st), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
self._glslProgram = GLSLProgram(
# for OpenGL 3.1 or later
"""#version 140
uniform vec4 rect;
in vec2 st;
void main() {
gl_Position = vec4(rect.x + rect.z*st.x,
rect.y + rect.w*st.y, 0, 1); }""",
"""#version 140
out vec4 fragColor;
uniform vec4 color;
void main() { fragColor = color; }""",
# for OpenGL 2.1 (osx compatibility profile)
"""#version 120
uniform vec4 rect;
attribute vec2 st;
void main() {
gl_Position = vec4(rect.x + rect.z*st.x,
rect.y + rect.w*st.y, 0, 1); }""",
"""#version 120
uniform vec4 color;
void main() { gl_FragColor = color; }""",
["rect", "color"])
return self._glslProgram
def glDraw(self, color):
from OpenGL import GL
cls = self.__class__
program = cls.compileProgram()
if program.program == 0:
return
GL.glUseProgram(program.program)
if program.useSampleAlphaToCoverage:
GL.glDisable(GL.GL_SAMPLE_ALPHA_TO_COVERAGE)
if program.useVAO:
if (cls._vao == 0):
cls._vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(cls._vao)
# for some reason, we need to bind at least 1 vertex attrib (on OSX)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, cls._vbo)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 2, GL.GL_FLOAT, False, 0, None)
program.uniform4f("color", *color)
program.uniform4f("rect", *self.xywh)
GL.glDrawArrays(GL.GL_LINE_LOOP, 0, 4)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glDisableVertexAttribArray(0)
if program.useVAO:
GL.glBindVertexArray(0)
GL.glUseProgram(0)
class FilledRect(Rect):
_glslProgram = None
_vbo = 0
_vao = 0
def __init__(self):
Rect.__init__(self)
@classmethod
def compileProgram(self):
if self._glslProgram:
return self._glslProgram
from OpenGL import GL
import ctypes
# prep a quad line vbo
self._vbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._vbo)
st = [0, 0, 1, 0, 0, 1, 1, 1]
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(st)*4,
(ctypes.c_float*len(st))(*st), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
self._glslProgram = GLSLProgram(
# for OpenGL 3.1 or later
"""#version 140
uniform vec4 rect;
in vec2 st;
void main() {
gl_Position = vec4(rect.x + rect.z*st.x,
rect.y + rect.w*st.y, 0, 1); }""",
"""#version 140
out vec4 fragColor;
uniform vec4 color;
void main() { fragColor = color; }""",
# for OpenGL 2.1 (osx compatibility profile)
"""#version 120
uniform vec4 rect;
attribute vec2 st;
void main() {
gl_Position = vec4(rect.x + rect.z*st.x,
rect.y + rect.w*st.y, 0, 1); }""",
"""#version 120
uniform vec4 color;
void main() { gl_FragColor = color; }""",
["rect", "color"])
return self._glslProgram
def glDraw(self, color):
#don't draw if too small
if self.xywh[2] < 0.001 or self.xywh[3] < 0.001:
return
from OpenGL import GL
cls = self.__class__
program = cls.compileProgram()
if program.program == 0:
return
GL.glUseProgram(program.program)
if program.useSampleAlphaToCoverage:
GL.glDisable(GL.GL_SAMPLE_ALPHA_TO_COVERAGE)
if program.useVAO:
if (cls._vao == 0):
cls._vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(cls._vao)
# for some reason, we need to bind at least 1 vertex attrib (on OSX)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, cls._vbo)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 2, GL.GL_FLOAT, False, 0, None)
program.uniform4f("color", *color)
program.uniform4f("rect", *self.xywh)
GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 0, 4)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glDisableVertexAttribArray(0)
if program.useVAO:
GL.glBindVertexArray(0)
GL.glUseProgram(0)
class Prim2DSetupTask():
def __init__(self, viewport):
self._viewport = viewport[:]
def Sync(self, ctx):
pass
def Execute(self, ctx):
from OpenGL import GL
GL.glViewport(*self._viewport)
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glEnable(GL.GL_BLEND)
class Prim2DDrawTask():
def __init__(self):
self._prims = []
self._colors = []
self._pixelRatio = QtWidgets.QApplication.instance().devicePixelRatio()
def Sync(self, ctx):
for prim in self._prims:
prim.__class__.compileProgram()
def Execute(self, ctx):
for prim, color in zip(self._prims, self._colors):
prim.glDraw(color)
class Outline(Prim2DDrawTask):
def __init__(self):
Prim2DDrawTask.__init__(self)
self._outlineColor = Gf.ConvertDisplayToLinear(Gf.Vec4f(0.0, 0.0, 0.0, 1.0))
def updatePrims(self, croppedViewport, qglwidget):
width = float(qglwidget.width()) * self._pixelRatio
height = float(qglwidget.height()) * self._pixelRatio
prims = [ OutlineRect.fromXYWH(croppedViewport) ]
self._prims = [p.scaledAndBiased((2.0 / width, 2.0 / height), (-1, -1))
for p in prims]
self._colors = [ self._outlineColor ]
class Reticles(Prim2DDrawTask):
def __init__(self):
Prim2DDrawTask.__init__(self)
self._outlineColor = Gf.ConvertDisplayToLinear(Gf.Vec4f(0.0, 0.7, 1.0, 0.9))
def updateColor(self, color):
self._outlineColor = Gf.ConvertDisplayToLinear(Gf.Vec4f(*color))
def updatePrims(self, croppedViewport, qglwidget, inside, outside):
width = float(qglwidget.width()) * self._pixelRatio
height = float(qglwidget.height()) * self._pixelRatio
prims = [ ]
ascenders = [0, 0]
descenders = [0, 0]
if inside:
descenders = [7, 15]
if outside:
ascenders = [7, 15]
# vertical reticles on the top and bottom
for i in range(5):
w = 2.6
h = ascenders[i & 1] + descenders[i & 1]
x = croppedViewport[0] - (w // 2) + ((i + 1) * croppedViewport[2]) // 6
bottomY = croppedViewport[1] - ascenders[i & 1]
topY = croppedViewport[1] + croppedViewport[3] - descenders[i & 1]
prims.append(FilledRect.fromXYWH((x, bottomY, w, h)))
prims.append(FilledRect.fromXYWH((x, topY, w, h)))
# horizontal reticles on the left and right
for i in range(5):
w = ascenders[i & 1] + descenders[i & 1]
h = 2.6
leftX = croppedViewport[0] - ascenders[i & 1]
rightX = croppedViewport[0] + croppedViewport[2] - descenders[i & 1]
y = croppedViewport[1] - (h // 2) + ((i + 1) * croppedViewport[3]) // 6
prims.append(FilledRect.fromXYWH((leftX, y, w, h)))
prims.append(FilledRect.fromXYWH((rightX, y, w, h)))
self._prims = [p.scaledAndBiased((2.0 / width, 2.0 / height), (-1, -1))
for p in prims]
self._colors = [ self._outlineColor ] * len(self._prims)
class Mask(Prim2DDrawTask):
def __init__(self):
Prim2DDrawTask.__init__(self)
self._maskColor = Gf.ConvertDisplayToLinear(Gf.Vec4f(0.0, 0.0, 0.0, 1.0))
def updateColor(self, color):
self._maskColor = Gf.ConvertDisplayToLinear(Gf.Vec4f(*color))
def updatePrims(self, croppedViewport, qglwidget):
width = float(qglwidget.width()) * self._pixelRatio
height = float(qglwidget.height()) * self._pixelRatio
rect = FilledRect.fromXYWH((0, 0, width, height))
prims = rect.difference(croppedViewport)
self._prims = [p.scaledAndBiased((2.0 / width, 2.0 / height), (-1, -1))
for p in prims]
self._colors = [ self._maskColor ] * 2
class HUD():
class Group():
def __init__(self, name, w, h):
self.x = 0
self.y = 0
self.w = w
self.h = h
pixelRatio = QtWidgets.QApplication.instance().devicePixelRatio()
imageW = w * pixelRatio
imageH = h * pixelRatio
self.qimage = QtGui.QImage(imageW, imageH, QtGui.QImage.Format_ARGB32)
self.qimage.fill(QtGui.QColor(0, 0, 0, 0))
self.painter = QtGui.QPainter()
def __init__(self):
self._pixelRatio = QtWidgets.QApplication.instance().devicePixelRatio()
self._HUDLineSpacing = 15
self._HUDFont = QtGui.QFont(DefaultFontFamily.MONOSPACE_FONT_FAMILY,
9*self._pixelRatio)
self._groups = {}
self._glslProgram = None
self._vao = 0
def compileProgram(self):
from OpenGL import GL
import ctypes
# prep a quad vbo
self._vbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._vbo)
st = [0, 0, 1, 0, 0, 1, 1, 1]
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(st)*4,
(ctypes.c_float*len(st))(*st), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
self._glslProgram = GLSLProgram(
# for OpenGL 3.1 or later
"""#version 140
uniform vec4 rect;
in vec2 st;
out vec2 uv;
void main() {
gl_Position = vec4(rect.x + rect.z*st.x,
rect.y + rect.w*st.y, 0, 1);
uv = vec2(st.x, 1 - st.y); }""",
"""#version 140
in vec2 uv;
out vec4 color;
uniform sampler2D tex;
void main() { color = texture(tex, uv); }""",
# for OpenGL 2.1 (osx compatibility profile)
"""#version 120
uniform vec4 rect;
attribute vec2 st;
varying vec2 uv;
void main() {
gl_Position = vec4(rect.x + rect.z*st.x,
rect.y + rect.w*st.y, 0, 1);
uv = vec2(st.x, 1 - st.y); }""",
"""#version 120
varying vec2 uv;
uniform sampler2D tex;
void main() { gl_FragColor = texture2D(tex, uv); }""",
["rect", "tex"])
return True
def addGroup(self, name, w, h):
self._groups[name] = self.Group(name, w, h)
def updateGroup(self, name, x, y, col, dic, keys = None):
group = self._groups[name]
group.qimage.fill(QtGui.QColor(0, 0, 0, 0))
group.x = x
group.y = y
painter = group.painter
painter.begin(group.qimage)
from .prettyPrint import prettyPrint
if keys is None:
keys = sorted(dic.keys())
# find the longest key so we know how far from the edge to print
# add [0] at the end so that max() never gets an empty sequence
longestKeyLen = max([len(k) for k in dic.keys()]+[0])
margin = int(longestKeyLen*1.4)
painter.setFont(self._HUDFont)
color = QtGui.QColor()
yy = 10 * self._pixelRatio
lineSpacing = self._HUDLineSpacing * self._pixelRatio
for key in keys:
if key not in dic:
continue
line = key.rjust(margin) + ": " + str(prettyPrint(dic[key]))
# Shadow of text
shadow = Gf.ConvertDisplayToLinear(Gf.Vec3f(.2, .2, .2))
color.setRgbF(shadow[0], shadow[1], shadow[2])
painter.setPen(color)
painter.drawText(1, yy+1, line)
# Colored text
color.setRgbF(col[0], col[1], col[2])
painter.setPen(color)
painter.drawText(0, yy, line)
yy += lineSpacing
painter.end()
return y + lineSpacing
def draw(self, qglwidget):
from OpenGL import GL
if self._glslProgram is None:
self.compileProgram()
if (self._glslProgram.program == 0):
return
GL.glUseProgram(self._glslProgram.program)
width = float(qglwidget.width())
height = float(qglwidget.height())
if self._glslProgram.useSampleAlphaToCoverage:
GL.glDisable(GL.GL_SAMPLE_ALPHA_TO_COVERAGE)
if self._glslProgram.useVAO:
if (self._vao == 0):
self._vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self._vao)
# for some reason, we need to bind at least 1 vertex attrib (on OSX)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._vbo)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 2, GL.GL_FLOAT, False, 0, None)
# seems like a bug in Qt4.8/CoreProfile on OSX that GL_UNPACK_ROW_LENGTH has changed.
GL.glPixelStorei(GL.GL_UNPACK_ROW_LENGTH, 0)
for name in self._groups:
group = self._groups[name]
tex = qglwidget.bindTexture(group.qimage, GL.GL_TEXTURE_2D, GL.GL_RGBA,
QtOpenGL.QGLContext.NoBindOption)
GL.glUniform4f(self._glslProgram.uniformLocations["rect"],
2*group.x/width - 1,
1 - 2*group.y/height - 2*group.h/height,
2*group.w/width,
2*group.h/height)
GL.glUniform1i(self._glslProgram.uniformLocations["tex"], 0)
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 0, 4)
GL.glDeleteTextures(tex)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glDisableVertexAttribArray(0)
if self._glslProgram.useVAO:
GL.glBindVertexArray(0)
GL.glUseProgram(0)
def _ComputeCameraFraming(viewport, renderBufferSize):
x, y, w, h = viewport
renderBufferWidth = renderBufferSize[0]
renderBufferHeight = renderBufferSize[1]
# Set display window equal to viewport - but flipped
# since viewport is in y-Up coordinate system but
# display window is y-Down.
displayWindow = Gf.Range2f(
Gf.Vec2f(x, renderBufferHeight - y - h),
Gf.Vec2f(x + w, renderBufferHeight - y))
# Intersect the display window with render buffer rect for
# data window.
renderBufferRect = Gf.Rect2i(
Gf.Vec2i(0, 0), renderBufferWidth, renderBufferHeight)
dataWindow = renderBufferRect.GetIntersection(
Gf.Rect2i(
Gf.Vec2i(x, renderBufferHeight - y - h),
w, h))
return CameraUtil.Framing(displayWindow, dataWindow)
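# Worked example (illustrative only, values are arbitrary): for a y-up
# viewport (x=100, y=50, w=800, h=600) on a 1920x1080 render buffer, the
# y-down display window runs from (100, 1080-50-600) = (100, 430) to
# (100+800, 1080-50) = (900, 1030), and the data window is that same rect
# clipped against the full 1920x1080 buffer (here it already fits, so it is
# unchanged).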
class StageView(QtOpenGL.QGLWidget):
'''
QGLWidget that displays a USD Stage. A StageView requires a dataModel
object from which it will query state it needs to properly image its
given UsdStage. See the nested DefaultDataModel class for the expected
API.
'''
# TODO: most, if not all of the state StageView requires (except possibly
# the stage?), should be migrated to come from the dataModel, and redrawing
# should be triggered by signals the dataModel emits.
class DefaultDataModel(RootDataModel):
def __init__(self):
super(StageView.DefaultDataModel, self).__init__()
self._selectionDataModel = SelectionDataModel(self)
self._viewSettingsDataModel = ViewSettingsDataModel(self, None)
@property
def selection(self):
return self._selectionDataModel
@property
def viewSettings(self):
return self._viewSettingsDataModel
###########
# Signals #
###########
signalBboxUpdateTimeChanged = QtCore.Signal(int)
# First arg is primPath, (which could be empty Path)
# Second arg is instanceIndex (or UsdImagingGL.ALL_INSTANCES for all
# instances)
# Third and fourth arg are primPath, instanceIndex, of root level
# boundable (if applicable).
# Fifth arg is selectedPoint
# Sixth and seventh args represent state at time of the pick
signalPrimSelected = QtCore.Signal(Sdf.Path, int, Sdf.Path, int, Gf.Vec3f,
QtCore.Qt.MouseButton,
QtCore.Qt.KeyboardModifiers)
# Only raised when StageView has been told to do so, setting
# rolloverPicking to True
signalPrimRollover = QtCore.Signal(Sdf.Path, int, Sdf.Path, int,
Gf.Vec3f, QtCore.Qt.KeyboardModifiers)
signalMouseDrag = QtCore.Signal()
signalErrorMessage = QtCore.Signal(str)
signalSwitchedToFreeCam = QtCore.Signal()
signalFrustumChanged = QtCore.Signal()
@property
def renderParams(self):
return self._renderParams
@renderParams.setter
def renderParams(self, params):
self._renderParams = params
@property
def autoClip(self):
return self._dataModel.viewSettings.autoComputeClippingPlanes
@property
def showReticles(self):
return ((self._dataModel.viewSettings.showReticles_Inside or self._dataModel.viewSettings.showReticles_Outside)
and self._dataModel.viewSettings.cameraPrim != None)
@property
def _fitCameraInViewport(self):
return ((self._dataModel.viewSettings.showMask or self._dataModel.viewSettings.showMask_Outline or self.showReticles)
and self._dataModel.viewSettings.cameraPrim != None)
@property
def _cropImageToCameraViewport(self):
return ((self._dataModel.viewSettings.showMask and self._dataModel.viewSettings.showMask_Opaque)
and self._dataModel.viewSettings.cameraPrim != None)
@property
def cameraPrim(self):
return self._dataModel.viewSettings.cameraPrim
@cameraPrim.setter
def cameraPrim(self, prim):
self._dataModel.viewSettings.cameraPrim = prim
@property
def rolloverPicking(self):
return self._rolloverPicking
@rolloverPicking.setter
def rolloverPicking(self, enabled):
self._rolloverPicking = enabled
self.setMouseTracking(enabled)
@property
def fpsHUDInfo(self):
return self._fpsHUDInfo
@fpsHUDInfo.setter
def fpsHUDInfo(self, info):
self._fpsHUDInfo = info
@property
def fpsHUDKeys(self):
return self._fpsHUDKeys
@fpsHUDKeys.setter
def fpsHUDKeys(self, keys):
self._fpsHUDKeys = keys
@property
def upperHUDInfo(self):
return self._upperHUDInfo
@upperHUDInfo.setter
def upperHUDInfo(self, info):
self._upperHUDInfo = info
@property
def HUDStatKeys(self):
return self._HUDStatKeys
@HUDStatKeys.setter
def HUDStatKeys(self, keys):
self._HUDStatKeys = keys
@property
def overrideNear(self):
return self._overrideNear
@overrideNear.setter
def overrideNear(self, value):
"""To remove the override, set to None. Causes FreeCamera to become
active."""
self._overrideNear = value
self.switchToFreeCamera()
self._dataModel.viewSettings.freeCamera.overrideNear = value
self.updateGL()
@property
def overrideFar(self):
return self._overrideFar
@overrideFar.setter
def overrideFar(self, value):
"""To remove the override, set to None. Causes FreeCamera to become
active."""
self._overrideFar = value
self.switchToFreeCamera()
self._dataModel.viewSettings.freeCamera.overrideFar = value
self.updateGL()
@property
def allSceneCameras(self):
return self._allSceneCameras
@allSceneCameras.setter
def allSceneCameras(self, value):
self._allSceneCameras = value
@property
def gfCamera(self):
"""Return the last computed Gf Camera"""
return self._lastComputedGfCamera
@property
def cameraFrustum(self):
"""Unlike the StageView.freeCamera property, which is invalid/None
whenever we are viewing from a scene/stage camera, the 'cameraFrustum'
property will always return the last-computed camera frustum, regardless
of source."""
return self._lastComputedGfCamera.frustum
@property
def rendererDisplayName(self):
return self._rendererDisplayName
@property
def rendererAovName(self):
return self._rendererAovName
def __init__(self, parent=None, dataModel=None, printTiming=False):
# Note: The default format *disables* the alpha component and so the
# default backbuffer uses GL_RGB.
glFormat = QtOpenGL.QGLFormat()
msaa = os.getenv("USDVIEW_ENABLE_MSAA", "1")
if msaa == "1":
glFormat.setSampleBuffers(True)
glFormat.setSamples(4)
# XXX: for OSX (QT5 required)
# glFormat.setProfile(QtOpenGL.QGLFormat.CoreProfile)
super(StageView, self).__init__(glFormat, parent)
self._dataModel = dataModel or StageView.DefaultDataModel()
self._printTiming = printTiming
self._isFirstImage = True
# update() whenever a visible view setting (one which affects the view)
# is changed.
self._dataModel.viewSettings.signalVisibleSettingChanged.connect(
self.update)
self._dataModel.viewSettings.signalAutoComputeClippingPlanesChanged\
.connect(self._onAutoComputeClippingChanged)
self._dataModel.signalStageReplaced.connect(self._stageReplaced)
self._dataModel.selection.signalPrimSelectionChanged.connect(
self._primSelectionChanged)
self._dataModel.viewSettings.freeCamera = FreeCamera(True,
self._dataModel.viewSettings.freeCameraFOV)
self._lastComputedGfCamera = None
# prep Mask regions
self._mask = Mask()
self._maskOutline = Outline()
self._reticles = Reticles()
# prep HUD regions
self._hud = HUD()
self._hud.addGroup("TopLeft", 250, 160) # subtree
self._hud.addGroup("TopRight", 140, 32) # Hydra: Enabled
self._hud.addGroup("BottomLeft", 250, 160) # GPU stats
self._hud.addGroup("BottomRight", 210, 32) # Camera, Complexity
self._stageIsZup = True
self._cameraMode = "none"
self._rolloverPicking = False
self._dragActive = False
self._lastX = 0
self._lastY = 0
self._renderer = None
self._renderPauseState = False
self._renderStopState = False
self._reportedContextError = False
self._renderModeDict = {
RenderModes.WIREFRAME: UsdImagingGL.DrawMode.DRAW_WIREFRAME,
RenderModes.WIREFRAME_ON_SURFACE:
UsdImagingGL.DrawMode.DRAW_WIREFRAME_ON_SURFACE,
RenderModes.SMOOTH_SHADED: UsdImagingGL.DrawMode.DRAW_SHADED_SMOOTH,
RenderModes.POINTS: UsdImagingGL.DrawMode.DRAW_POINTS,
RenderModes.FLAT_SHADED: UsdImagingGL.DrawMode.DRAW_SHADED_FLAT,
RenderModes.GEOM_ONLY: UsdImagingGL.DrawMode.DRAW_GEOM_ONLY,
RenderModes.GEOM_SMOOTH: UsdImagingGL.DrawMode.DRAW_GEOM_SMOOTH,
RenderModes.GEOM_FLAT: UsdImagingGL.DrawMode.DRAW_GEOM_FLAT,
RenderModes.HIDDEN_SURFACE_WIREFRAME:
UsdImagingGL.DrawMode.DRAW_WIREFRAME
}
self._renderParams = UsdImagingGL.RenderParams()
# Optionally override OCIO lut size. Similar env var available for
# other apps: ***_OCIO_LUT3D_EDGE_SIZE
ocioLutSize = int(os.getenv("USDVIEW_OCIO_LUT3D_EDGE_SIZE", 0))
if ocioLutSize > 0:
self._renderParams.lut3dSizeOCIO = ocioLutSize
self._dist = 50
self._bbox = Gf.BBox3d()
self._selectionBBox = Gf.BBox3d()
self._selectionBrange = Gf.Range3d()
self._selectionOrientedRange = Gf.Range3d()
self._bbcenterForBoxDraw = (0, 0, 0)
self._overrideNear = None
self._overrideFar = None
self._forceRefresh = False
self._renderTime = 0
self._allSceneCameras = None
# HUD properties
self._fpsHUDInfo = dict()
self._fpsHUDKeys = []
self._upperHUDInfo = dict()
self._HUDStatKeys = list()
self._glPrimitiveGeneratedQuery = None
self._glTimeElapsedQuery = None
self._simpleGLSLProgram = None
self._axisVBO = None
self._bboxVBO = None
self._cameraGuidesVBO = None
self._vao = 0
# Update all properties for the current stage.
self._stageReplaced()
def _getRenderer(self):
# Unfortunately, we cannot assume that initializeGL() was called
# before attempts to use the renderer (e.g. pick()), so we must
# create the renderer lazily, when we try to do real work with it.
if not self._renderer:
if self.context().isValid():
if self.context().initialized():
self._renderer = UsdImagingGL.Engine()
self._handleRendererChanged(self.GetCurrentRendererId())
elif not self._reportedContextError:
self._reportedContextError = True
raise RuntimeError("StageView could not initialize renderer without a valid GL context")
return self._renderer
def _handleRendererChanged(self, rendererId):
self._rendererDisplayName = self.GetRendererDisplayName(rendererId)
self._rendererAovName = "color"
self._renderPauseState = False
self._renderStopState = False
# XXX For HdSt we explicitly enable AOV via SetRendererAov
# This is because ImagingGL / TaskController are spawned via prims in
# Presto, so we default AOVs OFF until everything is AOV ready.
self.SetRendererAov(self.rendererAovName)
def _scaleMouseCoords(self, point):
return point * QtWidgets.QApplication.instance().devicePixelRatio()
def closeRenderer(self):
'''Close the current renderer.'''
with Timer() as t:
self._renderer = None
if self._printTiming:
t.PrintTime('shut down Hydra')
def GetRendererPlugins(self):
if self._renderer:
return self._renderer.GetRendererPlugins()
else:
return []
def GetRendererDisplayName(self, plugId):
if self._renderer:
return self._renderer.GetRendererDisplayName(plugId)
else:
return ""
def GetCurrentRendererId(self):
if self._renderer:
return self._renderer.GetCurrentRendererId()
else:
return ""
def SetRendererPlugin(self, plugId):
if self._renderer:
if self._renderer.SetRendererPlugin(plugId):
self._handleRendererChanged(plugId)
self.updateGL()
return True
else:
return False
return True
def GetRendererAovs(self):
if self._renderer:
return self._renderer.GetRendererAovs()
else:
return []
def SetRendererAov(self, aov):
if self._renderer:
if self._renderer.SetRendererAov(aov):
self._rendererAovName = aov
self.updateGL()
return True
else:
return False
return True
def GetRendererSettingsList(self):
if self._renderer:
return self._renderer.GetRendererSettingsList()
else:
return []
def GetRendererSetting(self, name):
if self._renderer:
return self._renderer.GetRendererSetting(name)
else:
return None
def SetRendererSetting(self, name, value):
if self._renderer:
self._renderer.SetRendererSetting(name, value)
self.updateGL()
def SetRendererPaused(self, paused):
if self._renderer and (not self._renderer.IsConverged()):
if paused:
self._renderPauseState = self._renderer.PauseRenderer()
else:
self._renderPauseState = not self._renderer.ResumeRenderer()
self.updateGL()
def IsPauseRendererSupported(self):
if self._renderer:
if self._renderer.IsPauseRendererSupported():
return True
return False
def IsRendererConverged(self):
return self._renderer and self._renderer.IsConverged()
def SetRendererStopped(self, stopped):
if self._renderer:
if stopped:
self._renderStopState = self._renderer.StopRenderer()
else:
self._renderStopState = not self._renderer.RestartRenderer()
self.updateGL()
def IsStopRendererSupported(self):
if self._renderer:
if self._renderer.IsStopRendererSupported():
return True
return False
def _stageReplaced(self):
'''Set the USD Stage this widget will be displaying. To decommission
(even temporarily) this widget, supply None as 'stage'.'''
self.allSceneCameras = None
if self._dataModel.stage:
self._stageIsZup = (
UsdGeom.GetStageUpAxis(self._dataModel.stage) == UsdGeom.Tokens.z)
self._dataModel.viewSettings.freeCamera = \
FreeCamera(self._stageIsZup,
self._dataModel.viewSettings.freeCameraFOV)
# simple GLSL program for axis/bbox drawings
def GetSimpleGLSLProgram(self):
if self._simpleGLSLProgram is None:
self._simpleGLSLProgram = GLSLProgram(
"""#version 140
uniform mat4 mvpMatrix;
in vec3 position;
void main() { gl_Position = vec4(position, 1)*mvpMatrix; }""",
"""#version 140
out vec4 outColor;
uniform vec4 color;
void main() { outColor = color; }""",
"""#version 120
uniform mat4 mvpMatrix;
attribute vec3 position;
void main() { gl_Position = vec4(position, 1)*mvpMatrix; }""",
"""#version 120
uniform vec4 color;
void main() { gl_FragColor = color; }""",
["mvpMatrix", "color"])
return self._simpleGLSLProgram
def DrawAxis(self, viewProjectionMatrix):
from OpenGL import GL
import ctypes
# grab the simple shader
glslProgram = self.GetSimpleGLSLProgram()
if glslProgram.program == 0:
return
# vao
if glslProgram.useVAO:
if (self._vao == 0):
self._vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self._vao)
# prep a vbo for axis
if (self._axisVBO is None):
self._axisVBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._axisVBO)
data = [1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0]
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(data)*4,
(ctypes.c_float*len(data))(*data), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._axisVBO)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
GL.glUseProgram(glslProgram.program)
# i *think* this actually wants the camera dist so that the axis stays
# somewhat fixed in screen-space size.
mvpMatrix = Gf.Matrix4f().SetScale(self._dist/20.0) * viewProjectionMatrix
matrix = (ctypes.c_float*16).from_buffer_copy(mvpMatrix)
GL.glUniformMatrix4fv(glslProgram.uniformLocations["mvpMatrix"],
1, GL.GL_TRUE, matrix)
GL.glUniform4f(glslProgram.uniformLocations["color"], 1, 0, 0, 1)
GL.glDrawArrays(GL.GL_LINES, 0, 2)
GL.glUniform4f(glslProgram.uniformLocations["color"], 0, 1, 0, 1)
GL.glDrawArrays(GL.GL_LINES, 2, 2)
GL.glUniform4f(glslProgram.uniformLocations["color"], 0, 0, 1, 1)
GL.glDrawArrays(GL.GL_LINES, 4, 2)
GL.glDisableVertexAttribArray(0)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glUseProgram(0)
if glslProgram.useVAO:
GL.glBindVertexArray(0)
def DrawBBox(self, viewProjectionMatrix):
col = self._dataModel.viewSettings.clearColor
color = Gf.Vec3f(col[0]-.6 if col[0]>0.5 else col[0]+.6,
col[1]-.6 if col[1]>0.5 else col[1]+.6,
col[2]-.6 if col[2]>0.5 else col[2]+.6)
color[0] = Gf.Clamp(color[0], 0, 1);
color[1] = Gf.Clamp(color[1], 0, 1);
color[2] = Gf.Clamp(color[2], 0, 1);
# Draw axis-aligned bounding box
if self._dataModel.viewSettings.showAABBox:
bsize = self._selectionBrange.max - self._selectionBrange.min
trans = Gf.Transform()
trans.SetScale(0.5*bsize)
trans.SetTranslation(self._bbcenterForBoxDraw)
self.drawWireframeCube(color,
Gf.Matrix4f(trans.GetMatrix()) * viewProjectionMatrix)
# Draw oriented bounding box
if self._dataModel.viewSettings.showOBBox:
bsize = self._selectionOrientedRange.max - self._selectionOrientedRange.min
center = bsize / 2. + self._selectionOrientedRange.min
trans = Gf.Transform()
trans.SetScale(0.5*bsize)
trans.SetTranslation(center)
self.drawWireframeCube(color,
Gf.Matrix4f(trans.GetMatrix()) *
Gf.Matrix4f(self._selectionBBox.matrix) *
viewProjectionMatrix)
# XXX:
# First pass at visualizing cameras in usdview-- just oracles for
# now. Eventually the logic should live in usdImaging, where the delegate
# would add the camera guide geometry to the GL buffers over the course of
# its stage traversal, and get time samples accordingly.
def DrawCameraGuides(self, mvpMatrix):
from OpenGL import GL
import ctypes
# prep a vbo for camera guides
if (self._cameraGuidesVBO is None):
self._cameraGuidesVBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._cameraGuidesVBO)
data = []
for camera in self._allSceneCameras:
# Don't draw guides for the active camera.
if camera == self._dataModel.viewSettings.cameraPrim or not (camera and camera.IsActive()):
continue
gfCamera = UsdGeom.Camera(camera).GetCamera(
self._dataModel.currentFrame)
frustum = gfCamera.frustum
# (Gf documentation seems to be wrong)-- Ordered as
# 0: left bottom near
# 1: right bottom near
# 2: left top near
# 3: right top near
# 4: left bottom far
# 5: right bottom far
# 6: left top far
# 7: right top far
oraclePoints = frustum.ComputeCorners()
# Near plane
indices = [0,1,1,3,3,2,2,0, # Near plane
4,5,5,7,7,6,6,4, # Far plane
3,7,0,4,1,5,2,6] # Lines between near and far planes.
data.extend([oraclePoints[i][j] for i in indices for j in range(3)])
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(data)*4,
(ctypes.c_float*len(data))(*data), GL.GL_STATIC_DRAW)
# grab the simple shader
glslProgram = self.GetSimpleGLSLProgram()
if (glslProgram.program == 0):
return
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
GL.glUseProgram(glslProgram.program)
matrix = (ctypes.c_float*16).from_buffer_copy(mvpMatrix)
GL.glUniformMatrix4fv(glslProgram.uniformLocations["mvpMatrix"],
1, GL.GL_TRUE, matrix)
# Grabbed fallback oracleColor from CamCamera.
GL.glUniform4f(glslProgram.uniformLocations["color"],
0.82745, 0.39608, 0.1647, 1)
GL.glDrawArrays(GL.GL_LINES, 0, len(data)//3)
GL.glDisableVertexAttribArray(0)
GL.glUseProgram(0)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
def updateBboxPurposes(self):
includedPurposes = self._dataModel.includedPurposes
if self._dataModel.viewSettings.displayGuide:
includedPurposes.add(UsdGeom.Tokens.guide)
elif UsdGeom.Tokens.guide in includedPurposes:
includedPurposes.remove(UsdGeom.Tokens.guide)
if self._dataModel.viewSettings.displayProxy:
includedPurposes.add(UsdGeom.Tokens.proxy)
elif UsdGeom.Tokens.proxy in includedPurposes:
includedPurposes.remove(UsdGeom.Tokens.proxy)
if self._dataModel.viewSettings.displayRender:
includedPurposes.add(UsdGeom.Tokens.render)
elif UsdGeom.Tokens.render in includedPurposes:
includedPurposes.remove(UsdGeom.Tokens.render)
self._dataModel.includedPurposes = includedPurposes
# force the bbox to refresh
self._bbox = Gf.BBox3d()
def recomputeBBox(self):
selectedPrims = self._dataModel.selection.getLCDPrims()
try:
startTime = time()
self._bbox = self.getStageBBox()
if len(selectedPrims) == 1 and selectedPrims[0].GetPath() == '/':
if self._bbox.GetRange().IsEmpty():
self._selectionBBox = self._getDefaultBBox()
else:
self._selectionBBox = self._bbox
else:
self._selectionBBox = self.getSelectionBBox()
# BBox computation time for HUD
endTime = time()
ms = (endTime - startTime) * 1000.
self.signalBboxUpdateTimeChanged.emit(ms)
except RuntimeError:
# This may fail, but we want to keep the UI available,
# so print the error and attempt to continue loading
self.signalErrorMessage.emit("unable to get bounding box on "
"stage at frame {0}".format(self._dataModel.currentFrame))
import traceback
traceback.print_exc()
self._bbox = self._getEmptyBBox()
self._selectionBBox = self._getDefaultBBox()
self._selectionBrange = self._selectionBBox.ComputeAlignedRange()
self._selectionOrientedRange = self._selectionBBox.box
self._bbcenterForBoxDraw = self._selectionBBox.ComputeCentroid()
def resetCam(self, frameFit=1.1):
validFrameRange = (not self._selectionBrange.IsEmpty() and
self._selectionBrange.GetMax() != self._selectionBrange.GetMin())
if validFrameRange:
self.switchToFreeCamera(False)
self._dataModel.viewSettings.freeCamera.frameSelection(self._selectionBBox,
frameFit)
if self._dataModel.viewSettings.autoComputeClippingPlanes:
self.computeAndSetClosestDistance()
def updateView(self, resetCam=False, forceComputeBBox=False, frameFit=1.1):
'''Updates bounding boxes and camera. resetCam = True causes the camera to reframe
the specified prims. frameFit sets the ratio of the camera's frustum's
relevant dimension to the object's bounding box. 1.1, the default,
fits the prim's bounding box in the frame with a roughly 10% margin.
'''
# Only compute BBox if forced, if needed for drawing,
# or if this is the first time running.
computeBBox = forceComputeBBox or \
(self._dataModel.viewSettings.showBBoxes and
(self._dataModel.viewSettings.showAABBox or self._dataModel.viewSettings.showOBBox))\
or self._bbox.GetRange().IsEmpty()
if computeBBox:
self.recomputeBBox()
if resetCam:
self.resetCam(frameFit)
self.updateGL()
def updateSelection(self):
try:
renderer = self._getRenderer()
if not renderer:
# error has already been issued
return
renderer.ClearSelected()
psuRoot = self._dataModel.stage.GetPseudoRoot()
allInstances = self._dataModel.selection.getPrimInstances()
for prim in self._dataModel.selection.getLCDPrims():
if prim == psuRoot:
continue
primInstances = allInstances[prim]
if primInstances != ALL_INSTANCES:
for instanceIndex in primInstances:
renderer.AddSelected(prim.GetPath(), instanceIndex)
else:
renderer.AddSelected(
prim.GetPath(), UsdImagingGL.ALL_INSTANCES)
except Tf.ErrorException as e:
# If we encounter an error, we want to continue running. Just log
# the error and continue.
sys.stderr.write(
"ERROR: Usdview encountered an error while updating selection."
"{}\n".format(e))
finally:
# Make sure not to leak a reference to the renderer
renderer = None
def _getEmptyBBox(self):
# This returns the default empty bbox [FLT_MAX,-FLT_MAX]
return Gf.BBox3d()
def _getDefaultBBox(self):
return Gf.BBox3d(Gf.Range3d((-10,-10,-10), (10,10,10)))
def _isInfiniteBBox(self, bbox):
return isinf(bbox.GetRange().GetMin().GetLength()) or \
isinf(bbox.GetRange().GetMax().GetLength())
def getStageBBox(self):
bbox = self._dataModel.computeWorldBound(
self._dataModel.stage.GetPseudoRoot())
if bbox.GetRange().IsEmpty() or self._isInfiniteBBox(bbox):
bbox = self._getEmptyBBox()
return bbox
def getSelectionBBox(self):
bbox = Gf.BBox3d()
for n in self._dataModel.selection.getLCDPrims():
if n.IsActive() and not n.IsInPrototype():
primBBox = self._dataModel.computeWorldBound(n)
bbox = Gf.BBox3d.Combine(bbox, primBBox)
return bbox
def renderSinglePass(self, renderMode, renderSelHighlights):
if not self._dataModel.stage:
return
renderer = self._getRenderer()
if not renderer:
# error has already been issued
return
# update rendering parameters
self._renderParams.frame = self._dataModel.currentFrame
self._renderParams.complexity = self._dataModel.viewSettings.complexity.value
self._renderParams.drawMode = renderMode
self._renderParams.showGuides = self._dataModel.viewSettings.displayGuide
self._renderParams.showProxy = self._dataModel.viewSettings.displayProxy
self._renderParams.showRender = self._dataModel.viewSettings.displayRender
self._renderParams.forceRefresh = self._forceRefresh
self._renderParams.cullStyle = \
(UsdImagingGL.CullStyle.CULL_STYLE_BACK_UNLESS_DOUBLE_SIDED
if self._dataModel.viewSettings.cullBackfaces
else UsdImagingGL.CullStyle.CULL_STYLE_NOTHING)
self._renderParams.gammaCorrectColors = False
self._renderParams.enableIdRender = self._dataModel.viewSettings.displayPrimId
self._renderParams.enableSampleAlphaToCoverage = not self._dataModel.viewSettings.displayPrimId
self._renderParams.highlight = renderSelHighlights
self._renderParams.enableSceneMaterials = self._dataModel.viewSettings.enableSceneMaterials
self._renderParams.enableSceneLights = self._dataModel.viewSettings.enableSceneLights
self._renderParams.colorCorrectionMode = self._dataModel.viewSettings.colorCorrectionMode
self._renderParams.clearColor = Gf.Vec4f(self._dataModel.viewSettings.clearColor)
pseudoRoot = self._dataModel.stage.GetPseudoRoot()
renderer.SetSelectionColor(self._dataModel.viewSettings.highlightColor)
try:
renderer.Render(pseudoRoot, self._renderParams)
except Tf.ErrorException as e:
# If we encounter an error during a render, we want to continue
# running. Just log the error and continue.
sys.stderr.write(
"ERROR: Usdview encountered an error while rendering.{}\n".format(e))
finally:
# Make sure not to leak a reference to the renderer
renderer = None
self._forceRefresh = False
def initializeGL(self):
if not self.isValid():
return
from pxr import Glf
Glf.RegisterDefaultDebugOutputMessageCallback()
def updateGL(self):
"""We override this virtual so that we can make it a no-op during
playback. The client driving playback at a particular rate should
instead call updateForPlayback() to image the next frame."""
if not self._dataModel.playing:
super(StageView, self).updateGL()
def updateForPlayback(self):
"""If playing, update the GL canvas. Otherwise a no-op"""
if self._dataModel.playing:
super(StageView, self).updateGL()
def getActiveSceneCamera(self):
cameraPrim = self._dataModel.viewSettings.cameraPrim
if cameraPrim and cameraPrim.IsActive():
return cameraPrim
return None
# XXX: Consolidate window/frustum conformance code that is littered in
# several places.
def computeWindowPolicy(self, cameraAspectRatio):
# The freeCam always uses 'MatchVertically'.
# When using a scene cam, we factor in the masking setting and window
# size to compute it.
windowPolicy = CameraUtil.MatchVertically
if self.getActiveSceneCamera():
if self._cropImageToCameraViewport:
targetAspect = (
float(self.size().width()) / max(1.0, self.size().height()))
if targetAspect < cameraAspectRatio:
windowPolicy = CameraUtil.MatchHorizontally
else:
if self._fitCameraInViewport:
windowPolicy = CameraUtil.Fit
return windowPolicy
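# Illustrative example (not part of the original logic): with a 1000x500
# window (target aspect 2.0) and a scene camera whose aspect ratio is 2.39,
# cropping to the camera viewport picks CameraUtil.MatchHorizontally because
# 2.0 < 2.39; a freeCamera, or a wider window, falls back to MatchVertically,
# and mask/reticle display without opaque cropping yields CameraUtil.Fit.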
def computeWindowSize(self):
size = self.size() * self.devicePixelRatioF()
return (int(size.width()), int(size.height()))
def computeWindowViewport(self):
return (0, 0) + self.computeWindowSize()
def resolveCamera(self):
"""Returns a tuple of the camera to use for rendering (either a scene
camera or a free camera) and that camera's original aspect ratio.
Depending on camera guide settings, the camera frustum may be conformed
to fit the window viewport. Emits a signalFrustumChanged if the
camera frustum has changed since the last time resolveCamera was called."""
# If 'camera' is None, make sure we have a valid freeCamera
sceneCam = self.getActiveSceneCamera()
if sceneCam:
gfCam = UsdGeom.Camera(sceneCam).GetCamera(
self._dataModel.currentFrame)
else:
self.switchToFreeCamera()
gfCam = self._dataModel.viewSettings.freeCamera.computeGfCamera(
self._bbox, autoClip=self.autoClip)
cameraAspectRatio = gfCam.aspectRatio
# Conform the camera's frustum to the window viewport, if necessary.
if not self._cropImageToCameraViewport:
targetAspect = float(self.size().width()) / max(1.0, self.size().height())
if self._fitCameraInViewport:
CameraUtil.ConformWindow(gfCam, CameraUtil.Fit, targetAspect)
else:
CameraUtil.ConformWindow(gfCam, CameraUtil.MatchVertically, targetAspect)
frustumChanged = ((not self._lastComputedGfCamera) or
self._lastComputedGfCamera.frustum != gfCam.frustum)
# We need to COPY the camera, not assign it...
self._lastComputedGfCamera = Gf.Camera(gfCam)
if frustumChanged:
self.signalFrustumChanged.emit()
return (gfCam, cameraAspectRatio)
def computeCameraViewport(self, cameraAspectRatio):
# Conform the camera viewport to the camera's aspect ratio,
# and center the camera viewport in the window viewport.
windowPolicy = CameraUtil.MatchVertically
targetAspect = (
float(self.size().width()) / max(1.0, self.size().height()))
if targetAspect < cameraAspectRatio:
windowPolicy = CameraUtil.MatchHorizontally
viewport = Gf.Range2d(Gf.Vec2d(0, 0),
Gf.Vec2d(self.computeWindowSize()))
viewport = CameraUtil.ConformedWindow(viewport, windowPolicy, cameraAspectRatio)
viewport = (viewport.GetMin()[0], viewport.GetMin()[1],
viewport.GetSize()[0], viewport.GetSize()[1])
viewport = ViewportMakeCenteredIntegral(viewport)
return viewport
def copyViewState(self):
"""Returns a copy of this StageView's view-affecting state,
which can be used later to restore the view via restoreViewState().
Take note that we do NOT include the StageView's notion of the
current time (used by prim-based cameras to extract their data),
since we do not want a restore operation to put us out of sync
with respect to our owner's time.
"""
viewState = {}
viewState["_cameraPrim"] = self._dataModel.viewSettings.cameraPrim
viewState["_stageIsZup"] = self._stageIsZup
viewState["_overrideNear"] = self._overrideNear
viewState["_overrideFar"] = self._overrideFar
# Since FreeCamera is a compound/class object, we must copy
# it more deeply
viewState["_freeCamera"] = self._dataModel.viewSettings.freeCamera.clone() if self._dataModel.viewSettings.freeCamera else None
return viewState
def restoreViewState(self, viewState):
"""Restore view parameters from 'viewState', and redraw"""
self._dataModel.viewSettings.cameraPrim = viewState["_cameraPrim"]
self._stageIsZup = viewState["_stageIsZup"]
self._overrideNear = viewState["_overrideNear"]
self._overrideFar = viewState["_overrideFar"]
restoredCamera = viewState["_freeCamera"]
# Detach our freeCamera from the given viewState, to
# insulate against changes to viewState by caller
self._dataModel.viewSettings.freeCamera = restoredCamera.clone() if restoredCamera else None
self.update()
def drawWireframeCube(self, col, mvpMatrix):
from OpenGL import GL
import ctypes, itertools
# grab the simple shader
glslProgram = self.GetSimpleGLSLProgram()
if (glslProgram.program == 0):
return
# vao
if glslProgram.useVAO:
if (self._vao == 0):
self._vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self._vao)
# prep a vbo for bbox
if (self._bboxVBO is None):
self._bboxVBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._bboxVBO)
# create 12 edges
data = []
p = list(itertools.product([-1,1],[-1,1],[-1,1]))
for i in p:
data.extend([i[0], i[1], i[2]])
for i in p:
data.extend([i[1], i[2], i[0]])
for i in p:
data.extend([i[2], i[0], i[1]])
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(data)*4,
(ctypes.c_float*len(data))(*data), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._bboxVBO)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
GL.glEnable(GL.GL_LINE_STIPPLE)
GL.glLineStipple(2,0xAAAA)
GL.glUseProgram(glslProgram.program)
matrix = (ctypes.c_float*16).from_buffer_copy(mvpMatrix)
GL.glUniformMatrix4fv(glslProgram.uniformLocations["mvpMatrix"],
1, GL.GL_TRUE, matrix)
GL.glUniform4f(glslProgram.uniformLocations["color"],
col[0], col[1], col[2], 1)
GL.glDrawArrays(GL.GL_LINES, 0, 24)
GL.glDisableVertexAttribArray(0)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glUseProgram(0)
GL.glDisable(GL.GL_LINE_STIPPLE)
if glslProgram.useVAO:
GL.glBindVertexArray(0)
def paintGL(self):
if not self._dataModel.stage:
return
renderer = self._getRenderer()
if not renderer:
# error has already been issued
return
try:
from OpenGL import GL
if self._dataModel.viewSettings.showHUD_GPUstats:
if self._glPrimitiveGeneratedQuery is None:
self._glPrimitiveGeneratedQuery = Glf.GLQueryObject()
if self._glTimeElapsedQuery is None:
self._glTimeElapsedQuery = Glf.GLQueryObject()
self._glPrimitiveGeneratedQuery.BeginPrimitivesGenerated()
self._glTimeElapsedQuery.BeginTimeElapsed()
if not UsdImagingGL.Engine.IsColorCorrectionCapable():
from OpenGL.GL.EXT.framebuffer_sRGB import GL_FRAMEBUFFER_SRGB_EXT
GL.glEnable(GL_FRAMEBUFFER_SRGB_EXT)
# Clear the default FBO associated with the widget/context to
# fully transparent and *not* the bg color.
# The bg color is used as the clear color for the aov, and the
# results of rendering are composited over the FBO (and not blit).
GL.glClearColor(*Gf.Vec4f(0,0,0,0))
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glDepthFunc(GL.GL_LESS)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glEnable(GL.GL_BLEND)
# Note: camera lights and camera guides require the
# resolved (adjusted) camera viewProjection matrix, which is
# why we resolve the camera above always.
(gfCamera, cameraAspect) = self.resolveCamera()
frustum = gfCamera.frustum
cameraViewport = self.computeCameraViewport(cameraAspect)
viewport = self.computeWindowViewport()
windowViewport = viewport
if self._cropImageToCameraViewport:
viewport = cameraViewport
# For legacy implementation (--renderer HydraDisabled)
if not renderer.IsHydraEnabled():
renderer.SetRenderViewport(viewport)
renderer.SetWindowPolicy(self.computeWindowPolicy(cameraAspect))
renderBufferSize = Gf.Vec2i(self.computeWindowSize())
renderer.SetRenderBufferSize(
renderBufferSize)
renderer.SetFraming(
_ComputeCameraFraming(viewport, renderBufferSize))
renderer.SetOverrideWindowPolicy(
self.computeWindowPolicy(cameraAspect))
sceneCam = self.getActiveSceneCamera()
if sceneCam:
# When using a USD camera, simply set it as the active camera.
# Window policy conformance is handled in the engine/hydra.
renderer.SetCameraPath(sceneCam.GetPath())
else:
# When using the free cam (which isn't currently backed on the
# USD stage), we send the camera matrices to the engine.
renderer.SetCameraState(frustum.ComputeViewMatrix(),
frustum.ComputeProjectionMatrix())
viewProjectionMatrix = Gf.Matrix4f(frustum.ComputeViewMatrix()
* frustum.ComputeProjectionMatrix())
GL.glClear(GL.GL_COLOR_BUFFER_BIT|GL.GL_DEPTH_BUFFER_BIT)
# ensure viewport is right for the camera framing
GL.glViewport(*viewport)
# Set the clipping planes.
self._renderParams.clipPlanes = [Gf.Vec4d(i) for i in
gfCamera.clippingPlanes]
if len(self._dataModel.selection.getLCDPrims()) > 0:
cam_pos = frustum.position
sceneAmbient = (0.01, 0.01, 0.01, 1.0)
material = Glf.SimpleMaterial()
lights = []
# for renderModes that need lights
if self._dataModel.viewSettings.renderMode in ShadedRenderModes:
# ambient light located at the camera
if self._dataModel.viewSettings.ambientLightOnly:
l = Glf.SimpleLight()
l.ambient = (0, 0, 0, 0)
l.position = (cam_pos[0], cam_pos[1], cam_pos[2], 1)
lights.append(l)
# Default Dome Light
if self._dataModel.viewSettings.domeLightEnabled:
l = Glf.SimpleLight()
l.isDomeLight = True
if self._stageIsZup:
l.transform = Gf.Matrix4d().SetRotate(
Gf.Rotation(Gf.Vec3d.XAxis(), 90))
lights.append(l)
kA = self._dataModel.viewSettings.defaultMaterialAmbient
kS = self._dataModel.viewSettings.defaultMaterialSpecular
material.ambient = (kA, kA, kA, 1.0)
material.specular = (kS, kS, kS, 1.0)
material.shininess = 32.0
# modes that want no lighting simply leave lights as an empty list
renderer.SetLightingState(lights, material, sceneAmbient)
if self._dataModel.viewSettings.renderMode == RenderModes.HIDDEN_SURFACE_WIREFRAME:
GL.glEnable( GL.GL_POLYGON_OFFSET_FILL )
GL.glPolygonOffset( 1.0, 1.0 )
GL.glPolygonMode( GL.GL_FRONT_AND_BACK, GL.GL_FILL )
self.renderSinglePass(
UsdImagingGL.DrawMode.DRAW_GEOM_ONLY, False)
GL.glDisable( GL.GL_POLYGON_OFFSET_FILL )
# Use display space for the second clear when color
# correction is performed by the engine because we
# composite the framebuffer contents with the
# color-corrected (i.e., display space) aov contents.
clearColor = Gf.ConvertLinearToDisplay(Gf.Vec4f(
self._dataModel.viewSettings.clearColor))
if not UsdImagingGL.Engine.IsColorCorrectionCapable():
# Use linear color when using the sRGB extension
clearColor = Gf.Vec4f(
self._dataModel.viewSettings.clearColor)
GL.glClearColor(*clearColor)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
highlightMode = self._dataModel.viewSettings.selHighlightMode
if self._dataModel.playing:
# Highlight mode must be ALWAYS to draw highlights during playback.
drawSelHighlights = (
highlightMode == SelectionHighlightModes.ALWAYS)
else:
# Highlight mode can be ONLY_WHEN_PAUSED or ALWAYS to draw
# highlights when paused.
drawSelHighlights = (
highlightMode != SelectionHighlightModes.NEVER)
self.renderSinglePass(
self._renderModeDict[self._dataModel.viewSettings.renderMode],
drawSelHighlights)
if not UsdImagingGL.Engine.IsColorCorrectionCapable():
GL.glDisable(GL_FRAMEBUFFER_SRGB_EXT)
self.DrawAxis(viewProjectionMatrix)
# XXX:
# Draw camera guides-- no support for toggling guide visibility on
# individual cameras until we move this logic directly into
# usdImaging.
if self._dataModel.viewSettings.displayCameraOracles:
self.DrawCameraGuides(viewProjectionMatrix)
if self._dataModel.viewSettings.showBBoxes and\
(self._dataModel.viewSettings.showBBoxPlayback or not self._dataModel.playing):
self.DrawBBox(viewProjectionMatrix)
else:
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
if self._dataModel.viewSettings.showHUD_GPUstats:
self._glPrimitiveGeneratedQuery.End()
self._glTimeElapsedQuery.End()
# reset the viewport for 2D and HUD drawing
uiTasks = [ Prim2DSetupTask(self.computeWindowViewport()) ]
if self._dataModel.viewSettings.showMask:
color = self._dataModel.viewSettings.cameraMaskColor
if self._dataModel.viewSettings.showMask_Opaque:
color = color[0:3] + (1.0,)
else:
color = color[0:3] + (color[3] * 0.45,)
self._mask.updateColor(color)
self._mask.updatePrims(cameraViewport, self)
uiTasks.append(self._mask)
if self._dataModel.viewSettings.showMask_Outline:
self._maskOutline.updatePrims(cameraViewport, self)
uiTasks.append(self._maskOutline)
if self.showReticles:
color = self._dataModel.viewSettings.cameraReticlesColor
color = color[0:3] + (color[3] * 0.85,)
self._reticles.updateColor(color)
self._reticles.updatePrims(cameraViewport, self,
self._dataModel.viewSettings.showReticles_Inside, self._dataModel.viewSettings.showReticles_Outside)
uiTasks.append(self._reticles)
for task in uiTasks:
task.Sync(None)
for task in uiTasks:
task.Execute(None)
# check current state of renderer -- (not IsConverged()) means renderer is running
if self._renderStopState and (not renderer.IsConverged()):
self._renderStopState = False
# ### DRAW HUD ### #
if self._dataModel.viewSettings.showHUD:
self.drawHUD(renderer)
if (not self._dataModel.playing) and (not renderer.IsConverged()):
QtCore.QTimer.singleShot(5, self.update)
except Exception as e:
# If we encounter an error during a render, we want to continue
# running. Just log the error and continue.
sys.stderr.write(
"ERROR: Usdview encountered an error while rendering."
"{}\n".format(e))
finally:
# Make sure not to leak a reference to the renderer
renderer = None
def drawHUD(self, renderer):
# compute the time it took to render this frame,
# so we can display it in the HUD
ms = self._renderTime * 1000.
fps = float("inf")
if self._renderTime != 0:
fps = 1./self._renderTime
# put the result in the HUD string
self.fpsHUDInfo['Render'] = "%.2f ms (%.2f FPS)" % (ms, fps)
col = Gf.Vec3f(.733,.604,.333)
# the subtree info does not update while animating, grey it out
if not self._dataModel.playing:
subtreeCol = col
else:
subtreeCol = Gf.Vec3f(.6,.6,.6)
# Subtree Info
if self._dataModel.viewSettings.showHUD_Info:
self._hud.updateGroup("TopLeft", 0, 14, subtreeCol,
self.upperHUDInfo,
self.HUDStatKeys)
else:
self._hud.updateGroup("TopLeft", 0, 0, subtreeCol, {})
# Complexity
if self._dataModel.viewSettings.showHUD_Complexity:
# Camera name
camName = "Free%s" % (" AutoClip" if self.autoClip else "")
if self._dataModel.viewSettings.cameraPrim:
camName = self._dataModel.viewSettings.cameraPrim.GetName()
toPrint = {"Complexity" : self._dataModel.viewSettings.complexity.name,
"Camera" : camName}
self._hud.updateGroup("BottomRight",
self.width()-210, self.height()-self._hud._HUDLineSpacing*2,
col, toPrint)
else:
self._hud.updateGroup("BottomRight", 0, 0, col, {})
# Hydra Enabled (Top Right)
hydraMode = "Disabled"
if UsdImagingGL.Engine.IsHydraEnabled():
hydraMode = self._rendererDisplayName
if not hydraMode:
hydraMode = "Enabled"
if self._renderPauseState:
toPrint = {"Hydra": "(paused)"}
elif self._renderStopState:
toPrint = {"Hydra": "(stopped)"}
else:
toPrint = {"Hydra": hydraMode}
if self._rendererAovName != "color":
toPrint[" AOV"] = self._rendererAovName
self._hud.updateGroup("TopRight", self.width()-160, 14, col,
toPrint, toPrint.keys())
# bottom left
from collections import OrderedDict
toPrint = OrderedDict()
# GPU stats (TimeElapsed is in nanoseconds)
if self._dataModel.viewSettings.showHUD_GPUstats:
def _addSizeMetric(toPrint, stats, label, key):
if key in stats:
toPrint[label] = ReportMetricSize(stats[key])
rStats = renderer.GetRenderStats()
toPrint["GL prims "] = self._glPrimitiveGeneratedQuery.GetResult()
if not (self._renderPauseState or self._renderStopState):
toPrint["GPU time "] = "%.2f ms " % (self._glTimeElapsedQuery.GetResult() / 1000000.0)
_addSizeMetric(toPrint, rStats, "GPU mem ", "gpuMemoryUsed")
_addSizeMetric(toPrint, rStats, " primvar ", "primvar")
_addSizeMetric(toPrint, rStats, " topology", "topology")
_addSizeMetric(toPrint, rStats, " shader ", "drawingShader")
_addSizeMetric(toPrint, rStats, " texture ", "textureMemory")
if "numCompletedSamples" in rStats:
toPrint["Samples done "] = rStats["numCompletedSamples"]
# Playback Rate
if (not (self._renderPauseState or self._renderStopState)) and \
self._dataModel.viewSettings.showHUD_Performance:
for key in self.fpsHUDKeys:
toPrint[key] = self.fpsHUDInfo[key]
self._hud.updateGroup("BottomLeft",
0, self.height()-len(toPrint)*self._hud._HUDLineSpacing,
col, toPrint, toPrint.keys())
# draw HUD
self._hud.draw(self)
def sizeHint(self):
return QtCore.QSize(460, 460)
def switchToFreeCamera(self, computeAndSetClosestDistance=True):
"""
If our current camera corresponds to a prim, create a FreeCamera
that has the same view and use it.
"""
if self._dataModel.viewSettings.cameraPrim is not None:
# cameraPrim may no longer be valid, so use the last-computed
# gf camera
if self._lastComputedGfCamera:
self._dataModel.viewSettings.freeCamera = FreeCamera.FromGfCamera(
self._lastComputedGfCamera, self._stageIsZup)
else:
self._dataModel.viewSettings.freeCamera = FreeCamera(
self._stageIsZup,
self._dataModel.viewSettings.freeCameraFOV)
# The override clipping-plane state is managed by StageView so that it
# can be persistent; therefore we must restore it now.
self._dataModel.viewSettings.freeCamera.overrideNear = self._overrideNear
self._dataModel.viewSettings.freeCamera.overrideFar = self._overrideFar
self._dataModel.viewSettings.cameraPrim = None
if computeAndSetClosestDistance:
self.computeAndSetClosestDistance()
# let the controller know we've done this!
self.signalSwitchedToFreeCam.emit()
# It would be nice to support marquee selection in the viewer also, at some point...
def mousePressEvent(self, event):
"""This widget claims the Alt modifier key as the enabler for camera
manipulation, and will consume mousePressEvents when Alt is present.
In any other modifier state, a mousePressEvent will result in a
pick operation, and the pressed button and active modifiers will be
made available to clients via a signalPrimSelected()."""
# It's important to set this first, since pickObject(), called below
# may produce the mouse-up event that will terminate the drag
# initiated by this mouse-press
self._dragActive = True
# Note: multiplying by devicePixelRatio is only necessary because this
# is a QGLWidget.
x = event.x() * self.devicePixelRatioF()
y = event.y() * self.devicePixelRatioF()
# Allow for either meta or alt key, since meta maps to Windows and Apple
# keys on various hardware/os combos, and some windowing systems consume
# one or the other by default, but hopefully not both.
if (event.modifiers() & (QtCore.Qt.AltModifier | QtCore.Qt.MetaModifier)):
if event.button() == QtCore.Qt.LeftButton:
self.switchToFreeCamera()
ctrlModifier = event.modifiers() & QtCore.Qt.ControlModifier
self._cameraMode = "truck" if ctrlModifier else "tumble"
if event.button() == QtCore.Qt.MidButton:
self.switchToFreeCamera()
self._cameraMode = "truck"
if event.button() == QtCore.Qt.RightButton:
self.switchToFreeCamera()
self._cameraMode = "zoom"
else:
self._cameraMode = "pick"
self.pickObject(x, y, event.button(), event.modifiers())
self._lastX = x
self._lastY = y
def mouseReleaseEvent(self, event):
self._cameraMode = "none"
self._dragActive = False
def mouseMoveEvent(self, event):
# Note: multiplying by devicePixelRatio is only necessary because this
# is a QGLWidget.
x = event.x() * self.devicePixelRatioF()
y = event.y() * self.devicePixelRatioF()
if self._dragActive:
dx = x - self._lastX
dy = y - self._lastY
if dx == 0 and dy == 0:
return
freeCam = self._dataModel.viewSettings.freeCamera
if self._cameraMode == "tumble":
freeCam.Tumble(0.25 * dx, 0.25*dy)
elif self._cameraMode == "zoom":
zoomDelta = -.002 * (dx + dy)
if freeCam.orthographic:
# orthographic cameras zoom by scaling fov
# fov is the height of the view frustum in world units
freeCam.fov *= (1 + zoomDelta)
else:
# perspective cameras dolly forward or back
freeCam.AdjustDistance(1 + zoomDelta)
elif self._cameraMode == "truck":
height = float(self.size().height())
pixelsToWorld = freeCam.ComputePixelsToWorldFactor(height)
self._dataModel.viewSettings.freeCamera.Truck(
-dx * pixelsToWorld,
dy * pixelsToWorld)
self._lastX = x
self._lastY = y
self.updateGL()
self.signalMouseDrag.emit()
elif self._cameraMode == "none":
# Mouse tracking is only enabled when rolloverPicking is enabled,
# and, outside of an active drag, this function only gets called when
# mouse-tracking is enabled.
self.pickObject(event.x(), event.y(), None, event.modifiers())
else:
event.ignore()
def wheelEvent(self, event):
self.switchToFreeCamera()
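# Scale the camera distance by the scroll delta, clamped to +/-50% per event.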
self._dataModel.viewSettings.freeCamera.AdjustDistance(
1-max(-0.5,min(0.5,(event.angleDelta().y()/1000.))))
self.updateGL()
def _onAutoComputeClippingChanged(self):
"""If we are currently rendering from a prim camera, switch to the
FreeCamera. Then reset the near/far clipping planes based on
distance to closest geometry. But only when autoClip has turned on!"""
if self._dataModel.viewSettings.autoComputeClippingPlanes:
if not self._dataModel.viewSettings.freeCamera:
self.switchToFreeCamera()
else:
self.computeAndSetClosestDistance()
def computeAndSetClosestDistance(self):
'''Using the current FreeCamera's frustum, determine the world-space
closest rendered point to the camera. Use that point
to set our FreeCamera's closest visible distance.'''
# pick() operates at very low screen resolution, but that's OK for
# our purposes. Ironically, the same limited Z-buffer resolution for
# which we are trying to compensate may cause us to completely lose
# ALL of our geometry if we set the near-clip really small (which we
# want to do so we don't miss anything) when geometry is clustered
# closer to far-clip. So in the worst case, we may need to perform
# two picks, with the first pick() using a small near and far, and the
# second pick() using a near that keeps far within the safe precision
# range. We don't expect the worst-case to happen often.
if not self._dataModel.viewSettings.freeCamera:
return
cameraFrustum = self.resolveCamera()[0].frustum
trueFar = cameraFrustum.nearFar.max
smallNear = min(FreeCamera.defaultNear,
self._dataModel.viewSettings.freeCamera._selSize / 10.0)
cameraFrustum.nearFar = \
Gf.Range1d(smallNear, smallNear*FreeCamera.maxSafeZResolution)
pickResults = self.pick(cameraFrustum)
if pickResults[0] is None or pickResults[2] == Sdf.Path.emptyPath:
cameraFrustum.nearFar = \
Gf.Range1d(trueFar/FreeCamera.maxSafeZResolution, trueFar)
pickResults = self.pick(cameraFrustum)
if Tf.Debug.IsDebugSymbolNameEnabled(DEBUG_CLIPPING):
print("computeAndSetClosestDistance: Needed to call pick() a second time")
if pickResults[0] is not None and pickResults[2] != Sdf.Path.emptyPath:
self._dataModel.viewSettings.freeCamera.setClosestVisibleDistFromPoint(pickResults[0])
self.updateView()
def pick(self, pickFrustum):
'''
Find closest point in scene rendered through 'pickFrustum'.
Returns a sextuple:
selectedPoint, selectedNormal, selectedPrimPath,
selectedInstancerPath, selectedInstanceIndex, selectedInstancerContext
'''
renderer = self._getRenderer()
if not self._dataModel.stage or not renderer:
# error has already been issued
return None, None, Sdf.Path.emptyPath, None, None, None
# this import is here to make sure the create_first_image stat doesn't
# regress..
from OpenGL import GL
# Need a correct OpenGL Rendering context for FBOs
self.makeCurrent()
# update rendering parameters
self._renderParams.frame = self._dataModel.currentFrame
self._renderParams.complexity = self._dataModel.viewSettings.complexity.value
self._renderParams.drawMode = self._renderModeDict[self._dataModel.viewSettings.renderMode]
self._renderParams.showGuides = self._dataModel.viewSettings.displayGuide
self._renderParams.showProxy = self._dataModel.viewSettings.displayProxy
self._renderParams.showRender = self._dataModel.viewSettings.displayRender
self._renderParams.forceRefresh = self._forceRefresh
self._renderParams.cullStyle = \
(UsdImagingGL.CullStyle.CULL_STYLE_BACK_UNLESS_DOUBLE_SIDED
if self._dataModel.viewSettings.cullBackfaces
else UsdImagingGL.CullStyle.CULL_STYLE_NOTHING)
self._renderParams.gammaCorrectColors = False
self._renderParams.enableIdRender = True
self._renderParams.enableSampleAlphaToCoverage = False
self._renderParams.enableSceneMaterials = self._dataModel.viewSettings.enableSceneMaterials
self._renderParams.enableSceneLights = self._dataModel.viewSettings.enableSceneLights
results = renderer.TestIntersection(
pickFrustum.ComputeViewMatrix(),
pickFrustum.ComputeProjectionMatrix(),
self._dataModel.stage.GetPseudoRoot(), self._renderParams)
if Tf.Debug.IsDebugSymbolNameEnabled(DEBUG_CLIPPING):
print("Pick results = {}".format(results))
return results
def computePickFrustum(self, x, y):
# compute pick frustum
(gfCamera, cameraAspect) = self.resolveCamera()
cameraFrustum = gfCamera.frustum
viewport = self.computeWindowViewport()
if self._cropImageToCameraViewport:
viewport = self.computeCameraViewport(cameraAspect)
# normalize position and pick size by the viewport size
point = Gf.Vec2d((x - viewport[0]) / float(viewport[2]),
(y - viewport[1]) / float(viewport[3]))
point[0] = (point[0] * 2.0 - 1.0)
point[1] = -1.0 * (point[1] * 2.0 - 1.0)
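# point is now in NDC-style [-1, 1] coordinates; the sign flip on y accounts for
# the window's y-down convention versus the frustum's y-up convention.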
size = Gf.Vec2d(1.0 / viewport[2], 1.0 / viewport[3])
# "point" is normalized to the image viewport size, but if the image
# is cropped to the camera viewport, the image viewport won't fill the
# whole window viewport. Clicking outside the image will produce
# normalized coordinates > 1 or < -1; in this case, we should skip
# picking.
inImageBounds = (abs(point[0]) <= 1.0 and abs(point[1]) <= 1.0)
return (inImageBounds, cameraFrustum.ComputeNarrowedFrustum(point, size))
def pickObject(self, x, y, button, modifiers):
'''
Render stage into fbo with each piece as a different color.
Emits a signalPrimSelected or signalRollover depending on
whether 'button' is None.
'''
if not self._dataModel.stage:
return
renderer = self._getRenderer()
if not renderer:
# error has already been issued
return
try:
(inImageBounds, pickFrustum) = self.computePickFrustum(x,y)
if inImageBounds:
selectedPoint, selectedNormal, selectedPrimPath, \
selectedInstanceIndex, selectedTLPath, selectedTLIndex = \
self.pick(pickFrustum)
else:
# If we're picking outside the image viewport (maybe because
# camera guides are on), treat that as a de-select.
selectedPoint, selectedNormal, selectedPrimPath, \
selectedInstanceIndex, selectedTLPath, selectedTLIndex = \
[-1,-1], None, Sdf.Path.emptyPath, -1, Sdf.Path.emptyPath, -1
# Correct for high DPI displays
# Cast to int explicitly as some versions of PySide/Shiboken throw
# when converting extremely small doubles held in selectedPoint
coord = self._scaleMouseCoords(QtCore.QPoint(
int(selectedPoint[0]), int(selectedPoint[1])))
selectedPoint[0] = coord.x()
selectedPoint[1] = coord.y()
if button:
self.signalPrimSelected.emit(
selectedPrimPath, selectedInstanceIndex, selectedTLPath,
selectedTLIndex, selectedPoint, button, modifiers)
else:
self.signalPrimRollover.emit(
selectedPrimPath, selectedInstanceIndex, selectedTLPath,
selectedTLIndex, selectedPoint, modifiers)
except Tf.ErrorException as e:
# If we encounter an error, we want to continue running. Just log
# the error and continue.
sys.stderr.write(
"ERROR: Usdview encountered an error while picking."
"{}\n".format(e))
finally:
renderer = None
def glDraw(self):
# override glDraw so we can time it.
with Timer() as t:
QtOpenGL.QGLWidget.glDraw(self)
# Renderer creation is a deferred operation, so the renderer may not
# be initialized on entry to this function.
#
# This function itself cannot create the renderer: creating it requires
# a valid GL context, which Qt has not made current yet.
#
# So instead, check that the renderer has been created after the fact.
# The point is to avoid reporting an invalid first-image time.
if not self._renderer:
# error has already been issued
return
self._renderTime = t.interval
# If timings are being printed and this is the first time an image is
# being drawn, report how long it took to do so.
if self._printTiming and self._isFirstImage:
self._isFirstImage = False
t.PrintTime("create first image")
def SetForceRefresh(self, val):
self._forceRefresh = val or self._forceRefresh
def ExportFreeCameraToStage(self, stage, defcamName='usdviewCam',
imgWidth=None, imgHeight=None):
'''
Export the free camera to the specified USD stage, if it is
currently defined. If it is not active (i.e. we are viewing through
a stage camera), raise a ValueError.
'''
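# Illustrative usage (the stage and view names below are hypothetical):
#   stage = Usd.Stage.CreateInMemory()
#   stageView.ExportFreeCameraToStage(stage, 'usdviewCam', 1920, 1080)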
if not self._dataModel.viewSettings.freeCamera:
raise ValueError("StageView's Free Camera is not defined, so cannot"
" be exported")
imgWidth = imgWidth if imgWidth is not None else self.width()
imgHeight = imgHeight if imgHeight is not None else self.height()
defcam = UsdGeom.Camera.Define(stage, '/'+defcamName)
# Map free camera params to usd camera. We do **not** want to burn
# auto-clipping near/far into our exported camera
gfCamera = self._dataModel.viewSettings.freeCamera.computeGfCamera(self._bbox, autoClip=False)
targetAspect = float(imgWidth) / max(1.0, imgHeight)
CameraUtil.ConformWindow(
gfCamera, CameraUtil.MatchVertically, targetAspect)
when = (self._dataModel.currentFrame
if stage.HasAuthoredTimeCodeRange() else Usd.TimeCode.Default())
defcam.SetFromCamera(gfCamera, when)
def ExportSession(self, stagePath, defcamName='usdviewCam',
imgWidth=None, imgHeight=None):
'''
Export the free camera (if currently active) and session layer to a
USD file at the specified stagePath that references the current-viewed
stage.
'''
tmpStage = Usd.Stage.CreateNew(stagePath)
if self._dataModel.stage:
tmpStage.GetRootLayer().TransferContent(
self._dataModel.stage.GetSessionLayer())
if not self.cameraPrim:
# Export the free camera if it's the currently-visible camera
self.ExportFreeCameraToStage(tmpStage, defcamName, imgWidth,
imgHeight)
tmpStage.GetRootLayer().Save()
del tmpStage
# Reopen just the tmp layer, to sublayer in the pose cache without
# incurring Usd composition cost.
if self._dataModel.stage:
from pxr import Sdf
sdfLayer = Sdf.Layer.FindOrOpen(stagePath)
sdfLayer.subLayerPaths.append(
os.path.abspath(
self._dataModel.stage.GetRootLayer().realPath))
sdfLayer.Save()
def _primSelectionChanged(self):
# set highlighted paths to renderer
self.updateSelection()
self.update()
| [] | [] | [
"USDVIEW_ENABLE_MSAA",
"USDVIEW_OCIO_LUT3D_EDGE_SIZE"
] | [] | ["USDVIEW_ENABLE_MSAA", "USDVIEW_OCIO_LUT3D_EDGE_SIZE"] | python | 2 | 0 | |
source/git/gitsource.go | package git
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets"
"github.com/moby/buildkit/session/sshforward"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/source"
srctypes "github.com/moby/buildkit/source/types"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/progress/logs"
"github.com/moby/buildkit/util/urlutil"
"github.com/moby/locker"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`)
var defaultBranch = regexp.MustCompile(`refs/heads/(\S+)`)
type Opt struct {
CacheAccessor cache.Accessor
}
type gitSource struct {
cache cache.Accessor
locker *locker.Locker
}
// Supported returns nil if the system supports Git source
func Supported() error {
if err := exec.Command("git", "version").Run(); err != nil {
return errors.Wrap(err, "failed to find git binary")
}
return nil
}
func NewSource(opt Opt) (source.Source, error) {
gs := &gitSource{
cache: opt.CacheAccessor,
locker: locker.New(),
}
return gs, nil
}
func (gs *gitSource) ID() string {
return srctypes.GitScheme
}
// mountRemote must be called with the repo lock held
func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []string, g session.Group) (target string, release func(), retErr error) {
sis, err := searchGitRemote(ctx, gs.cache, remote)
if err != nil {
return "", nil, errors.Wrapf(err, "failed to search metadata for %s", urlutil.RedactCredentials(remote))
}
var remoteRef cache.MutableRef
for _, si := range sis {
remoteRef, err = gs.cache.GetMutable(ctx, si.ID())
if err != nil {
if errors.Is(err, cache.ErrLocked) {
// should never really happen as no other function should access this metadata, but let's be graceful
bklog.G(ctx).Warnf("mutable ref for %s %s was locked: %v", urlutil.RedactCredentials(remote), si.ID(), err)
continue
}
return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", urlutil.RedactCredentials(remote))
}
break
}
initializeRepo := false
if remoteRef == nil {
remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", urlutil.RedactCredentials(remote))))
if err != nil {
return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", urlutil.RedactCredentials(remote))
}
initializeRepo = true
}
releaseRemoteRef := func() {
remoteRef.Release(context.TODO())
}
defer func() {
if retErr != nil && remoteRef != nil {
releaseRemoteRef()
}
}()
mount, err := remoteRef.Mount(ctx, false, g)
if err != nil {
return "", nil, err
}
lm := snapshot.LocalMounter(mount)
dir, err := lm.Mount()
if err != nil {
return "", nil, err
}
defer func() {
if retErr != nil {
lm.Unmount()
}
}()
if initializeRepo {
if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "init", "--bare"); err != nil {
return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir)
}
if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "remote", "add", "origin", remote); err != nil {
return "", nil, errors.Wrapf(err, "failed add origin repo at %s", dir)
}
// save new remote metadata
md := cacheRefMetadata{remoteRef}
if err := md.setGitRemote(remote); err != nil {
return "", nil, err
}
}
return dir, func() {
lm.Unmount()
releaseRemoteRef()
}, nil
}
type gitSourceHandler struct {
*gitSource
src source.GitIdentifier
cacheKey string
sm *session.Manager
auth []string
}
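// shaToCacheKey derives the content cache key for a resolved commit: the SHA,
// with ".git" appended when the .git directory is kept and ":<subdir>" appended
// when a subdirectory is selected.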
func (gs *gitSourceHandler) shaToCacheKey(sha string) string {
key := sha
if gs.src.KeepGitDir {
key += ".git"
}
if gs.src.Subdir != "" {
key += ":" + gs.src.Subdir
}
return key
}
func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, _ solver.Vertex) (source.SourceInstance, error) {
gitIdentifier, ok := id.(*source.GitIdentifier)
if !ok {
return nil, errors.Errorf("invalid git identifier %v", id)
}
return &gitSourceHandler{
src: *gitIdentifier,
gitSource: gs,
sm: sm,
}, nil
}
type authSecret struct {
token bool
name string
}
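// authSecretNames returns the candidate secret names to look up for this
// remote: host-scoped names (e.g. "<secret>.<host>") first, then the plain
// secret names.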
func (gs *gitSourceHandler) authSecretNames() (sec []authSecret, _ error) {
u, err := url.Parse(gs.src.Remote)
if err != nil {
return nil, err
}
if gs.src.AuthHeaderSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthHeaderSecret + "." + u.Host})
}
if gs.src.AuthTokenSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthTokenSecret + "." + u.Host, token: true})
}
if gs.src.AuthHeaderSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthHeaderSecret})
}
if gs.src.AuthTokenSecret != "" {
sec = append(sec, authSecret{name: gs.src.AuthTokenSecret, token: true})
}
return sec, nil
}
func (gs *gitSourceHandler) getAuthToken(ctx context.Context, g session.Group) error {
if gs.auth != nil {
return nil
}
sec, err := gs.authSecretNames()
if err != nil {
return err
}
return gs.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error {
for _, s := range sec {
dt, err := secrets.GetSecret(ctx, caller, s.name)
if err != nil {
if errors.Is(err, secrets.ErrNotFound) {
continue
}
return err
}
if s.token {
dt = []byte("basic " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("x-access-token:%s", dt))))
}
gs.auth = []string{"-c", "http." + tokenScope(gs.src.Remote) + ".extraheader=Authorization: " + string(dt)}
break
}
return nil
})
}
func (gs *gitSourceHandler) mountSSHAuthSock(ctx context.Context, sshID string, g session.Group) (string, func() error, error) {
var caller session.Caller
err := gs.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error {
if err := sshforward.CheckSSHID(ctx, c, sshID); err != nil {
if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
return errors.Errorf("no SSH key %q forwarded from the client", sshID)
}
return err
}
caller = c
return nil
})
if err != nil {
return "", nil, err
}
usr, err := user.Current()
if err != nil {
return "", nil, err
}
// best effort, default to root
uid, _ := strconv.Atoi(usr.Uid)
gid, _ := strconv.Atoi(usr.Gid)
sock, cleanup, err := sshforward.MountSSHSocket(ctx, caller, sshforward.SocketOpt{
ID: sshID,
UID: uid,
GID: gid,
Mode: 0700,
})
if err != nil {
return "", nil, err
}
return sock, cleanup, nil
}
func (gs *gitSourceHandler) mountKnownHosts(ctx context.Context) (string, func() error, error) {
if gs.src.KnownSSHHosts == "" {
return "", nil, errors.Errorf("no configured known hosts forwarded from the client")
}
knownHosts, err := ioutil.TempFile("", "")
if err != nil {
return "", nil, err
}
cleanup := func() error {
return os.Remove(knownHosts.Name())
}
_, err = knownHosts.Write([]byte(gs.src.KnownSSHHosts))
if err != nil {
cleanup()
return "", nil, err
}
err = knownHosts.Close()
if err != nil {
cleanup()
return "", nil, err
}
return knownHosts.Name(), cleanup, nil
}
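// CacheKey resolves gs.src.Ref to a commit SHA (via "git ls-remote", unless the
// ref is already a full SHA) and returns the cache key derived from that SHA.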
func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
remote := gs.src.Remote
gs.locker.Lock(remote)
defer gs.locker.Unlock(remote)
if ref := gs.src.Ref; ref != "" && isCommitSHA(ref) {
cacheKey := gs.shaToCacheKey(ref)
gs.cacheKey = cacheKey
return cacheKey, ref, nil, true, nil
}
gs.getAuthToken(ctx, g)
gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote, gs.auth, g)
if err != nil {
return "", "", nil, false, err
}
defer unmountGitDir()
var sock string
if gs.src.MountSSHSock != "" {
var unmountSock func() error
sock, unmountSock, err = gs.mountSSHAuthSock(ctx, gs.src.MountSSHSock, g)
if err != nil {
return "", "", nil, false, err
}
defer unmountSock()
}
var knownHosts string
if gs.src.KnownSSHHosts != "" {
var unmountKnownHosts func() error
knownHosts, unmountKnownHosts, err = gs.mountKnownHosts(ctx)
if err != nil {
return "", "", nil, false, err
}
defer unmountKnownHosts()
}
ref := gs.src.Ref
if ref == "" {
ref, err = getDefaultBranch(ctx, gitDir, "", sock, knownHosts, gs.auth, gs.src.Remote)
if err != nil {
return "", "", nil, false, err
}
}
// TODO: should we assume that remote tag is immutable? add a timer?
buf, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "ls-remote", "origin", ref)
if err != nil {
return "", "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", urlutil.RedactCredentials(remote))
}
out := buf.String()
idx := strings.Index(out, "\t")
if idx == -1 {
return "", "", nil, false, errors.Errorf("repository does not contain ref %s, output: %q", ref, string(out))
}
sha := string(out[:idx])
if !isCommitSHA(sha) {
return "", "", nil, false, errors.Errorf("invalid commit sha %q", sha)
}
cacheKey := gs.shaToCacheKey(sha)
gs.cacheKey = cacheKey
return cacheKey, sha, nil, true, nil
}
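// Snapshot materializes a checkout for the resolved ref: it reuses an existing
// snapshot when one is recorded for this cache key, otherwise it fetches into
// the shared bare repository and checks the ref out (including submodules)
// into a new cache ref.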
func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out cache.ImmutableRef, retErr error) {
cacheKey := gs.cacheKey
if cacheKey == "" {
var err error
cacheKey, _, _, _, err = gs.CacheKey(ctx, g, 0)
if err != nil {
return nil, err
}
}
gs.getAuthToken(ctx, g)
snapshotKey := cacheKey + ":" + gs.src.Subdir
gs.locker.Lock(snapshotKey)
defer gs.locker.Unlock(snapshotKey)
sis, err := searchGitSnapshot(ctx, gs.cache, snapshotKey)
if err != nil {
return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey)
}
if len(sis) > 0 {
return gs.cache.Get(ctx, sis[0].ID())
}
gs.locker.Lock(gs.src.Remote)
defer gs.locker.Unlock(gs.src.Remote)
gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote, gs.auth, g)
if err != nil {
return nil, err
}
defer unmountGitDir()
var sock string
if gs.src.MountSSHSock != "" {
var unmountSock func() error
sock, unmountSock, err = gs.mountSSHAuthSock(ctx, gs.src.MountSSHSock, g)
if err != nil {
return nil, err
}
defer unmountSock()
}
var knownHosts string
if gs.src.KnownSSHHosts != "" {
var unmountKnownHosts func() error
knownHosts, unmountKnownHosts, err = gs.mountKnownHosts(ctx)
if err != nil {
return nil, err
}
defer unmountKnownHosts()
}
ref := gs.src.Ref
if ref == "" {
ref, err = getDefaultBranch(ctx, gitDir, "", sock, knownHosts, gs.auth, gs.src.Remote)
if err != nil {
return nil, err
}
}
doFetch := true
if isCommitSHA(ref) {
// skip fetch if commit already exists
if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, nil, "cat-file", "-e", ref+"^{commit}"); err == nil {
doFetch = false
}
}
if doFetch {
// make sure no old lock files have leaked
os.RemoveAll(filepath.Join(gitDir, "shallow.lock"))
args := []string{"fetch"}
if !isCommitSHA(ref) { // TODO: find a branch from ls-remote?
args = append(args, "--depth=1", "--no-tags")
} else {
if _, err := os.Lstat(filepath.Join(gitDir, "shallow")); err == nil {
args = append(args, "--unshallow")
}
}
args = append(args, "origin")
if !isCommitSHA(ref) {
args = append(args, "--force", ref+":tags/"+ref)
// local refs are needed so they will be advertised on subsequent fetches. --force is
// used in case the ref is a branch that now points to a different commit SHA.
// TODO: is there a better way to do this?
}
if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, args...); err != nil {
return nil, errors.Wrapf(err, "failed to fetch remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
}
checkoutRef, err := gs.cache.New(ctx, nil, g, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref)))
if err != nil {
return nil, errors.Wrapf(err, "failed to create new mutable for %s", urlutil.RedactCredentials(gs.src.Remote))
}
defer func() {
if retErr != nil && checkoutRef != nil {
checkoutRef.Release(context.TODO())
}
}()
mount, err := checkoutRef.Mount(ctx, false, g)
if err != nil {
return nil, err
}
lm := snapshot.LocalMounter(mount)
checkoutDir, err := lm.Mount()
if err != nil {
return nil, err
}
defer func() {
if retErr != nil && lm != nil {
lm.Unmount()
}
}()
subdir := path.Clean(gs.src.Subdir)
if subdir == "/" {
subdir = "."
}
if gs.src.KeepGitDir && subdir == "." {
checkoutDirGit := filepath.Join(checkoutDir, ".git")
if err := os.MkdirAll(checkoutDir, 0711); err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "init")
if err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "remote", "add", "origin", gitDir)
if err != nil {
return nil, err
}
pullref := ref
if isCommitSHA(ref) {
pullref = "refs/buildkit/" + identity.NewID()
_, err = gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "update-ref", pullref, ref)
if err != nil {
return nil, err
}
} else {
pullref += ":" + pullref
}
_, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, gs.auth, "fetch", "-u", "--depth=1", "origin", pullref)
if err != nil {
return nil, err
}
_, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, sock, knownHosts, nil, "checkout", "FETCH_HEAD")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
gitDir = checkoutDirGit
} else {
cd := checkoutDir
if subdir != "." {
cd, err = ioutil.TempDir(cd, "checkout")
if err != nil {
return nil, errors.Wrapf(err, "failed to create temporary checkout dir")
}
}
_, err = gitWithinDir(ctx, gitDir, cd, sock, knownHosts, nil, "checkout", ref, "--", ".")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", urlutil.RedactCredentials(gs.src.Remote))
}
if subdir != "." {
d, err := os.Open(filepath.Join(cd, subdir))
if err != nil {
return nil, errors.Wrapf(err, "failed to open subdir %v", subdir)
}
defer func() {
if d != nil {
d.Close()
}
}()
names, err := d.Readdirnames(0)
if err != nil {
return nil, err
}
for _, n := range names {
if err := os.Rename(filepath.Join(cd, subdir, n), filepath.Join(checkoutDir, n)); err != nil {
return nil, err
}
}
if err := d.Close(); err != nil {
return nil, err
}
d = nil // reset defer
if err := os.RemoveAll(cd); err != nil {
return nil, err
}
}
}
_, err = gitWithinDir(ctx, gitDir, checkoutDir, sock, knownHosts, gs.auth, "submodule", "update", "--init", "--recursive", "--depth=1")
if err != nil {
return nil, errors.Wrapf(err, "failed to update submodules for %s", urlutil.RedactCredentials(gs.src.Remote))
}
if idmap := mount.IdentityMapping(); idmap != nil {
u := idmap.RootPair()
err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error {
return os.Lchown(p, u.UID, u.GID)
})
if err != nil {
return nil, errors.Wrap(err, "failed to remap git checkout")
}
}
lm.Unmount()
lm = nil
snap, err := checkoutRef.Commit(ctx)
if err != nil {
return nil, err
}
checkoutRef = nil
defer func() {
if retErr != nil {
snap.Release(context.TODO())
}
}()
md := cacheRefMetadata{snap}
if err := md.setGitSnapshot(snapshotKey); err != nil {
return nil, err
}
return snap, nil
}
func isCommitSHA(str string) bool {
return validHex.MatchString(str)
}
func gitWithinDir(ctx context.Context, gitDir, workDir, sshAuthSock, knownHosts string, auth []string, args ...string) (*bytes.Buffer, error) {
a := append([]string{"--git-dir", gitDir}, auth...)
if workDir != "" {
a = append(a, "--work-tree", workDir)
}
return git(ctx, workDir, sshAuthSock, knownHosts, append(a, args...)...)
}
func getGitSSHCommand(knownHosts string) string {
gitSSHCommand := "ssh -F /dev/null"
if knownHosts != "" {
gitSSHCommand += " -o UserKnownHostsFile=" + knownHosts
} else {
gitSSHCommand += " -o StrictHostKeyChecking=no"
}
return gitSSHCommand
}
func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...string) (*bytes.Buffer, error) {
for {
stdout, stderr := logs.NewLogStreams(ctx, false)
defer stdout.Close()
defer stderr.Close()
cmd := exec.Command("git", args...)
cmd.Dir = dir // some commands like submodule require this
buf := bytes.NewBuffer(nil)
errbuf := bytes.NewBuffer(nil)
cmd.Stdin = nil
cmd.Stdout = io.MultiWriter(stdout, buf)
cmd.Stderr = io.MultiWriter(stderr, errbuf)
cmd.Env = []string{
"PATH=" + os.Getenv("PATH"),
"GIT_TERMINAL_PROMPT=0",
"GIT_SSH_COMMAND=" + getGitSSHCommand(knownHosts),
// "GIT_TRACE=1",
}
if sshAuthSock != "" {
cmd.Env = append(cmd.Env, "SSH_AUTH_SOCK="+sshAuthSock)
}
// remote git commands spawn helper processes that inherit FDs and don't
// handle the parent death signal, so exec.CommandContext can't be used
err := runProcessGroup(ctx, cmd)
if err != nil {
if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") {
if newArgs := argsNoDepth(args); len(args) > len(newArgs) {
args = newArgs
continue
}
}
}
return buf, err
}
}
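// argsNoDepth returns a copy of args with any "--depth=1" flag removed; git()
// uses it to retry a fetch without shallow options when the remote rejects
// shallow fetches.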
func argsNoDepth(args []string) []string {
out := make([]string, 0, len(args))
for _, a := range args {
if a != "--depth=1" {
out = append(out, a)
}
}
return out
}
func tokenScope(remote string) string {
// generally we can only use the token for fetching the main remote, but in the case of github.com we make a
// best effort to reuse the same token for all github.com remotes. This is the same behavior actions/checkout uses.
for _, pfx := range []string{"https://github.com/", "https://www.github.com/"} {
if strings.HasPrefix(remote, pfx) {
return pfx
}
}
return remote
}
// getDefaultBranch gets the default branch of a repository using ls-remote
func getDefaultBranch(ctx context.Context, gitDir, workDir, sshAuthSock, knownHosts string, auth []string, remoteURL string) (string, error) {
buf, err := gitWithinDir(ctx, gitDir, workDir, sshAuthSock, knownHosts, auth, "ls-remote", "--symref", remoteURL, "HEAD")
if err != nil {
return "", errors.Wrapf(err, "error fetching default branch for repository %s", urlutil.RedactCredentials(remoteURL))
}
ss := defaultBranch.FindAllStringSubmatch(buf.String(), -1)
if len(ss) == 0 || len(ss[0]) != 2 {
return "", errors.Errorf("could not find default branch for repository: %s", urlutil.RedactCredentials(remoteURL))
}
return ss[0][1], nil
}
const keyGitRemote = "git-remote"
const gitRemoteIndex = keyGitRemote + "::"
const keyGitSnapshot = "git-snapshot"
const gitSnapshotIndex = keyGitSnapshot + "::"
func search(ctx context.Context, store cache.MetadataStore, key string, idx string) ([]cacheRefMetadata, error) {
var results []cacheRefMetadata
mds, err := store.Search(ctx, idx+key)
if err != nil {
return nil, err
}
for _, md := range mds {
results = append(results, cacheRefMetadata{md})
}
return results, nil
}
func searchGitRemote(ctx context.Context, store cache.MetadataStore, remote string) ([]cacheRefMetadata, error) {
return search(ctx, store, remote, gitRemoteIndex)
}
func searchGitSnapshot(ctx context.Context, store cache.MetadataStore, key string) ([]cacheRefMetadata, error) {
return search(ctx, store, key, gitSnapshotIndex)
}
type cacheRefMetadata struct {
cache.RefMetadata
}
func (md cacheRefMetadata) setGitSnapshot(key string) error {
return md.SetString(keyGitSnapshot, key, gitSnapshotIndex+key)
}
func (md cacheRefMetadata) setGitRemote(key string) error {
return md.SetString(keyGitRemote, key, gitRemoteIndex+key)
}
| [
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
dpyr/tests/test_api.py | import os
import operator
from typing import Union, Type, Callable
import pytest
import _pytest as pt
import pandas as pd
import pandas.util.testing as tm
import ibis
import ibis.expr.types as ir
import ibis.expr.datatypes as dt
from dpyr import (
anti_join,
cast,
desc,
distinct,
do,
groupby,
head,
inner_join,
join,
left_join,
log,
max,
mean,
min,
mutate,
n,
nullif,
nunique,
outer_join,
right_join,
select,
semi_join,
sift,
sort_by,
std,
sum,
summarize,
transmute,
var,
X, Y,
exp,
ln,
log2,
log10,
floor,
ceil,
abs,
round,
sign,
sqrt,
lower,
upper,
)
from dpyr.core import Unary
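# The fixtures below resolve their data sources from environment variables
# (DIAMONDS_CSV, BATTING_CSV, TEST_SQLITE_DB), falling back to files in the
# current working directory.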
@pytest.fixture(scope='module')
def df() -> pd.DataFrame:
path = os.environ.get('DIAMONDS_CSV', 'diamonds.csv')
return pd.read_csv(path, index_col=None)
@pytest.fixture(scope='module')
def batting_df() -> pd.DataFrame:
path = os.environ.get('BATTING_CSV', 'batting.csv')
return pd.read_csv(path, index_col=None)
@pytest.fixture(
params=[
# ibis.postgres.connect(
# database=os.environ.get('TEST_POSTGRES_DB', 'ibis_testing')
# ),
ibis.sqlite.connect(
os.environ.get('TEST_SQLITE_DB', 'ibis_testing.db')
),
# ibis.pandas.connect({
# 'diamonds': df(), 'other_diamonds': df(), 'batting': batting_df()
# })
],
scope='module',
)
def client(request: pt.fixtures.FixtureRequest) -> ibis.client.Client:
return request.param
@pytest.fixture
def diamonds(client: ibis.client.Client) -> ir.TableExpr:
return client.table('diamonds').head(1000)
@pytest.fixture
def batting(client: ibis.client.Client) -> ir.TableExpr:
return client.table('batting')
@pytest.fixture
def awards_players(client: ibis.client.Client) -> ir.TableExpr:
return client.table('awards_players')
@pytest.fixture
def other_diamonds(client: ibis.client.Client) -> ir.TableExpr:
return client.table('diamonds').view().head(1000)
def test_compound_expression(diamonds: ir.TableExpr) -> None:
expected = diamonds[diamonds.price * diamonds.price / 2.0 >= 100]
expected = expected.groupby('cut').aggregate([
expected.carat.max().name('max_carat'),
expected.carat.mean().name('mean_carat'),
expected.carat.min().name('min_carat'),
expected.x.count().name('n'),
expected.carat.std().name('std_carat'),
expected.carat.sum().name('sum_carat'),
expected.carat.var().name('var_carat'),
])
expected = expected.mutate(
foo=expected.mean_carat,
bar=expected.var_carat
).sort_by([ibis.desc('foo'), 'bar']).head()
result = (
diamonds >> sift(X.price * X.price / 2.0 >= 100)
>> groupby(X.cut)
>> summarize(
max_carat=max(X.carat),
mean_carat=mean(X.carat),
min_carat=min(X.carat),
n=n(X.x),
std_carat=std(X.carat),
sum_carat=sum(X.carat),
var_carat=var(X.carat),
)
>> mutate(foo=X.mean_carat, bar=X.var_carat)
>> sort_by(desc(X.foo), X.bar)
>> head(5)
)
assert result.equals(expected)
tm.assert_frame_equal(expected.execute(), result >> do())
@pytest.mark.parametrize(
'join_func',
[
inner_join,
left_join,
pytest.mark.xfail(right_join, raises=KeyError),
outer_join,
semi_join,
anti_join,
]
)
def test_join(
diamonds: ir.TableExpr,
other_diamonds: ir.TableExpr,
join_func: Type[join]
) -> None:
result = (
diamonds >> join_func(other_diamonds, on=X.cut == Y.cut)
>> select(X.x, Y.y)
)
join_func_name = join_func.__name__ # type: str
joined = getattr(diamonds, join_func_name)(
other_diamonds, diamonds.cut == other_diamonds.cut
)
expected = joined[diamonds.x, other_diamonds.y]
assert result.equals(expected)
@pytest.mark.parametrize(
'column',
[
'carat',
'cut',
'color',
'clarity',
'depth',
'table',
'price',
'x',
'y',
'z',
0,
] + list(range(1, 10))
)
def test_pull(diamonds: ir.TableExpr, column: Union[str, int]) -> None:
result = diamonds >> X[column]
expected = diamonds[column]
assert result.equals(expected)
tm.assert_series_equal(expected.execute(), result >> do())
def test_do(diamonds: ir.TableExpr) -> None:
tm.assert_frame_equal(diamonds.execute(), diamonds >> do())
def test_simple_arithmetic(diamonds: ir.TableExpr) -> None:
result = diamonds >> mean(X.carat) + 1
expected = diamonds.carat.mean() + 1
assert result.equals(expected)
assert float(expected.execute()) == float(result >> do())
def test_mutate(diamonds: ir.TableExpr) -> None:
result = diamonds >> mutate(new_column=X.carat + 1)
expected = diamonds.mutate(new_column=lambda x: x.carat + 1)
assert result.equals(expected)
tm.assert_frame_equal(expected.execute(), result >> do())
def test_transmute(diamonds: ir.TableExpr) -> None:
result = diamonds >> transmute(new_column=X.carat * 2)
expected = diamonds[[(diamonds.carat * 2).name('new_column')]]
assert result.equals(expected)
tm.assert_frame_equal(expected.execute(), result >> do())
@pytest.mark.parametrize('to', ['string', dt.string])
def test_cast(
diamonds: ir.TableExpr, to: Union[str, dt.DataType]
) -> None:
result = diamonds >> cast(X.carat + 1, to=to)
expected = (diamonds.carat + 1).cast(to)
assert result.equals(expected)
tm.assert_series_equal(expected.execute(), result >> do())
@pytest.mark.parametrize(
'column',
[
'carat',
'cut',
'color',
'clarity',
'depth',
'table',
'price',
'x',
'y',
'z',
]
)
def test_distinct(diamonds: ir.TableExpr, column: str) -> None:
result = diamonds >> distinct(X[column])
expected = diamonds[column].distinct()
assert result.equals(expected)
tm.assert_series_equal(expected.execute(), result >> do())
@pytest.mark.parametrize(
'column',
[
'carat',
'cut',
'color',
'clarity',
'depth',
'table',
'price',
'x',
'y',
'z',
]
)
def test_nunique(diamonds: ir.TableExpr, column: str) -> None:
result = diamonds >> nunique(X[column])
expected = diamonds[column].nunique()
assert result.equals(expected)
assert expected.execute() == result >> do()
@pytest.mark.parametrize(
'func',
[
exp,
ln,
log2,
log10,
floor,
ceil,
abs,
sign,
sqrt,
]
)
def test_unary_math(diamonds: ir.TableExpr, func: Type[Unary]) -> None:
result = diamonds >> func(cast(X.carat, to=dt.Decimal(19, 7)))
expected = getattr(diamonds.carat.cast(dt.Decimal(19, 7)), func.__name__)()
assert result.equals(expected)
tm.assert_series_equal(result >> do(), expected.execute())
@pytest.mark.parametrize(
'func',
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.pow,
operator.mod,
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
]
)
def test_binary_math(diamonds: ir.TableExpr, func: Callable) -> None:
result = diamonds >> func(X.carat, X.z)
expected = func(diamonds.carat, diamonds.z)
assert result.equals(expected)
tm.assert_series_equal(result >> do(), expected.execute())
@pytest.mark.parametrize(
'base',
[-2, -1, 1, 2],
)
def test_log(diamonds: ir.TableExpr, base: int) -> None:
result_expr = diamonds >> log(nullif(X.carat, 0), base)
expected_expr = diamonds.carat.nullif(0).log(base)
assert result_expr.equals(expected_expr)
result_df = result_expr >> do()
expected_df = expected_expr.execute()
tm.assert_series_equal(result_df, expected_df)
@pytest.mark.parametrize('places', list(range(-5, 6)))
def test_round(diamonds: ir.TableExpr, places: int) -> None:
result = diamonds >> round(X.carat, places)
expected = diamonds.carat.round(places)
assert result.equals(expected)
tm.assert_series_equal(result >> do(), expected.execute())
@pytest.mark.parametrize('func', [lower, upper])
def test_unary_string(diamonds: ir.TableExpr, func: Type[Unary]) -> None:
result = diamonds >> func(X.cut)
expected = getattr(diamonds.cut, func.__name__)()
assert result.equals(expected)
tm.assert_series_equal(result >> do(), expected.execute())
def test_column_slice(batting: ir.TableExpr) -> None:
result = batting >> select(
X.playerID, X.yearID, X.teamID, X.G, X['AB':'H']
)
columns = batting.columns
expected = batting[
['playerID', 'yearID', 'teamID', 'G'] + [
columns[i]
for i in range(columns.index('AB'), columns.index('H') + 1)
]
]
assert result.equals(expected)
| [] | [] | [
"BATTING_CSV",
"DIAMONDS_CSV",
"TEST_POSTGRES_DB",
"TEST_SQLITE_DB"
] | [] | ["BATTING_CSV", "DIAMONDS_CSV", "TEST_POSTGRES_DB", "TEST_SQLITE_DB"] | python | 4 | 0 | |
src/java/org/apache/sqoop/mapreduce/JobBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sqoop.mapreduce;
import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.sqoop.config.ConfigurationConstants;
import org.apache.sqoop.SqoopOptions;
import org.apache.sqoop.config.ConfigurationHelper;
import org.apache.sqoop.manager.ConnManager;
import org.apache.sqoop.tool.SqoopTool;
import org.apache.sqoop.util.ClassLoaderStack;
import org.apache.sqoop.util.Jars;
import org.apache.sqoop.validation.*;
/**
* Base class for configuring and running a MapReduce job.
* Allows dependency injection, etc., for easy customization of import job types.
*/
public class JobBase {
public static final Log LOG = LogFactory.getLog(JobBase.class.getName());
public static final String SERIALIZE_SQOOPOPTIONS = "sqoop.jobbase.serialize.sqoopoptions";
public static final boolean SERIALIZE_SQOOPOPTIONS_DEFAULT = false;
public static final String HADOOP_MAP_TASK_MAX_ATTEMTPS =
"mapreduce.map.maxattempts";
public static final String HADOOP_REDUCE_TASK_MAX_ATTEMTPS =
"mapreduce.reduce.maxattempts";
protected SqoopOptions options;
protected Class<? extends Mapper> mapperClass;
protected Class<? extends InputFormat> inputFormatClass;
protected Class<? extends OutputFormat> outputFormatClass;
private Job mrJob;
private ClassLoader prevClassLoader = null;
protected final boolean isHCatJob;
public static final String PROPERTY_VERBOSE = "sqoop.verbose";
public JobBase() {
this(null);
}
public JobBase(final SqoopOptions opts) {
this(opts, null, null, null);
}
public JobBase(final SqoopOptions opts,
final Class<? extends Mapper> mapperClass,
final Class<? extends InputFormat> inputFormatClass,
final Class<? extends OutputFormat> outputFormatClass) {
this.options = opts;
this.mapperClass = mapperClass;
this.inputFormatClass = inputFormatClass;
this.outputFormatClass = outputFormatClass;
isHCatJob = options.getHCatTableName() != null;
}
/**
* @return the mapper class to use for the job.
*/
protected Class<? extends Mapper> getMapperClass()
throws ClassNotFoundException {
return this.mapperClass;
}
/**
* @return the inputformat class to use for the job.
*/
protected Class<? extends InputFormat> getInputFormatClass()
throws ClassNotFoundException {
return this.inputFormatClass;
}
/**
* @return the outputformat class to use for the job.
*/
protected Class<? extends OutputFormat> getOutputFormatClass()
throws ClassNotFoundException {
return this.outputFormatClass;
}
/** Set the OutputFormat class to use for this job. */
public void setOutputFormatClass(Class<? extends OutputFormat> cls) {
this.outputFormatClass = cls;
}
/** Set the InputFormat class to use for this job. */
public void setInputFormatClass(Class<? extends InputFormat> cls) {
this.inputFormatClass = cls;
}
/** Set the Mapper class to use for this job. */
public void setMapperClass(Class<? extends Mapper> cls) {
this.mapperClass = cls;
}
/**
* Set the SqoopOptions configuring this job.
*/
public void setOptions(SqoopOptions opts) {
this.options = opts;
}
/**
* Put jar files required by Sqoop into the DistributedCache.
* @param job the Job being submitted.
* @param mgr the ConnManager to use.
*/
protected void cacheJars(Job job, ConnManager mgr)
throws IOException {
if (options.isSkipDistCache()) {
LOG.info("Not adding sqoop jars to distributed cache as requested");
return;
}
Configuration conf = job.getConfiguration();
FileSystem fs = FileSystem.getLocal(conf);
Set<String> localUrls = new HashSet<String>();
addToCache(Jars.getSqoopJarPath(), fs, localUrls);
if (null != mgr) {
addToCache(Jars.getDriverClassJar(mgr), fs, localUrls);
addToCache(Jars.getJarPathForClass(mgr.getClass()), fs, localUrls);
}
SqoopTool tool = this.options.getActiveSqoopTool();
if (null != tool) {
// Make sure the jar for the tool itself is on the classpath. (In case
// this is a third-party plugin tool.)
addToCache(Jars.getJarPathForClass(tool.getClass()), fs, localUrls);
List<String> toolDeps = tool.getDependencyJars();
if (null != toolDeps) {
for (String depFile : toolDeps) {
addToCache(depFile, fs, localUrls);
}
}
}
// Add anything in $SQOOP_HOME/lib, if this is set.
String sqoopHome = System.getenv("SQOOP_HOME");
if (null != sqoopHome) {
File sqoopHomeFile = new File(sqoopHome);
File sqoopLibFile = new File(sqoopHomeFile, "lib");
if (sqoopLibFile.exists()) {
addDirToCache(sqoopLibFile, fs, localUrls);
}
} else {
LOG.warn("SQOOP_HOME is unset. May not be able to find "
+ "all job dependencies.");
}
// If the user run import into hive as Parquet file,
// Add anything in $HIVE_HOME/lib.
if (options.doHiveImport() && (options.getFileLayout() == SqoopOptions.FileLayout.ParquetFile)) {
String hiveHome = options.getHiveHome();
if (null != hiveHome) {
File hiveHomeFile = new File(hiveHome);
File hiveLibFile = new File(hiveHomeFile, "lib");
if (hiveLibFile.exists()) {
addDirToCache(hiveLibFile, fs, localUrls);
}
} else {
LOG.warn("HIVE_HOME is unset. Cannot add hive libs as dependencies.");
}
}
String tmpjars = conf.get(ConfigurationConstants.MAPRED_DISTCACHE_CONF_PARAM);
StringBuilder sb = new StringBuilder();
// If we didn't put anything in our set, then there's nothing to cache.
if (localUrls.isEmpty() && (org.apache.commons.lang.StringUtils.isEmpty(tmpjars))) {
return;
}
if (null != tmpjars) {
String[] tmpjarsElements = tmpjars.split(",");
for (String jarElement : tmpjarsElements) {
if (jarElement.isEmpty()) {
warn("Empty input is invalid and was removed from tmpjars.");
} else {
sb.append(jarElement);
sb.append(",");
}
}
}
int lastComma = sb.lastIndexOf(",");
if (localUrls.isEmpty() && lastComma >= 0) {
sb.deleteCharAt(lastComma);
}
// Add these to the 'tmpjars' array, which the MR JobSubmitter
// will upload to HDFS and put in the DistributedCache libjars.
sb.append(StringUtils.arrayToString(localUrls.toArray(new String[0])));
conf.set(ConfigurationConstants.MAPRED_DISTCACHE_CONF_PARAM, sb.toString());
}
protected void warn(String message) {
LOG.warn(message);
}
private void addToCache(String file, FileSystem fs, Set<String> localUrls) {
if (null == file) {
return;
}
Path p = new Path(file);
String qualified = p.makeQualified(fs).toString();
LOG.debug("Adding to job classpath: " + qualified);
localUrls.add(qualified);
}
/**
* Add the .jar elements of a directory to the DCache classpath,
* nonrecursively.
*/
private void addDirToCache(File dir, FileSystem fs, Set<String> localUrls) {
if (null == dir) {
return;
}
for (File libfile : dir.listFiles()) {
if (libfile.exists() && !libfile.isDirectory()
&& libfile.getName().endsWith("jar")) {
addToCache(libfile.toString(), fs, localUrls);
}
}
}
/**
* If jars must be loaded into the local environment, do so here.
*/
protected void loadJars(Configuration conf, String ormJarFile,
String tableClassName) throws IOException {
if (ConfigurationHelper.isLocalJobTracker(conf)) {
// If we're using the LocalJobRunner, then instead of using the compiled
// jar file as the job source, we're running in the current thread. Push
// on another classloader that loads from that jar in addition to
// everything currently on the classpath.
this.prevClassLoader = ClassLoaderStack.addJarFile(ormJarFile,
tableClassName);
}
}
/**
* If loadJars installed a special classloader, restore the previous one here.
*/
protected void unloadJars() {
if (null != this.prevClassLoader) {
// unload the special classloader for this jar.
ClassLoaderStack.setCurrentClassLoader(this.prevClassLoader);
}
}
/**
* Configure the inputformat to use for the job.
*/
protected void configureInputFormat(Job job, String tableName,
String tableClassName, String splitByCol)
throws ClassNotFoundException, IOException {
//TODO: 'splitByCol' is import-job specific; lift it out of this API.
Class<? extends InputFormat> ifClass = getInputFormatClass();
LOG.debug("Using InputFormat: " + ifClass);
job.setInputFormatClass(ifClass);
}
/**
* Configure the output format to use for the job.
*/
protected void configureOutputFormat(Job job, String tableName,
String tableClassName) throws ClassNotFoundException, IOException {
Class<? extends OutputFormat> ofClass = getOutputFormatClass();
LOG.debug("Using OutputFormat: " + ofClass);
job.setOutputFormatClass(ofClass);
}
/**
* Set the mapper class implementation to use in the job,
* as well as any related configuration (e.g., map output types).
*/
protected void configureMapper(Job job, String tableName,
String tableClassName) throws ClassNotFoundException, IOException {
job.setMapperClass(getMapperClass());
}
/**
* Configure the number of map/reduce tasks to use in the job,
* returning the number of map tasks for backward compatibility.
*/
protected int configureNumTasks(Job job) throws IOException {
int numMapTasks = configureNumMapTasks(job);
configureNumReduceTasks(job);
return numMapTasks;
}
/**
* Configure the number of map tasks to use in the job.
*/
protected int configureNumMapTasks(Job job) throws IOException {
int numMapTasks = options.getNumMappers();
if (numMapTasks < 1) {
numMapTasks = SqoopOptions.DEFAULT_NUM_MAPPERS;
LOG.warn("Invalid mapper count; using " + numMapTasks + " mappers.");
}
ConfigurationHelper.setJobNumMaps(job, numMapTasks);
return numMapTasks;
}
/**
* Configure the number of reduce tasks to use in the job.
*/
protected int configureNumReduceTasks(Job job) throws IOException {
job.setNumReduceTasks(0);
return 0;
}
/** Set the main job that will be run. */
protected void setJob(Job job) {
mrJob = job;
}
/**
* @return the main MapReduce job that is being run, or null if no
* job has started.
*/
public Job getJob() {
return mrJob;
}
/**
   * Create a new Job object in a unified way for all types of jobs.
*
* @param configuration Hadoop configuration that should be used
   * @return a new Job object; the created object is not persisted in this instance
*/
public Job createJob(Configuration configuration) throws IOException {
// Put the SqoopOptions into job if requested
    if (configuration.getBoolean(SERIALIZE_SQOOPOPTIONS, SERIALIZE_SQOOPOPTIONS_DEFAULT)) {
putSqoopOptionsToConfiguration(options, configuration);
}
return new Job(configuration);
}
/**
   * Iterates over the serialized form of SqoopOptions and puts the entries into
   * the Configuration object.
*
* @param opts SqoopOptions that should be serialized
* @param configuration Target configuration object
*/
public void putSqoopOptionsToConfiguration(SqoopOptions opts, Configuration configuration) {
for(Map.Entry<Object, Object> e : opts.writeProperties().entrySet()) {
String key = (String)e.getKey();
String value = (String)e.getValue();
// We don't need to do if(value is empty) because that is already done
// for us by the SqoopOptions.writeProperties() method.
configuration.set("sqoop.opt." + key, value);
}
}
/**
* Actually run the MapReduce job.
*/
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
InterruptedException {
return job.waitForCompletion(true);
}
/**
* Display a notice on the log that the current MapReduce job has
* been retired, and thus Counters are unavailable.
* @param log the Log to display the info to.
*/
protected void displayRetiredJobNotice(Log log) {
log.info("The MapReduce job has already been retired. Performance");
log.info("counters are unavailable. To get this information, ");
log.info("you will need to enable the completed job store on ");
log.info("the jobtracker with:");
log.info("mapreduce.jobtracker.persist.jobstatus.active = true");
log.info("mapreduce.jobtracker.persist.jobstatus.hours = 1");
log.info("A jobtracker restart is required for these settings");
log.info("to take effect.");
}
/**
   * Save interesting options to the constructed job. The goal here is to propagate
   * some of them to the job itself, so that they can be easily accessed. We're
   * propagating only interesting global options (like the verbose flag).
*
* @param job Destination job to save options
*/
protected void propagateOptionsToJob(Job job) {
Configuration configuration = job.getConfiguration();
// So far, propagate only verbose flag
configuration.setBoolean(PROPERTY_VERBOSE, options.getVerbose());
}
protected long getRowCountFromDB(ConnManager connManager, String tableName)
throws SQLException {
return connManager.getTableRowCount(tableName);
}
protected long getRowCountFromHadoop(Job job)
throws IOException, InterruptedException {
return ConfigurationHelper.getNumMapOutputRecords(job);
}
protected void doValidate(SqoopOptions options, Configuration conf,
ValidationContext validationContext)
throws ValidationException {
Validator validator = (Validator) ReflectionUtils.newInstance(
options.getValidatorClass(), conf);
ValidationThreshold threshold = (ValidationThreshold)
ReflectionUtils.newInstance(options.getValidationThresholdClass(),
conf);
ValidationFailureHandler failureHandler = (ValidationFailureHandler)
ReflectionUtils.newInstance(options.getValidationFailureHandlerClass(),
conf);
StringBuilder sb = new StringBuilder();
sb.append("Validating the integrity of the import using the "
+ "following configuration\n");
sb.append("\tValidator : ").append(validator.getClass().getName())
.append('\n');
sb.append("\tThreshold Specifier : ")
.append(threshold.getClass().getName()).append('\n');
sb.append("\tFailure Handler : ")
.append(failureHandler.getClass().getName()).append('\n');
LOG.info(sb.toString());
validator.validate(validationContext, threshold, failureHandler);
}
}
| [
"\"SQOOP_HOME\""
] | [] | [
"SQOOP_HOME"
] | [] | ["SQOOP_HOME"] | java | 1 | 0 | |
commands/initialize.go | package commands
import (
"fmt"
"strings"
routing_api "code.cloudfoundry.org/routing-api"
cfclient "github.com/cloudfoundry-community/go-cfclient"
"github.com/vmwarepivotallabs/cf-mgmt/config"
"github.com/vmwarepivotallabs/cf-mgmt/configcommands"
"github.com/vmwarepivotallabs/cf-mgmt/isosegment"
"github.com/vmwarepivotallabs/cf-mgmt/organization"
"github.com/vmwarepivotallabs/cf-mgmt/organizationreader"
"github.com/vmwarepivotallabs/cf-mgmt/privatedomain"
"github.com/vmwarepivotallabs/cf-mgmt/quota"
"github.com/vmwarepivotallabs/cf-mgmt/securitygroup"
"github.com/vmwarepivotallabs/cf-mgmt/serviceaccess"
"github.com/vmwarepivotallabs/cf-mgmt/shareddomain"
"github.com/vmwarepivotallabs/cf-mgmt/space"
"github.com/vmwarepivotallabs/cf-mgmt/uaa"
"github.com/vmwarepivotallabs/cf-mgmt/user"
"github.com/xchapter7x/lo"
)
type CFMgmt struct {
UAAManager uaa.Manager
OrgReader organizationreader.Reader
OrgManager organization.Manager
SpaceManager space.Manager
UserManager user.Manager
QuotaManager *quota.Manager
PrivateDomainManager privatedomain.Manager
ConfigManager config.Updater
ConfigDirectory string
SystemDomain string
SecurityGroupManager securitygroup.Manager
IsolationSegmentManager isosegment.Manager
ServiceAccessManager *serviceaccess.Manager
SharedDomainManager *shareddomain.Manager
}
type Initialize struct {
ConfigDir, SystemDomain, UserID, Password, ClientSecret, LdapPwd string
Peek bool
}
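// InitializeManagers builds the full set of cf-mgmt managers with peek (dry-run) mode disabled.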
func InitializeManagers(baseCommand BaseCFConfigCommand) (*CFMgmt, error) {
return InitializePeekManagers(baseCommand, false)
}
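// InitializePeekManagers wires up the UAA, org, space, user, quota, domain, security-group,
// isolation-segment, service-access and shared-domain managers against the CF API; when peek
// is true the managers run in dry-run mode.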
func InitializePeekManagers(baseCommand BaseCFConfigCommand, peek bool) (*CFMgmt, error) {
lo.G.Debugf("Using %s of cf-mgmt", configcommands.GetFormattedVersion())
if baseCommand.SystemDomain == "" ||
baseCommand.UserID == "" ||
baseCommand.ClientSecret == "" {
return nil, fmt.Errorf("must set system-domain, user-id, client-secret properties")
}
cfg := config.NewManager(baseCommand.ConfigDirectory)
var err error
cfMgmt := &CFMgmt{}
cfMgmt.ConfigDirectory = baseCommand.ConfigDirectory
cfMgmt.SystemDomain = baseCommand.SystemDomain
cfMgmt.ConfigManager = config.NewManager(cfMgmt.ConfigDirectory)
userAgent := fmt.Sprintf("cf-mgmt/%s", configcommands.VERSION)
uaaMgr, err := uaa.NewDefaultUAAManager(cfMgmt.SystemDomain, baseCommand.UserID, baseCommand.ClientSecret, userAgent, peek)
if err != nil {
return nil, err
}
cfMgmt.UAAManager = uaaMgr
var c *cfclient.Config
if baseCommand.Password != "" {
lo.G.Warning("Password parameter is deprecated, create uaa client and client-secret instead")
c = &cfclient.Config{
ApiAddress: fmt.Sprintf("https://api.%s", cfMgmt.SystemDomain),
SkipSslValidation: true,
Username: baseCommand.UserID,
Password: baseCommand.Password,
UserAgent: userAgent,
}
} else {
c = &cfclient.Config{
ApiAddress: fmt.Sprintf("https://api.%s", cfMgmt.SystemDomain),
SkipSslValidation: true,
ClientID: baseCommand.UserID,
ClientSecret: baseCommand.ClientSecret,
UserAgent: userAgent,
}
}
// if strings.EqualFold(os.Getenv("LOG_LEVEL"), "debug") {
// c.Debug = true
// }
client, err := cfclient.NewClient(c)
if err != nil {
return nil, err
}
cfMgmt.OrgReader = organizationreader.NewReader(client, cfg, peek)
cfMgmt.SpaceManager = space.NewManager(client, cfMgmt.UAAManager, cfMgmt.OrgReader, cfg, peek)
cfMgmt.OrgManager = organization.NewManager(client, cfMgmt.OrgReader, cfMgmt.SpaceManager, cfg, peek)
cfMgmt.UserManager = user.NewManager(client, cfg, cfMgmt.SpaceManager, cfMgmt.OrgReader, cfMgmt.UAAManager, peek)
cfMgmt.SecurityGroupManager = securitygroup.NewManager(client, cfMgmt.SpaceManager, cfg, peek)
cfMgmt.QuotaManager = quota.NewManager(client, cfMgmt.SpaceManager, cfMgmt.OrgReader, cfMgmt.OrgManager, cfg, peek)
cfMgmt.PrivateDomainManager = privatedomain.NewManager(client, cfMgmt.OrgReader, cfg, peek)
if isoSegmentManager, err := isosegment.NewManager(client, cfg, cfMgmt.OrgReader, cfMgmt.SpaceManager, peek); err == nil {
cfMgmt.IsolationSegmentManager = isoSegmentManager
} else {
return nil, err
}
cfMgmt.ServiceAccessManager = serviceaccess.NewManager(client, cfMgmt.OrgReader, cfg, peek)
token, err := client.GetToken()
if err != nil {
return nil, err
}
	// The routing API client needs the token without the "bearer " prefix.
token = strings.Replace(token, "bearer ", "", 1)
routingAPIClient := routing_api.NewClient(c.ApiAddress, true)
routingAPIClient.SetToken(token)
cfMgmt.SharedDomainManager = shareddomain.NewManager(client, routingAPIClient, cfg, peek)
return cfMgmt, nil
}
| [
"\"LOG_LEVEL\""
] | [] | [
"LOG_LEVEL"
] | [] | ["LOG_LEVEL"] | go | 1 | 0 | |
src/terraform-resource/in/in_backend_test.go | package in_test
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"strconv"
"time"
"terraform-resource/in"
"terraform-resource/models"
"terraform-resource/test/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("In with Backend", func() {
var (
awsVerifier *helpers.AWSVerifier
inReq models.InRequest
bucket string
prevEnvName string
currEnvName string
modulesEnvName string
pathToPrevS3Fixture string
pathToCurrS3Fixture string
pathToModulesS3Fixture string
tmpDir string
)
BeforeEach(func() {
accessKey := os.Getenv("AWS_ACCESS_KEY")
Expect(accessKey).ToNot(BeEmpty(), "AWS_ACCESS_KEY must be set")
secretKey := os.Getenv("AWS_SECRET_KEY")
Expect(secretKey).ToNot(BeEmpty(), "AWS_SECRET_KEY must be set")
bucket = os.Getenv("AWS_BUCKET")
Expect(bucket).ToNot(BeEmpty(), "AWS_BUCKET must be set")
bucketPath := os.Getenv("AWS_BUCKET_SUBFOLDER")
Expect(bucketPath).ToNot(BeEmpty(), "AWS_BUCKET_SUBFOLDER must be set")
region := os.Getenv("AWS_REGION") // optional
if region == "" {
region = "us-east-1"
}
awsVerifier = helpers.NewAWSVerifier(
accessKey,
secretKey,
region,
"",
)
prevEnvName = helpers.RandomString("s3-test-fixture-previous")
currEnvName = helpers.RandomString("s3-test-fixture-current")
modulesEnvName = helpers.RandomString("s3-test-fixture-modules")
workspacePath := helpers.RandomString("in-backend-test")
pathToPrevS3Fixture = path.Join(workspacePath, prevEnvName, "terraform.tfstate")
pathToCurrS3Fixture = path.Join(workspacePath, currEnvName, "terraform.tfstate")
pathToModulesS3Fixture = path.Join(workspacePath, modulesEnvName, "terraform.tfstate")
inReq = models.InRequest{
Source: models.Source{
Terraform: models.Terraform{
BackendType: "s3",
BackendConfig: map[string]interface{}{
"bucket": bucket,
"key": "terraform.tfstate",
"access_key": accessKey,
"secret_key": secretKey,
"region": region,
"workspace_key_prefix": workspacePath,
},
},
},
}
var err error
tmpDir, err = ioutil.TempDir(os.TempDir(), "terraform-resource-in-test")
Expect(err).ToNot(HaveOccurred())
err = os.Chdir(tmpDir)
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
_ = os.RemoveAll(tmpDir)
})
Context("when multiple state files exist on S3", func() {
BeforeEach(func() {
prevFixture, err := os.Open(helpers.FileLocation("fixtures/s3-backend/terraform-previous.tfstate"))
Expect(err).ToNot(HaveOccurred())
defer prevFixture.Close()
awsVerifier.UploadObjectToS3(bucket, pathToPrevS3Fixture, prevFixture)
time.Sleep(5 * time.Second) // ensure last modified is different
currFixture, err := os.Open(helpers.FileLocation("fixtures/s3-backend/terraform-current.tfstate"))
Expect(err).ToNot(HaveOccurred())
defer currFixture.Close()
awsVerifier.UploadObjectToS3(bucket, pathToCurrS3Fixture, currFixture)
modulesFixture, err := os.Open(helpers.FileLocation("fixtures/s3-backend/terraform-modules.tfstate"))
Expect(err).ToNot(HaveOccurred())
defer modulesFixture.Close()
awsVerifier.UploadObjectToS3(bucket, pathToModulesS3Fixture, modulesFixture)
})
AfterEach(func() {
awsVerifier.DeleteObjectFromS3(bucket, pathToPrevS3Fixture)
awsVerifier.DeleteObjectFromS3(bucket, pathToCurrS3Fixture)
awsVerifier.DeleteObjectFromS3(bucket, pathToModulesS3Fixture)
})
It("fetches the state file matching the provided version", func() {
inReq.Version = models.Version{
EnvName: prevEnvName,
Serial: "0",
}
runner := in.Runner{
OutputDir: tmpDir,
}
resp, err := runner.Run(inReq)
Expect(err).ToNot(HaveOccurred())
Expect(resp.Version.EnvName).To(Equal(prevEnvName))
serial, err := strconv.Atoi(resp.Version.Serial)
Expect(err).ToNot(HaveOccurred())
Expect(serial).To(BeNumerically(">=", 0))
Expect(resp.Version.Lineage).To(Equal("f62eee11-6a4e-4d39-b5c7-15d3dad8e5f7"))
metadata := map[string]string{}
for _, field := range resp.Metadata {
metadata[field.Name] = field.Value
}
Expect(metadata["terraform_version"]).To(MatchRegexp("Terraform v.*"))
Expect(metadata["env_name"]).To(Equal("previous"))
Expect(metadata["secret"]).To(Equal("<sensitive>"))
expectedOutputPath := path.Join(tmpDir, "metadata")
Expect(expectedOutputPath).To(BeAnExistingFile())
outputFile, err := os.Open(expectedOutputPath)
Expect(err).ToNot(HaveOccurred())
defer outputFile.Close()
outputContents := map[string]interface{}{}
err = json.NewDecoder(outputFile).Decode(&outputContents)
Expect(err).ToNot(HaveOccurred())
Expect(outputContents["env_name"]).To(Equal("previous"))
Expect(outputContents["map"]).To(Equal(map[string]interface{}{
"key-1": "value-1",
"key-2": "value-2",
}))
Expect(outputContents["list"]).To(Equal([]interface{}{
"item-1",
"item-2",
}))
Expect(outputContents["secret"]).To(Equal("super-secret"))
expectedNamePath := path.Join(tmpDir, "name")
Expect(expectedNamePath).To(BeAnExistingFile())
nameContents, err := ioutil.ReadFile(expectedNamePath)
Expect(err).ToNot(HaveOccurred())
Expect(string(nameContents)).To(Equal(prevEnvName))
})
It("outputs the statefile if `output_statefile` is given", func() {
inReq.Params.OutputStatefile = true
inReq.Version = models.Version{
EnvName: prevEnvName,
Serial: "0",
}
runner := in.Runner{
OutputDir: tmpDir,
}
_, err := runner.Run(inReq)
Expect(err).ToNot(HaveOccurred())
expectedOutputPath := path.Join(tmpDir, "metadata")
Expect(expectedOutputPath).To(BeAnExistingFile())
expectedNamePath := path.Join(tmpDir, "name")
Expect(expectedNamePath).To(BeAnExistingFile())
expectedStatePath := path.Join(tmpDir, "terraform.tfstate")
Expect(expectedStatePath).To(BeAnExistingFile())
stateContents, err := ioutil.ReadFile(expectedStatePath)
Expect(err).To(BeNil())
Expect(string(stateContents)).To(ContainSubstring("previous"))
})
It("returns an error when OutputModule is used", func() {
inReq.Params.OutputModule = "module_1"
inReq.Version = models.Version{
EnvName: modulesEnvName,
Serial: "1",
}
runner := in.Runner{
OutputDir: tmpDir,
}
_, err := runner.Run(inReq)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(MatchRegexp("output_module"))
})
Context("when 'default' workspace contains custom plugins", func() {
var pathToDefaultS3Fixture string
BeforeEach(func() {
// S3 backend ignores workspace_key_prefix/key for 'default' workspace.
// Unfortunately this makes this test vulnerable to test pollution.
pathToDefaultS3Fixture = "terraform.tfstate"
defaultFixture, err := os.Open(helpers.FileLocation("fixtures/custom-plugin-backend/terraform.tfstate"))
Expect(err).ToNot(HaveOccurred())
defer defaultFixture.Close()
awsVerifier.UploadObjectToS3(bucket, pathToDefaultS3Fixture, defaultFixture)
})
AfterEach(func() {
awsVerifier.DeleteObjectFromS3(bucket, pathToDefaultS3Fixture)
})
It("fetches the state file without trying to download plugins", func() {
inReq.Version = models.Version{
EnvName: prevEnvName,
Serial: "0",
}
runner := in.Runner{
OutputDir: tmpDir,
}
resp, err := runner.Run(inReq)
Expect(err).ToNot(HaveOccurred())
Expect(resp.Version.EnvName).To(Equal(prevEnvName))
})
})
})
Context("when state file does not exist on S3", func() {
Context("and it was called as part of the 'destroy' action", func() {
BeforeEach(func() {
inReq.Params.Action = models.DestroyAction
inReq.Version = models.Version{
EnvName: currEnvName,
Serial: "1",
}
})
It("returns the deleted version, but does not create the metadata file", func() {
runner := in.Runner{
OutputDir: tmpDir,
}
resp, err := runner.Run(inReq)
Expect(err).ToNot(HaveOccurred())
Expect(resp.Version.EnvName).To(Equal(currEnvName))
serial, err := strconv.Atoi(resp.Version.Serial)
Expect(err).ToNot(HaveOccurred())
Expect(serial).To(BeNumerically(">=", 1))
expectedOutputPath := path.Join(tmpDir, "metadata")
Expect(expectedOutputPath).ToNot(BeAnExistingFile())
})
})
Context("and it was called with 'plan_only'", func() {
BeforeEach(func() {
inReq.Version = models.Version{
EnvName: currEnvName,
Serial: "1",
PlanOnly: "true",
}
})
It("returns the version, but does not create the metadata file", func() {
runner := in.Runner{
OutputDir: tmpDir,
}
resp, err := runner.Run(inReq)
Expect(err).ToNot(HaveOccurred())
Expect(resp.Version.EnvName).To(Equal(currEnvName))
serial, err := strconv.Atoi(resp.Version.Serial)
Expect(err).ToNot(HaveOccurred())
Expect(serial).To(BeNumerically(">=", 1))
expectedOutputPath := path.Join(tmpDir, "metadata")
Expect(expectedOutputPath).ToNot(BeAnExistingFile())
})
})
Context("and it was called as part of update or create", func() {
BeforeEach(func() {
inReq.Params.Action = ""
inReq.Version = models.Version{
EnvName: "missing-env-name",
Serial: "0",
}
})
It("returns an error", func() {
runner := in.Runner{
OutputDir: tmpDir,
}
_, err := runner.Run(inReq)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("missing-env-name"))
Expect(err.Error()).To(ContainSubstring("get_params"))
})
})
})
})
| [
"\"AWS_ACCESS_KEY\"",
"\"AWS_SECRET_KEY\"",
"\"AWS_BUCKET\"",
"\"AWS_BUCKET_SUBFOLDER\"",
"\"AWS_REGION\""
] | [] | [
"AWS_REGION",
"AWS_BUCKET",
"AWS_BUCKET_SUBFOLDER",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY"
] | [] | ["AWS_REGION", "AWS_BUCKET", "AWS_BUCKET_SUBFOLDER", "AWS_ACCESS_KEY", "AWS_SECRET_KEY"] | go | 5 | 0 | |
tests/pygametest.py | import pygame
import os
import sys
import time
from rpi_vision.agent.capture import PiCameraStream
import numpy as np
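# Route pygame/SDL output to the secondary framebuffer (e.g. a PiTFT display) via the fbcon driver.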
os.environ['SDL_FBDEV'] = "/dev/fb1"
os.environ['SDL_VIDEODRIVER'] = "fbcon"
capture_manager = PiCameraStream(resolution=(320, 320), rotation=180, preview=False)
pygame.init()
screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
pygame.mouse.set_visible(False)
screen.fill((255,0,0))
splash = pygame.image.load(os.path.dirname(sys.argv[0])+'/bchatsplash.bmp')
screen.blit(splash, (0, 0))
pygame.display.update()
font = pygame.font.Font(None, 48)
print(screen.get_size())
capture_manager.start()
while not capture_manager.stopped:
if capture_manager.frame is None:
continue
frame = capture_manager.frame
t = time.monotonic()
# swap red & blue channels
npframe = np.ascontiguousarray(np.flip(np.array(frame), 2))
# make it an image
img = pygame.image.frombuffer(npframe, capture_manager.camera.resolution, 'RGB')
# draw it!
screen.blit(img, (0, 0))
# add some text
text_surface = font.render("Hi!", True, (255, 255, 255))
text_position = (screen.get_width()//2, screen.get_height()-24)
rect = text_surface.get_rect(center=text_position)
screen.blit(text_surface, rect)
pygame.display.update()
| [] | [] | [
"SDL_FBDEV",
"SDL_VIDEODRIVER"
] | [] | ["SDL_FBDEV", "SDL_VIDEODRIVER"] | python | 2 | 0 | |
train.py | import argparse
from datetime import datetime
import math
import numpy as np
import os
import subprocess
import time
import tensorflow as tf
import traceback
from datasets.datafeeder import DataFeeder
from hparams import hparams, hparams_debug_string
from models import create_model
from text import sequence_to_text
from util import audio, infolog, plot, ValueWindow
log = infolog.log
def get_git_commit():
subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD']) # Verify client is clean
commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()[:10]
log('Git commit: %s' % commit)
return commit
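# Build TensorBoard summaries for model outputs, losses, learning rate and gradient norms.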
def add_stats(model):
with tf.variable_scope('stats') as scope:
tf.summary.histogram('linear_outputs', model.linear_outputs)
tf.summary.histogram('linear_targets', model.linear_targets)
tf.summary.histogram('mel_outputs', model.mel_outputs)
tf.summary.histogram('mel_targets', model.mel_targets)
tf.summary.scalar('loss_mel', model.mel_loss)
tf.summary.scalar('loss_linear', model.linear_loss)
tf.summary.scalar('learning_rate', model.learning_rate)
tf.summary.scalar('loss', model.loss)
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram('gradient_norm', gradient_norms)
tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms))
return tf.summary.merge_all()
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
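# Build the data feeder, model, summaries and checkpointing, then run the training loop.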
def train(log_dir, args):
commit = get_git_commit() if args.git else 'None'
checkpoint_path = os.path.join(log_dir, 'model.ckpt')
input_path = os.path.join(args.base_dir, args.input)
log('Checkpoint path: %s' % checkpoint_path)
log('Loading training data from: %s' % input_path)
log('Using model: %s' % args.model)
log(hparams_debug_string())
# Set up DataFeeder:
coord = tf.train.Coordinator()
with tf.variable_scope('datafeeder') as scope:
feeder = DataFeeder(coord, input_path, hparams)
# Set up model:
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('model') as scope:
model = create_model(args.model, hparams)
model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.linear_targets)
model.add_loss()
model.add_optimizer(global_step)
stats = add_stats(model)
# Bookkeeping:
step = 0
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
# Train!
with tf.device('/gpu:0'):
with tf.Session() as sess:
try:
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.run(tf.global_variables_initializer())
if args.restore_step:
# Restore from a checkpoint if the user requested it.
restore_path = '%s-%d' % (checkpoint_path, args.restore_step)
saver.restore(sess, restore_path)
log('Resuming from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)
else:
log('Starting new training run at commit: %s' % commit, slack=True)
feeder.start_in_session(sess)
while not coord.should_stop():
start_time = time.time()
step, loss, opt = sess.run([global_step, model.loss, model.optimize])
time_window.append(time.time() - start_time)
loss_window.append(loss)
message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f]' % (
step, time_window.average, loss, loss_window.average)
log(message, slack=(step % args.checkpoint_interval == 0))
if loss > 100 or math.isnan(loss):
log('Loss exploded to %.05f at step %d!' % (loss, step), slack=True)
raise Exception('Loss Exploded')
if step % args.summary_interval == 0:
log('Writing summary at step: %d' % step)
summary_writer.add_summary(sess.run(stats), step)
if step % args.checkpoint_interval == 0:
log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))
saver.save(sess, checkpoint_path, global_step=step)
log('Saving audio and alignment...')
input_seq, spectrogram, alignment = sess.run([
model.inputs[0], model.linear_outputs[0], model.alignments[0]])
waveform = audio.inv_spectrogram(spectrogram.T)
audio.save_wav(waveform, os.path.join(log_dir, 'step-%d-audio.wav' % step))
plot.plot_alignment(alignment, os.path.join(log_dir, 'step-%d-align.png' % step),
info='%s, %s, %s, step=%d, loss=%.5f' % (args.model, commit, time_string(), step, loss))
log('Input: %s' % sequence_to_text(input_seq))
except Exception as e:
log('Exiting due to exception: %s' % e, slack=True)
traceback.print_exc()
coord.request_stop(e)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
parser.add_argument('--input', default='training/train.txt')
parser.add_argument('--model', default='tacotron')
parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
parser.add_argument('--summary_interval', type=int, default=100,
help='Steps between running summary ops.')
parser.add_argument('--checkpoint_interval', type=int, default=1000,
help='Steps between writing checkpoints.')
parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')
args = parser.parse_args()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
run_name = args.name or args.model
log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
hparams.parse(args.hparams)
train(log_dir, args)
if __name__ == '__main__':
main()
| [] | [] | [
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
tests/speed.py | """ Travis CI Tests """
ActualData = ['8998bcff9765438bb6089ab93bfad4d3',
'f7c77d999f154a66a87dc4a51ef30d19',
'ed23c309c54645a7b4805f95a2fb76b0',
'43db704e10b140b3a38dce059de35a59',
'2b7f6bd60cfe4458a35a3312493772fb',
'042470004a1f4f3bab91001866784bc0',
'aec441cb6ee14601b000ec0ece396649',
'0d062b017cc54b5da034fc17fc26206c',
'446dea472dd0494b89260421b9981d15',
'b80a30a6d6d7472490c0c6081684b769']
Repeats = 5
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import hypixel
from time import time
from random import shuffle
print("Test \"{}\" is now running...\n".format(os.path.basename(__file__)))
API_KEY = os.environ['apikey']
hypixel.setKeys([API_KEY])
start = time() # Start timer.
for i in range(Repeats):
shuffle(ActualData) # Randomize the order of the data
for InputUUID in ActualData:
Player = hypixel.Player(InputUUID)
print(Player.getPlayerInfo())
end = time()
totalTime = end - start
print("\nDone! Speed test finished. Time taken: {}".format(totalTime))
| [] | [] | [
"apikey"
] | [] | ["apikey"] | python | 1 | 0 | |
api4/apitestlib.go | // Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package api4
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/mattermost/mattermost-server/app"
"github.com/mattermost/mattermost-server/mlog"
"github.com/mattermost/mattermost-server/model"
"github.com/mattermost/mattermost-server/store"
"github.com/mattermost/mattermost-server/store/sqlstore"
"github.com/mattermost/mattermost-server/store/storetest"
"github.com/mattermost/mattermost-server/utils"
"github.com/mattermost/mattermost-server/web"
"github.com/mattermost/mattermost-server/wsapi"
s3 "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/credentials"
)
type TestHelper struct {
App *app.App
tempConfigPath string
Client *model.Client4
BasicUser *model.User
BasicUser2 *model.User
TeamAdminUser *model.User
BasicTeam *model.Team
BasicChannel *model.Channel
BasicPrivateChannel *model.Channel
BasicChannel2 *model.Channel
BasicPost *model.Post
SystemAdminClient *model.Client4
SystemAdminUser *model.User
tempWorkspace string
}
type persistentTestStore struct {
store.Store
}
func (*persistentTestStore) Close() {}
var testStoreContainer *storetest.RunningContainer
var testStore *persistentTestStore
// UseTestStore sets the container and corresponding settings to use for tests. Once the tests are
// complete (e.g. at the end of your TestMain implementation), you should call StopTestStore.
func UseTestStore(container *storetest.RunningContainer, settings *model.SqlSettings) {
testStoreContainer = container
testStore = &persistentTestStore{store.NewLayeredStore(sqlstore.NewSqlSupplier(*settings, nil), nil, nil)}
}
func StopTestStore() {
if testStoreContainer != nil {
testStoreContainer.Stop()
testStoreContainer = nil
}
}
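// setupTestHelper copies the base config.json to a temporary file, boots a test app and HTTP
// server, runs the permissions/emoji migrations and returns a TestHelper with fresh API clients.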
func setupTestHelper(enterprise bool) *TestHelper {
permConfig, err := os.Open(utils.FindConfigFile("config.json"))
if err != nil {
panic(err)
}
defer permConfig.Close()
tempConfig, err := ioutil.TempFile("", "")
if err != nil {
panic(err)
}
_, err = io.Copy(tempConfig, permConfig)
tempConfig.Close()
if err != nil {
panic(err)
}
options := []app.Option{app.ConfigFile(tempConfig.Name()), app.DisableConfigWatch}
if testStore != nil {
options = append(options, app.StoreOverride(testStore))
}
a, err := app.New(options...)
if err != nil {
panic(err)
}
th := &TestHelper{
App: a,
tempConfigPath: tempConfig.Name(),
}
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.TeamSettings.MaxUsersPerTeam = 50
*cfg.RateLimitSettings.Enable = false
cfg.EmailSettings.SendEmailNotifications = true
})
prevListenAddress := *th.App.Config().ServiceSettings.ListenAddress
if testStore != nil {
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.ListenAddress = ":0" })
}
serverErr := th.App.StartServer()
if serverErr != nil {
panic(serverErr)
}
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.ListenAddress = prevListenAddress })
Init(th.App, th.App.Srv.Router)
web.NewWeb(th.App, th.App.Srv.Router)
wsapi.Init(th.App, th.App.Srv.WebSocketRouter)
th.App.Srv.Store.MarkSystemRanUnitTests()
th.App.DoAdvancedPermissionsMigration()
th.App.DoEmojisPermissionsMigration()
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.TeamSettings.EnableOpenServer = true })
if enterprise {
th.App.SetLicense(model.NewTestLicense())
} else {
th.App.SetLicense(nil)
}
th.Client = th.CreateClient()
th.SystemAdminClient = th.CreateClient()
if th.tempWorkspace == "" {
dir, err := ioutil.TempDir("", "apptest")
if err != nil {
panic(err)
}
th.tempWorkspace = dir
}
pluginDir := filepath.Join(th.tempWorkspace, "plugins")
webappDir := filepath.Join(th.tempWorkspace, "webapp")
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Directory = pluginDir
*cfg.PluginSettings.ClientDirectory = webappDir
})
th.App.InitPlugins(pluginDir, webappDir)
return th
}
func SetupEnterprise() *TestHelper {
return setupTestHelper(true)
}
func Setup() *TestHelper {
return setupTestHelper(false)
}
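// TearDown deletes the fake users, teams and OAuth apps created during the run, shuts down the
// app and removes the temporary config file.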
func (me *TestHelper) TearDown() {
utils.DisableDebugLogForTest()
var wg sync.WaitGroup
wg.Add(3)
go func() {
defer wg.Done()
options := map[string]bool{}
options[store.USER_SEARCH_OPTION_NAMES_ONLY_NO_FULL_NAME] = true
if result := <-me.App.Srv.Store.User().Search("", "fakeuser", options); result.Err != nil {
mlog.Error("Error tearing down test users")
} else {
users := result.Data.([]*model.User)
for _, u := range users {
if err := me.App.PermanentDeleteUser(u); err != nil {
mlog.Error(err.Error())
}
}
}
}()
go func() {
defer wg.Done()
if result := <-me.App.Srv.Store.Team().SearchByName("faketeam"); result.Err != nil {
mlog.Error("Error tearing down test teams")
} else {
teams := result.Data.([]*model.Team)
for _, t := range teams {
if err := me.App.PermanentDeleteTeam(t); err != nil {
mlog.Error(err.Error())
}
}
}
}()
go func() {
defer wg.Done()
if result := <-me.App.Srv.Store.OAuth().GetApps(0, 1000); result.Err != nil {
mlog.Error("Error tearing down test oauth apps")
} else {
apps := result.Data.([]*model.OAuthApp)
for _, a := range apps {
if strings.HasPrefix(a.Name, "fakeoauthapp") {
<-me.App.Srv.Store.OAuth().DeleteApp(a.Id)
}
}
}
}()
wg.Wait()
me.App.Shutdown()
os.Remove(me.tempConfigPath)
utils.EnableDebugLogForTest()
if err := recover(); err != nil {
StopTestStore()
panic(err)
}
}
func (me *TestHelper) InitBasic() *TestHelper {
me.waitForConnectivity()
me.TeamAdminUser = me.CreateUser()
me.App.UpdateUserRoles(me.TeamAdminUser.Id, model.SYSTEM_USER_ROLE_ID, false)
me.LoginTeamAdmin()
me.BasicTeam = me.CreateTeam()
me.BasicChannel = me.CreatePublicChannel()
me.BasicPrivateChannel = me.CreatePrivateChannel()
me.BasicChannel2 = me.CreatePublicChannel()
me.BasicPost = me.CreatePost()
me.BasicUser = me.CreateUser()
me.LinkUserToTeam(me.BasicUser, me.BasicTeam)
me.BasicUser2 = me.CreateUser()
me.LinkUserToTeam(me.BasicUser2, me.BasicTeam)
me.App.AddUserToChannel(me.BasicUser, me.BasicChannel)
me.App.AddUserToChannel(me.BasicUser2, me.BasicChannel)
me.App.AddUserToChannel(me.BasicUser, me.BasicChannel2)
me.App.AddUserToChannel(me.BasicUser2, me.BasicChannel2)
me.App.AddUserToChannel(me.BasicUser, me.BasicPrivateChannel)
me.App.AddUserToChannel(me.BasicUser2, me.BasicPrivateChannel)
me.App.UpdateUserRoles(me.BasicUser.Id, model.SYSTEM_USER_ROLE_ID, false)
me.LoginBasic()
return me
}
func (me *TestHelper) InitSystemAdmin() *TestHelper {
me.waitForConnectivity()
me.SystemAdminUser = me.CreateUser()
me.App.UpdateUserRoles(me.SystemAdminUser.Id, model.SYSTEM_USER_ROLE_ID+" "+model.SYSTEM_ADMIN_ROLE_ID, false)
me.LoginSystemAdmin()
return me
}
func (me *TestHelper) waitForConnectivity() {
for i := 0; i < 1000; i++ {
conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", me.App.Srv.ListenAddr.Port))
if err == nil {
conn.Close()
return
}
time.Sleep(time.Millisecond * 20)
}
panic("unable to connect")
}
func (me *TestHelper) CreateClient() *model.Client4 {
return model.NewAPIv4Client(fmt.Sprintf("http://localhost:%v", me.App.Srv.ListenAddr.Port))
}
func (me *TestHelper) CreateWebSocketClient() (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", me.App.Srv.ListenAddr.Port), me.Client.AuthToken)
}
func (me *TestHelper) CreateWebSocketSystemAdminClient() (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", me.App.Srv.ListenAddr.Port), me.SystemAdminClient.AuthToken)
}
func (me *TestHelper) CreateUser() *model.User {
return me.CreateUserWithClient(me.Client)
}
func (me *TestHelper) CreateTeam() *model.Team {
return me.CreateTeamWithClient(me.Client)
}
func (me *TestHelper) CreateTeamWithClient(client *model.Client4) *model.Team {
id := model.NewId()
team := &model.Team{
DisplayName: "dn_" + id,
Name: GenerateTestTeamName(),
Email: me.GenerateTestEmail(),
Type: model.TEAM_OPEN,
}
utils.DisableDebugLogForTest()
rteam, _ := client.CreateTeam(team)
utils.EnableDebugLogForTest()
return rteam
}
func (me *TestHelper) CreateUserWithClient(client *model.Client4) *model.User {
id := model.NewId()
user := &model.User{
Email: me.GenerateTestEmail(),
Username: GenerateTestUsername(),
Nickname: "nn_" + id,
FirstName: "f_" + id,
LastName: "l_" + id,
Password: "Password1",
}
utils.DisableDebugLogForTest()
ruser, response := client.CreateUser(user)
if response.Error != nil {
panic(response.Error)
}
ruser.Password = "Password1"
store.Must(me.App.Srv.Store.User().VerifyEmail(ruser.Id))
utils.EnableDebugLogForTest()
return ruser
}
func (me *TestHelper) CreatePublicChannel() *model.Channel {
return me.CreateChannelWithClient(me.Client, model.CHANNEL_OPEN)
}
func (me *TestHelper) CreatePrivateChannel() *model.Channel {
return me.CreateChannelWithClient(me.Client, model.CHANNEL_PRIVATE)
}
func (me *TestHelper) CreateChannelWithClient(client *model.Client4, channelType string) *model.Channel {
return me.CreateChannelWithClientAndTeam(client, channelType, me.BasicTeam.Id)
}
func (me *TestHelper) CreateChannelWithClientAndTeam(client *model.Client4, channelType string, teamId string) *model.Channel {
id := model.NewId()
channel := &model.Channel{
DisplayName: "dn_" + id,
Name: GenerateTestChannelName(),
Type: channelType,
TeamId: teamId,
}
utils.DisableDebugLogForTest()
rchannel, _ := client.CreateChannel(channel)
utils.EnableDebugLogForTest()
return rchannel
}
func (me *TestHelper) CreatePost() *model.Post {
return me.CreatePostWithClient(me.Client, me.BasicChannel)
}
func (me *TestHelper) CreatePinnedPost() *model.Post {
return me.CreatePinnedPostWithClient(me.Client, me.BasicChannel)
}
func (me *TestHelper) CreateMessagePost(message string) *model.Post {
return me.CreateMessagePostWithClient(me.Client, me.BasicChannel, message)
}
func (me *TestHelper) CreatePostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (me *TestHelper) CreatePinnedPostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
IsPinned: true,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (me *TestHelper) CreateMessagePostWithClient(client *model.Client4, channel *model.Channel, message string) *model.Post {
post := &model.Post{
ChannelId: channel.Id,
Message: message,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (me *TestHelper) LoginBasic() {
me.LoginBasicWithClient(me.Client)
}
func (me *TestHelper) LoginBasic2() {
me.LoginBasic2WithClient(me.Client)
}
func (me *TestHelper) LoginTeamAdmin() {
me.LoginTeamAdminWithClient(me.Client)
}
func (me *TestHelper) LoginSystemAdmin() {
me.LoginSystemAdminWithClient(me.SystemAdminClient)
}
func (me *TestHelper) LoginBasicWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
client.Login(me.BasicUser.Email, me.BasicUser.Password)
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LoginBasic2WithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
client.Login(me.BasicUser2.Email, me.BasicUser2.Password)
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LoginTeamAdminWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
client.Login(me.TeamAdminUser.Email, me.TeamAdminUser.Password)
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LoginSystemAdminWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
client.Login(me.SystemAdminUser.Email, me.SystemAdminUser.Password)
utils.EnableDebugLogForTest()
}
func (me *TestHelper) UpdateActiveUser(user *model.User, active bool) {
utils.DisableDebugLogForTest()
_, err := me.App.UpdateActive(user, active)
if err != nil {
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LinkUserToTeam(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
err := me.App.JoinUserToTeam(team, user, "")
if err != nil {
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) AddUserToChannel(user *model.User, channel *model.Channel) *model.ChannelMember {
utils.DisableDebugLogForTest()
member, err := me.App.AddUserToChannel(user, channel)
if err != nil {
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
return member
}
func (me *TestHelper) GenerateTestEmail() string {
if me.App.Config().EmailSettings.SMTPServer != "dockerhost" && os.Getenv("CI_INBUCKET_PORT") == "" {
return strings.ToLower("success+" + model.NewId() + "@simulator.amazonses.com")
}
return strings.ToLower(model.NewId() + "@dockerhost")
}
func GenerateTestUsername() string {
return "fakeuser" + model.NewRandomString(10)
}
func GenerateTestTeamName() string {
return "faketeam" + model.NewRandomString(6)
}
func GenerateTestChannelName() string {
return "fakechannel" + model.NewRandomString(10)
}
func GenerateTestAppName() string {
return "fakeoauthapp" + model.NewRandomString(10)
}
func GenerateTestId() string {
return model.NewId()
}
func CheckUserSanitization(t *testing.T, user *model.User) {
t.Helper()
if user.Password != "" {
t.Fatal("password wasn't blank")
}
if user.AuthData != nil && *user.AuthData != "" {
t.Fatal("auth data wasn't blank")
}
if user.MfaSecret != "" {
t.Fatal("mfa secret wasn't blank")
}
}
func CheckEtag(t *testing.T, data interface{}, resp *model.Response) {
t.Helper()
if !reflect.ValueOf(data).IsNil() {
t.Fatal("etag data was not nil")
}
if resp.StatusCode != http.StatusNotModified {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusNotModified))
t.Fatal("wrong status code for etag")
}
}
func CheckNoError(t *testing.T, resp *model.Response) {
t.Helper()
if resp.Error != nil {
t.Fatal("Expected no error, got " + resp.Error.Error())
}
}
func CheckCreatedStatus(t *testing.T, resp *model.Response) {
t.Helper()
if resp.StatusCode != http.StatusCreated {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusCreated))
t.Fatal("wrong status code")
}
}
func CheckForbiddenStatus(t *testing.T, resp *model.Response) {
t.Helper()
if resp.Error == nil {
t.Fatal("should have errored with status:" + strconv.Itoa(http.StatusForbidden))
return
}
if resp.StatusCode != http.StatusForbidden {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusForbidden))
t.Fatal("wrong status code")
}
}
func CheckUnauthorizedStatus(t *testing.T, resp *model.Response) {
t.Helper()
if resp.Error == nil {
t.Fatal("should have errored with status:" + strconv.Itoa(http.StatusUnauthorized))
return
}
if resp.StatusCode != http.StatusUnauthorized {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusUnauthorized))
t.Fatal("wrong status code")
}
}
func CheckNotFoundStatus(t *testing.T, resp *model.Response) {
t.Helper()
if resp.Error == nil {
t.Fatal("should have errored with status:" + strconv.Itoa(http.StatusNotFound))
return
}
if resp.StatusCode != http.StatusNotFound {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusNotFound))
t.Fatal("wrong status code")
}
}
func CheckBadRequestStatus(t *testing.T, resp *model.Response) {
t.Helper()
if resp.Error == nil {
t.Fatal("should have errored with status:" + strconv.Itoa(http.StatusBadRequest))
return
}
if resp.StatusCode != http.StatusBadRequest {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusBadRequest))
t.Fatal("wrong status code")
}
}
func CheckNotImplementedStatus(t *testing.T, resp *model.Response) {
t.Helper()
if resp.Error == nil {
t.Fatal("should have errored with status:" + strconv.Itoa(http.StatusNotImplemented))
return
}
if resp.StatusCode != http.StatusNotImplemented {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusNotImplemented))
t.Fatal("wrong status code")
}
}
func CheckOKStatus(t *testing.T, resp *model.Response) {
t.Helper()
CheckNoError(t, resp)
if resp.StatusCode != http.StatusOK {
t.Fatalf("wrong status code. expected %d got %d", http.StatusOK, resp.StatusCode)
}
}
func CheckErrorMessage(t *testing.T, resp *model.Response, errorId string) {
t.Helper()
if resp.Error == nil {
t.Fatal("should have errored with message:" + errorId)
return
}
if resp.Error.Id != errorId {
t.Log("actual: " + resp.Error.Id)
t.Log("expected: " + errorId)
t.Fatal("incorrect error message")
}
}
func CheckInternalErrorStatus(t *testing.T, resp *model.Response) {
t.Helper()
if resp.Error == nil {
t.Fatal("should have errored with status:" + strconv.Itoa(http.StatusInternalServerError))
return
}
if resp.StatusCode != http.StatusInternalServerError {
t.Log("actual: " + strconv.Itoa(resp.StatusCode))
t.Log("expected: " + strconv.Itoa(http.StatusInternalServerError))
t.Fatal("wrong status code")
}
}
func readTestFile(name string) ([]byte, error) {
path, _ := utils.FindDir("tests")
file, err := os.Open(filepath.Join(path, name))
if err != nil {
return nil, err
}
defer file.Close()
data := &bytes.Buffer{}
if _, err := io.Copy(data, file); err != nil {
return nil, err
} else {
return data.Bytes(), nil
}
}
// Similar to s3.New() but allows initialization of signature v2 or signature v4 client.
// If signV2 input is false, function always returns signature v4.
//
// Additionally this function also takes a user defined region, if set
// disables automatic region lookup.
func s3New(endpoint, accessKey, secretKey string, secure bool, signV2 bool, region string) (*s3.Client, error) {
var creds *credentials.Credentials
if signV2 {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV2)
} else {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV4)
}
return s3.NewWithCredentials(endpoint, creds, secure, region)
}
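// cleanupTestFile deletes an uploaded test file (plus any thumbnail and preview) from S3 or
// local storage, depending on the configured file driver.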
func (me *TestHelper) cleanupTestFile(info *model.FileInfo) error {
cfg := me.App.Config()
if *cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
endpoint := cfg.FileSettings.AmazonS3Endpoint
accessKey := cfg.FileSettings.AmazonS3AccessKeyId
secretKey := cfg.FileSettings.AmazonS3SecretAccessKey
secure := *cfg.FileSettings.AmazonS3SSL
signV2 := *cfg.FileSettings.AmazonS3SignV2
region := cfg.FileSettings.AmazonS3Region
s3Clnt, err := s3New(endpoint, accessKey, secretKey, secure, signV2, region)
if err != nil {
return err
}
bucket := cfg.FileSettings.AmazonS3Bucket
if err := s3Clnt.RemoveObject(bucket, info.Path); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := s3Clnt.RemoveObject(bucket, info.ThumbnailPath); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := s3Clnt.RemoveObject(bucket, info.PreviewPath); err != nil {
return err
}
}
} else if *cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
if err := os.Remove(cfg.FileSettings.Directory + info.Path); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := os.Remove(cfg.FileSettings.Directory + info.ThumbnailPath); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := os.Remove(cfg.FileSettings.Directory + info.PreviewPath); err != nil {
return err
}
}
}
return nil
}
func (me *TestHelper) MakeUserChannelAdmin(user *model.User, channel *model.Channel) {
utils.DisableDebugLogForTest()
if cmr := <-me.App.Srv.Store.Channel().GetMember(channel.Id, user.Id); cmr.Err == nil {
cm := cmr.Data.(*model.ChannelMember)
cm.SchemeAdmin = true
if sr := <-me.App.Srv.Store.Channel().UpdateMember(cm); sr.Err != nil {
utils.EnableDebugLogForTest()
panic(sr.Err)
}
} else {
utils.EnableDebugLogForTest()
panic(cmr.Err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) UpdateUserToTeamAdmin(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
if tmr := <-me.App.Srv.Store.Team().GetMember(team.Id, user.Id); tmr.Err == nil {
tm := tmr.Data.(*model.TeamMember)
tm.SchemeAdmin = true
if sr := <-me.App.Srv.Store.Team().UpdateMember(tm); sr.Err != nil {
utils.EnableDebugLogForTest()
panic(sr.Err)
}
} else {
utils.EnableDebugLogForTest()
mlog.Error(tmr.Err.Error())
time.Sleep(time.Second)
panic(tmr.Err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) UpdateUserToNonTeamAdmin(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
if tmr := <-me.App.Srv.Store.Team().GetMember(team.Id, user.Id); tmr.Err == nil {
tm := tmr.Data.(*model.TeamMember)
tm.SchemeAdmin = false
if sr := <-me.App.Srv.Store.Team().UpdateMember(tm); sr.Err != nil {
utils.EnableDebugLogForTest()
panic(sr.Err)
}
} else {
utils.EnableDebugLogForTest()
mlog.Error(tmr.Err.Error())
time.Sleep(time.Second)
panic(tmr.Err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) SaveDefaultRolePermissions() map[string][]string {
utils.DisableDebugLogForTest()
results := make(map[string][]string)
for _, roleName := range []string{
"system_user",
"system_admin",
"team_user",
"team_admin",
"channel_user",
"channel_admin",
} {
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
results[roleName] = role.Permissions
}
utils.EnableDebugLogForTest()
return results
}
func (me *TestHelper) RestoreDefaultRolePermissions(data map[string][]string) {
utils.DisableDebugLogForTest()
for roleName, permissions := range data {
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
if strings.Join(role.Permissions, " ") == strings.Join(permissions, " ") {
continue
}
role.Permissions = permissions
_, err2 := me.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) RemovePermissionFromRole(permission string, roleName string) {
utils.DisableDebugLogForTest()
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
var newPermissions []string
for _, p := range role.Permissions {
if p != permission {
newPermissions = append(newPermissions, p)
}
}
if strings.Join(role.Permissions, " ") == strings.Join(newPermissions, " ") {
utils.EnableDebugLogForTest()
return
}
role.Permissions = newPermissions
_, err2 := me.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) AddPermissionToRole(permission string, roleName string) {
utils.DisableDebugLogForTest()
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
for _, existingPermission := range role.Permissions {
if existingPermission == permission {
utils.EnableDebugLogForTest()
return
}
}
role.Permissions = append(role.Permissions, permission)
_, err2 := me.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
utils.EnableDebugLogForTest()
}
| [
"\"CI_INBUCKET_PORT\""
] | [] | [
"CI_INBUCKET_PORT"
] | [] | ["CI_INBUCKET_PORT"] | go | 1 | 0 | |
folio/asgi.py | """
ASGI config for folio project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'folio.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
Src/StdLib/Lib/test/support/__init__.py | """Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import collections.abc
import contextlib
import errno
import faulthandler
import fnmatch
import functools
import gc
import importlib
import importlib.util
import logging.handlers
import nntplib
import os
import platform
import re
import shutil
import socket
import stat
import struct
import subprocess
import sys
import sysconfig
import tempfile
import time
import unittest
import urllib.error
import warnings
try:
import _thread, threading
except ImportError:
_thread = None
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import gzip
except ImportError:
gzip = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
try:
import resource
except ImportError:
resource = None
__all__ = [
# globals
"PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
# exceptions
"Error", "TestFailed", "ResourceDenied",
# imports
"import_module", "import_fresh_module", "CleanImport",
# modules
"unload", "forget",
# io
"record_original_stdout", "get_original_stdout", "captured_stdout",
"captured_stdin", "captured_stderr",
# filesystem
"TESTFN", "SAVEDCWD", "unlink", "rmtree", "temp_cwd", "findfile",
"create_empty_file", "can_symlink", "fs_is_case_insensitive",
# unittest
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "check_syntax_error",
"TransientResource", "time_out", "socket_peer_reset", "ioerror_peer_reset",
"transient_internet", "BasicTestRunner", "run_unittest", "run_doctest",
"skip_unless_symlink", "requires_gzip", "requires_bz2", "requires_lzma",
"bigmemtest", "bigaddrspacetest", "cpython_only", "get_attribute",
"requires_IEEE_754", "skip_unless_xattr", "requires_zlib",
"anticipate_failure", "load_package_tests",
# sys
"is_jython", "check_impl_detail",
# network
"HOST", "IPV6_ENABLED", "find_unused_port", "bind_port", "open_urlresource",
# processes
'temp_umask', "reap_children",
# logging
"TestHandler",
# threads
"threading_setup", "threading_cleanup", "reap_threads", "start_threads",
# miscellaneous
"check_warnings", "EnvironmentVarGuard", "run_with_locale", "swap_item",
"swap_attr", "Matcher", "set_memlimit", "SuppressCrashReport", "sortdict",
"run_with_tz",
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not be enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect.
"""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False, *, required_on=()):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed. If a module is required on a platform but optional for
others, set required_on to an iterable of platform prefixes which will be
compared against sys.platform.
"""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
if sys.platform.startswith(tuple(required_on)):
raise
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported.
"""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise.
"""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def anticipate_failure(condition):
"""Decorator to mark a test that is known to be broken in some cases
Any use of this decorator should have a comment identifying the
associated tracker issue.
"""
if condition:
return unittest.expectedFailure
return lambda f: f
def load_package_tests(pkg_dir, loader, standard_tests, pattern):
"""Generic load_tests implementation for simple test packages.
Most packages can implement load_tests using this function as follows:
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
"""
if pattern is None:
pattern = "test*"
top_dir = os.path.dirname( # Lib
os.path.dirname( # test
os.path.dirname(__file__))) # support
package_tests = loader.discover(start_dir=pkg_dir,
top_level_dir=top_dir,
pattern=pattern)
standard_tests.addTests(package_tests)
return standard_tests
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Import and return a module, deliberately bypassing sys.modules.
This function imports and returns a fresh copy of the named Python module
by removing the named module from sys.modules before doing the import.
Note that unlike reload, the original module is not affected by
this operation.
*fresh* is an iterable of additional module names that are also removed
from the sys.modules cache before doing the import.
*blocked* is an iterable of module names that are replaced with None
in the module cache during the import to ensure that attempts to import
them raise ImportError.
The named module and any modules named in the *fresh* and *blocked*
parameters are saved before starting the import and then reinserted into
sys.modules when the fresh import is complete.
Module and package deprecation messages are suppressed during this import
if *deprecated* is True.
This function will raise ImportError if the named module cannot be
imported.
"""
# NOTE: test_heapq, test_json and test_warnings include extra sanity checks
# to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
failfast = False
match_tests = None
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError as exc:
print("support.rmtree(): os.lstat(%r) failed with %s" % (fullname, exc),
file=sys.__stderr__)
mode = 0
if stat.S_ISDIR(mode):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except (FileNotFoundError, NotADirectoryError):
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except FileNotFoundError:
pass
def rmtree(path):
try:
_rmtree(path)
except FileNotFoundError:
pass
def make_legacy_pyc(source):
"""Move a PEP 3147 pyc/pyo file to its legacy pyc/pyo location.
The choice of .pyc or .pyo extension is done based on the __debug__ flag
value.
:param source: The file system path to the source file. The source file
does not need to exist, however the PEP 3147 pyc file must exist.
:return: The file system path to the legacy pyc file.
"""
pyc_file = importlib.util.cache_from_source(source)
up_one = os.path.dirname(os.path.abspath(source))
legacy_pyc = os.path.join(up_one, source + ('c' if __debug__ else 'o'))
os.rename(pyc_file, legacy_pyc)
return legacy_pyc
def forget(modname):
"""'Forget' a module was ever imported.
This removes the module from sys.modules and deletes any PEP 3147 or
legacy .pyc and .pyo files.
"""
unload(modname)
for dirname in sys.path:
source = os.path.join(dirname, modname + '.py')
# It doesn't matter if they exist or not, unlink all possible
# combinations of PEP 3147 and legacy pyc and pyo files.
unlink(source + 'c')
unlink(source + 'o')
unlink(importlib.util.cache_from_source(source, debug_override=True))
unlink(importlib.util.cache_from_source(source, debug_override=False))
# Check whether a gui is actually available
def _is_gui_available():
if hasattr(_is_gui_available, 'result'):
return _is_gui_available.result
reason = None
if sys.platform.startswith('win'):
# if Python is running as a service (such as the buildbot service),
# gui interaction may be disallowed
import ctypes
import ctypes.wintypes
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
if not bool(uof.dwFlags & WSF_VISIBLE):
reason = "gui not available (WSF_VISIBLE flag not set)"
elif sys.platform == 'darwin':
# The Aqua Tk implementations on OS X can abort the process if
# being called in an environment where a window server connection
# cannot be made, for instance when invoked by a buildbot or ssh
# process not running under the same user id as the current console
# user. To avoid that, raise an exception if the window manager
# connection is not available.
from ctypes import cdll, c_int, pointer, Structure
from ctypes.util import find_library
app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
if app_services.CGMainDisplayID() == 0:
reason = "gui tests cannot run without OS X window manager"
else:
class ProcessSerialNumber(Structure):
_fields_ = [("highLongOfPSN", c_int),
("lowLongOfPSN", c_int)]
psn = ProcessSerialNumber()
psn_p = pointer(psn)
if ( (app_services.GetCurrentProcess(psn_p) < 0) or
(app_services.SetFrontProcess(psn_p) < 0) ):
reason = "cannot run without OS X gui process"
# check on every platform whether tkinter can actually do anything
if not reason:
try:
from tkinter import Tk
root = Tk()
root.update()
root.destroy()
except Exception as e:
err_string = str(e)
if len(err_string) > 50:
err_string = err_string[:50] + ' [...]'
reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
err_string)
_is_gui_available.reason = reason
_is_gui_available.result = not reason
return _is_gui_available.result
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
Known resources are set by regrtest.py. If not running under regrtest.py,
all resources are assumed enabled unless use_resources has been set.
"""
return use_resources is None or resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason)
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the %r resource not enabled" % resource
raise ResourceDenied(msg)
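# Illustrative sketch (editorial addition): a network-dependent test guards
# itself with requires() so it is skipped (ResourceDenied) unless the
# 'network' resource was enabled, e.g. by running regrtest with "-u network".
def _example_requires_network():  # illustrative only
    requires('network')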
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is less
than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
version is less than min_version.
For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
# Don't use "localhost", since resolving it uses the DNS under recent
# Windows versions (see issue #18792).
HOST = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the requested family and
socktype (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given that Unix developers far outnumber Windows
developers in the Open Source world, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
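# Illustrative sketch (editorial addition): as the docstrings above recommend,
# bind_port() is preferred for sockets the test creates itself, while
# find_unused_port() is reserved for handing a port number to external code.
def _example_port_helpers():  # illustrative only
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        port = bind_port(srv)       # preferred: bind our own listening socket
        srv.listen()
    spare = find_unused_port()      # only for passing to an external program
    return port, spare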
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except IOError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# A constant likely larger than the underlying OS socket buffer size, to make
# writes blocking.
# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# for a discussion of this number).
SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_gzip = unittest.skipUnless(gzip, 'requires gzip')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.
FS_NONASCII = None
for character in (
# First try printable and common characters to have a readable filename.
# For each character, the encoding list are just example of encodings able
# to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
os.fsdecode(os.fsencode(character))
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name in ('nt', 'ce'):
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
for name in (
# b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
# accepts it when creating a file or a directory, but doesn't accept entering
# such a directory (when the bytes name is used). So test b'\xe7' first: it is
# not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
b'\xae\xd5'
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
break
if FS_NONASCII:
TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
TESTFN_NONASCII = None
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_dir(path=None, quiet=False):
"""Return a context manager that creates a temporary directory.
Arguments:
path: the directory to create temporarily. If omitted or None,
defaults to creating a temporary directory using tempfile.mkdtemp.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, if the path is specified and cannot be
created, only a warning is issued.
"""
dir_created = False
if path is None:
path = tempfile.mkdtemp()
dir_created = True
path = os.path.realpath(path)
else:
try:
os.mkdir(path)
dir_created = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to create temp dir: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield path
finally:
if dir_created:
shutil.rmtree(path)
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change CWD to: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
Context manager that temporarily creates and changes the CWD.
The function temporarily changes the current working directory
after creating a temporary directory in the current directory with
name *name*. If *name* is None, the temporary directory is
created using tempfile.mkdtemp.
If *quiet* is False (default) and it is not possible to
create or change the CWD, an error is raised. If *quiet* is True,
only a warning is raised and the original CWD is used.
"""
with temp_dir(path=name, quiet=quiet) as temp_path:
with change_cwd(temp_path, quiet=quiet) as cwd_dir:
yield cwd_dir
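# Illustrative sketch (editorial addition): temp_cwd() keeps a test's scratch
# files out of the source tree; the original working directory is restored and
# the temporary directory removed on exit. The file name is an example only.
def _example_temp_cwd():  # illustrative only
    with temp_cwd() as cwd:
        with open('scratch.txt', 'w') as f:
            f.write('data')
        assert os.path.samefile(os.getcwd(), cwd)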
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)
# TEST_DATA_DIR is used as a target download location for remote resources
TEST_DATA_DIR = os.path.join(TEST_HOME_DIR, "data")
def findfile(filename, subdir=None):
"""Try to find a file on sys.path or in the test directory. If it is not
found, the argument passed to the function is returned (this does not
necessarily signal failure; it could still be the legitimate path).
Setting *subdir* indicates a relative path to use to find the file
rather than looking directly in the path directories.
"""
if os.path.isabs(filename):
return filename
if subdir is not None:
filename = os.path.join(subdir, filename)
path = [TEST_HOME_DIR] + sys.path
for dn in path:
fn = os.path.join(dn, filename)
if os.path.exists(fn): return fn
return filename
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
check = kw.pop('check', None)
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(TEST_DATA_DIR, filename)
def check_valid_file(fn):
f = open(fn, *args, **kw)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
print('\tfetching %s ...' % url, file=get_original_stdout())
opener = urllib.request.build_opener()
if gzip:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
f = opener.open(url, timeout=15)
if gzip and f.headers.get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=f)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swaps the module, we need to look it up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
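# Illustrative sketch (editorial addition): asserting that a specific warning
# is emitted inside the block, mirroring the docstring above. The message text
# is an example only; note the filter regexp is matched at the start of the
# warning message.
def _example_check_warnings():  # illustrative only
    with check_warnings(("deprecated", DeprecationWarning)) as w:
        warnings.warn("deprecated API", DeprecationWarning)
        assert w.category is DeprecationWarning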
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
# In that case, we also need to delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
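# Illustrative sketch (editorial addition): temporarily overriding and removing
# environment variables; every change is rolled back on exit. The variable
# names are examples only.
def _example_environ_guard():  # illustrative only
    with EnvironmentVarGuard() as env:
        env.set('LANG', 'C')         # overridden for the duration of the block
        env.unset('PYTHONPATH')      # removed for the duration of the block
        assert os.environ['LANG'] == 'C'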
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(OSError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason))) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
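# Illustrative sketch (editorial addition): wrapping network access so that
# transient connectivity problems surface as ResourceDenied (a skip) rather
# than a hard failure. The host name is an example only.
def _example_transient_internet():  # illustrative only
    with transient_internet('www.example.com'):
        with socket.create_connection(('www.example.com', 80), timeout=10) as s:
            s.sendall(b'HEAD / HTTP/1.0\r\n\r\n')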
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can happen in the case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
@contextlib.contextmanager
def disable_gc():
have_gc = gc.isenabled()
gc.disable()
try:
yield
finally:
if have_gc:
gc.enable()
def python_is_optimized():
"""Find if Python was built with optimizations."""
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
return final_opt not in ('', '-O0', '-Og')
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + _align)
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
import _testcapi
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
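# Illustrative sketch (editorial addition): the decorator tries each candidate
# locale in turn and restores the original locale afterwards. The locale names
# are examples and may be unavailable, in which case the current locale is kept.
@run_with_locale('LC_NUMERIC', 'de_DE.UTF-8', 'fr_FR.UTF-8', '')
def _example_locale_formatting():  # illustrative only
    return format(1234.5, 'n')       # grouping/decimal point follow the locale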
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
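# Illustrative sketch (editorial addition): regrtest feeds its -M option to
# set_memlimit(); accepted forms include '2g', '2.5Gb' or '4096m' (anything
# below ~2G is rejected as too low to be useful).
def _example_set_memlimit():  # illustrative only
    set_memlimit('2.5g')             # allow bigmem tests up to ~2.5 GiB
    return max_memuse, real_max_memuse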
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f, stderr=subprocess.DEVNULL)
f.close()
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
"""Decorator for bigmem tests.
'size' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it.
If 'dry_run' is False, it means the test doesn't support dummy runs
when -M is not specified.
"""
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (size * memuse / (1024 ** 3)))
if real_max_memuse and verbose:
print()
print(" ... expected peak memory use: {peak:.1f}G"
.format(peak=size * memuse / (1024 ** 3)))
watchdog = _MemoryWatchdog()
watchdog.start()
else:
watchdog = None
try:
return f(self, maxsize)
finally:
if watchdog:
watchdog.stop()
wrapper.size = size
wrapper.memuse = memuse
return wrapper
return decorator
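# Illustrative sketch (editorial addition): a bigmem test declares its memory
# cost per unit of 'size' and receives the effective size as an argument; with
# no -M limit it still runs with a tiny dummy size. The numbers are examples.
class _ExampleBigMemTests(unittest.TestCase):  # illustrative only
    @bigmemtest(size=_2G, memuse=2)
    def test_repeat(self, size):
        data = b'x' * size
        self.assertEqual(len(data), size)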
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
raise unittest.SkipTest(
"not enough memory: try a 32-bit build instead")
else:
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (MAX_Py_ssize_t / (1024 ** 3)))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
"""Decorator to temporarily turn off tracing for the duration of a test."""
if not hasattr(sys, 'gettrace'):
return func
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def refcount_test(test):
"""Decorator for tests which involve reference counting.
To start, the decorator does not run the test if it is not run by CPython.
After that, any trace function is unset during the test to prevent
unexpected refcounts caused by the trace function.
"""
return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
failfast=failfast)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
def case_pred(test):
if match_tests is None:
return True
for name in test.id().split("."):
if fnmatch.fnmatchcase(name, match_tests):
return True
return False
_filter_suite(suite, case_pred)
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
# Rather than trying to enumerate all the cases where docstrings may be
# disabled, we just check for that directly
def _check_docstrings():
"""Just used to check if docstrings are enabled"""
MISSING_C_DOCSTRINGS = (check_impl_detail() and
sys.platform != 'win32' and
not sysconfig.get_config_var('WITH_DOC_STRINGS'))
HAVE_DOCSTRINGS = (_check_docstrings.__doc__ is not None and
not MISSING_C_DOCSTRINGS)
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
return sys.modules.copy(),
def modules_cleanup(oldmodules):
# Encoders/decoders are registered permanently within the internal
# codec cache. If we destroy the corresponding modules their
# globals will be set to None which will trip up the cached functions.
encodings = [(k, v) for k, v in sys.modules.items()
if k.startswith('encodings.')]
sys.modules.clear()
sys.modules.update(encodings)
# XXX: This kind of problem can affect more than just encodings. In particular
# extension modules (such as _ssl) don't cope with reloading properly.
# Really, test modules should be cleaning out the test specific modules they
# know they added (ala test_runpy) rather than relying on this function (as
# test_importhooks and test_pkg do currently).
# Implicitly imported *real* modules should be left alone (see issue 10556).
sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
if sys.implementation.name == "ironpython":
# disable reap_threads on Mono due to https://github.com/IronLanguages/ironpython3/issues/1005
import clr
if clr.IsMono:
_thread = None
def threading_setup():
if _thread:
threading.current_thread() # ironpython: register the current thread if not running on a known thread
return _thread._count(), threading._dangling.copy()
else:
return 1, ()
def threading_cleanup(*original_values):
if not _thread:
return
_MAX_COUNT = 100
for count in range(_MAX_COUNT):
values = _thread._count(), threading._dangling
if values == original_values:
break
time.sleep(0.01)
gc_collect()
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not _thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def start_threads(threads, unlock=None):
threads = list(threads)
started = []
try:
try:
for t in threads:
t.start()
started.append(t)
except:
if verbose:
print("Can't start %d threads, only %d threads started" %
(len(threads), len(started)))
raise
yield
finally:
try:
if unlock:
unlock()
endtime = starttime = time.time()
for timeout in range(1, 16):
endtime += 60
for t in started:
t.join(max(endtime - time.time(), 0.01))
started = [t for t in started if t.isAlive()]
if not started:
break
if verbose:
print('Unable to join %d threads during a period of '
'%d minutes' % (len(started), timeout))
finally:
started = [t for t in started if t.isAlive()]
if started:
faulthandler.dump_traceback(sys.stdout)
raise AssertionError('Unable to join %d threads' % len(started))
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
"""Temporary swap out an item with a new object.
Usage:
with swap_item(obj, "item", 5):
...
This will set obj["item"] to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `item` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if item in obj:
real_val = obj[item]
obj[item] = new_val
try:
yield
finally:
obj[item] = real_val
else:
obj[item] = new_val
try:
yield
finally:
del obj[item]
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
return stderr
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
return subprocess._args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
def __init__(self, matcher):
# BufferingHandler takes a "capacity" argument
# so as to know when to flush. As we're overriding
# shouldFlush anyway, we can set a capacity of zero.
# You can call flush() manually to clear out the
# buffer.
logging.handlers.BufferingHandler.__init__(self, 0)
self.matcher = matcher
def shouldFlush(self):
return False
def emit(self, record):
self.format(record)
self.buffer.append(record.__dict__)
def matches(self, **kwargs):
"""
Look for a saved dict whose keys/values match the supplied arguments.
"""
result = False
for d in self.buffer:
if self.matcher.matches(d, **kwargs):
result = True
break
return result
class Matcher(object):
_partial_matches = ('msg', 'message')
def matches(self, d, **kwargs):
"""
Try to match a single dict with the supplied arguments.
Keys whose values are strings and which are in self._partial_matches
will be checked for partial (i.e. substring) matches. You can extend
this scheme to (for example) do regular expression matching, etc.
"""
result = True
for k in kwargs:
v = kwargs[k]
dv = d.get(k)
if not self.match_value(k, dv, v):
result = False
break
return result
def match_value(self, k, dv, v):
"""
Try to match a single stored value (dv) with a supplied value (v).
"""
if type(v) != type(dv):
result = False
elif type(dv) is not str or k not in self._partial_matches:
result = (v == dv)
else:
result = dv.find(v) >= 0
return result
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
_can_xattr = None
def can_xattr():
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
tmp_fp, tmp_name = tempfile.mkstemp()
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match(r"2\.6\.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
def fs_is_case_insensitive(directory):
"""Detects if the file system for the specified directory is case-insensitive."""
with tempfile.NamedTemporaryFile(dir=directory) as base:
base_path = base.name
case_path = base_path.upper()
if case_path == base_path:
case_path = base_path.lower()
try:
return os.path.samefile(base_path, case_path)
except FileNotFoundError:
return False
class SuppressCrashReport:
    """Try to prevent a crash report from popping up.

    On Windows, don't display the Windows Error Reporting dialog. On UNIX,
    disable the creation of a coredump file.
    """
    old_value = None

    def __enter__(self):
        """On Windows, disable Windows Error Reporting dialogs using
        SetErrorMode.

        On UNIX, try to save the previous core file size limit, then set
        the soft limit to 0.
        """
        if sys.platform.startswith('win'):
            # see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
            # GetErrorMode is not available on Windows XP and Windows Server 2003,
            # but SetErrorMode returns the previous value, so we can use that
            import ctypes
            self._k32 = ctypes.windll.kernel32
            SEM_NOGPFAULTERRORBOX = 0x02
            self.old_value = self._k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
            self._k32.SetErrorMode(self.old_value | SEM_NOGPFAULTERRORBOX)
        else:
            if resource is not None:
                try:
                    self.old_value = resource.getrlimit(resource.RLIMIT_CORE)
                    resource.setrlimit(resource.RLIMIT_CORE,
                                       (0, self.old_value[1]))
                except (ValueError, OSError):
                    pass
            if sys.platform == 'darwin':
                # Check if the 'Crash Reporter' on OS X was configured
                # in 'Developer' mode and warn that it will get triggered
                # when it is.
                #
                # This assumes that this context manager is used in tests
                # that might trigger the Crash Reporter.
                value = subprocess.Popen(['/usr/bin/defaults', 'read',
                                          'com.apple.CrashReporter', 'DialogType'],
                                         stdout=subprocess.PIPE).communicate()[0]
                if value.strip() == b'developer':
                    print("this test triggers the Crash Reporter, "
                          "that is intentional", end='', flush=True)
        return self

    def __exit__(self, *ignore_exc):
        """Restore Windows ErrorMode or core file behavior to initial value."""
        if self.old_value is None:
            return
        if sys.platform.startswith('win'):
            self._k32.SetErrorMode(self.old_value)
        else:
            if resource is not None:
                try:
                    resource.setrlimit(resource.RLIMIT_CORE, self.old_value)
                except (ValueError, OSError):
                    pass
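
# --- Editor's usage sketch (not part of the original module) -----------------
# Typical use: wrap code that is expected to crash a child process (here a
# deliberate segfault via ctypes) so the platform's crash-report dialog or
# core dump does not interfere with the test run. The command is illustrative.
def _example_suppress_crash_report():
    with SuppressCrashReport():
        subprocess.call([sys.executable, '-c',
                         'import ctypes; ctypes.string_at(0)'])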
def patch(test_instance, object_to_patch, attr_name, new_value):
    """Override 'object_to_patch'.'attr_name' with 'new_value'.

    Also, add a cleanup procedure to 'test_instance' to restore
    'object_to_patch' value for 'attr_name'.
    The 'attr_name' should be a valid attribute for 'object_to_patch'.
    """
    # check that 'attr_name' is a real attribute for 'object_to_patch'
    # will raise AttributeError if it does not exist
    getattr(object_to_patch, attr_name)

    # keep a copy of the old value
    attr_is_local = False
    try:
        old_value = object_to_patch.__dict__[attr_name]
    except (AttributeError, KeyError):
        old_value = getattr(object_to_patch, attr_name, None)
    else:
        attr_is_local = True

    # restore the value when the test is done
    def cleanup():
        if attr_is_local:
            setattr(object_to_patch, attr_name, old_value)
        else:
            delattr(object_to_patch, attr_name)

    test_instance.addCleanup(cleanup)

    # actually override the attribute
    setattr(object_to_patch, attr_name, new_value)
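
# --- Editor's usage sketch (not part of the original module) -----------------
# Called from inside a test, patch() swaps an attribute and registers the undo
# via addCleanup(), so the original value is restored even if the test fails.
# The target (sys.stdout) and the captured text are illustrative.
def _example_patch_usage(test_case):
    import io
    captured = io.StringIO()
    patch(test_case, sys, 'stdout', captured)
    print("hello")
    test_case.assertEqual(captured.getvalue(), "hello\n")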
def run_in_subinterp(code):
    """
    Run code in a subinterpreter. Raise unittest.SkipTest if the tracemalloc
    module is enabled.
    """
    # Issue #10915, #15751: PyGILState_*() functions don't work with
    # sub-interpreters, the tracemalloc module uses these functions internally
    try:
        import tracemalloc
    except ImportError:
        pass
    else:
        if tracemalloc.is_tracing():
            raise unittest.SkipTest("run_in_subinterp() cannot be used "
                                    "if tracemalloc module is tracing "
                                    "memory allocations")
    import _testcapi
    return _testcapi.run_in_subinterp(code)
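
# --- Editor's usage sketch (not part of the original module) -----------------
# run_in_subinterp() takes a source string and executes it in a fresh
# sub-interpreter via _testcapi; the returned status is 0 on success.
# The code string below is illustrative.
def _example_run_in_subinterp():
    ret = run_in_subinterp("import builtins\n"
                           "builtins.print('hello from a sub-interpreter')\n")
    assert ret == 0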
| [] | [] | ["TZ"] | [] | ["TZ"] | python | 1 | 0 | |
cabutils.py | """
CAB-level utils
"""
import sys
import os
import configparser
import pathlib

import pymongo as pm

#########################
########GENERAL##########
#########################

# load default settings
settings = configparser.ConfigParser()
this_dir = pathlib.Path(__file__).parent.absolute()
settings_file = os.path.join(this_dir, "settings.conf")
settings.read(settings_file)

# expose several specific default settings
DEFAULT_CONFIG_FILENAME = settings['DEFAULTS']['CONFIG_FILENAME']
DEFAULT_MONGODB_PORT = settings['DEFAULTS']['MONGODB_PORT']
DEFAULT_MONGODB_HOST = settings['DEFAULTS']['MONGODB_HOST']
DEFAULT_MONGODB_USER = settings['DEFAULTS']['MONGODB_USER']

# load the user-level config file
# location of this file can be set by environment variable "CAB_CONFIGFILE"
# or it can be the default location ~/.cabconfig
if "CAB_CONFIGFILE" in os.environ:
    CONFIGFILE = os.environ["CAB_CONFIGFILE"]
else:
    CONFIGFILE = os.path.join(os.environ["HOME"], DEFAULT_CONFIG_FILENAME)

_cab_configs = None

def get_cab_configs():
    """actually get the user-level cab configs
    """
    global _cab_configs
    if _cab_configs is None:
        if os.path.exists(CONFIGFILE):
            config = configparser.ConfigParser()
            config.read(CONFIGFILE)
            _cab_configs = config
        else:
            print("No config exists at path %s, check settings" % CONFIGFILE)
            sys.exit()
    return _cab_configs
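
# --- Editor's usage sketch (not part of the original module) -----------------
# An illustration of the ~/.cabconfig (or $CAB_CONFIGFILE) layout that
# get_cab_configs() and the DB helpers below expect. The username, password,
# host and port values are made-up placeholders, not real credentials.
def _example_write_cabconfig(path):
    example = configparser.ConfigParser()
    example['DB'] = {
        'username': 'cabuser',
        'password': 'not-a-real-password',
        'host': 'localhost',
        'port': '27017',
    }
    with open(path, 'w') as f:
        example.write(f)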
#########################
########MONGODB##########
#########################

def get_db_port():
    """get db port, either from config file if specified, otherwise default

    to specify the port, .cabconfig should have a section of the form:
    [DB]
    ...
    port=DESIRED_PORT
    ...
    """
    configs = get_cab_configs()
    if 'port' in configs['DB']:
        return configs['DB']['port']
    else:
        return DEFAULT_MONGODB_PORT

def get_db_host():
    """get db host, either from config file if specified, otherwise default

    to specify the host, .cabconfig should have a section of the form:
    [DB]
    ...
    host=DESIRED_HOST
    ...
    """
    configs = get_cab_configs()
    if 'host' in configs['DB']:
        return configs['DB']['host']
    else:
        return DEFAULT_MONGODB_HOST

def get_db_user():
    """get db user, either from config file if specified, otherwise default

    to specify the username, .cabconfig should have a section of the form:
    [DB]
    ...
    username=DESIRED_USERNAME
    ...
    """
    configs = get_cab_configs()
    if 'username' in configs['DB']:
        return configs['DB']['username']
    else:
        return DEFAULT_MONGODB_USER

def get_db_connection(connectionTimeoutMS=5000):
    """get DB connection.

    user-level config file must exist (see above) and have a
    section with the form:
    [DB]
    username=[...]
    password=[...]
    """
    configs = get_cab_configs()
    user = get_db_user()
    pwd = configs['DB']['password']
    host = get_db_host()
    port = get_db_port()
    connstr = "mongodb://%s:%s@%s:%s" % (user, pwd, host, port)
    conn = pm.MongoClient(connstr, serverSelectionTimeoutMS=connectionTimeoutMS)
    print("Checking database connection...")
    conn.server_info()
    print("Connection established!")
    return conn
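
# --- Editor's usage sketch (not part of the original module) -----------------
# Once ~/.cabconfig (or $CAB_CONFIGFILE) has a [DB] section like the example
# above, the connection behaves as a normal pymongo client. The database and
# collection names below are illustrative.
def _example_db_usage():
    conn = get_db_connection(connectionTimeoutMS=2000)
    db = conn["cab_example_db"]
    db["runs"].insert_one({"status": "ok"})
    return db["runs"].count_documents({"status": "ok"})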
| [] | [] | ["HOME", "CAB_CONFIGFILE"] | [] | ["HOME", "CAB_CONFIGFILE"] | python | 2 | 0 |