filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0, ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
tests/00_sgxlkl_server_client.py | #!/usr/bin/env python
import os
from subprocess import *
from time import sleep
from shlex import split
import sys

external_iface = 'eth0'
if 'EXTERNAL_IFACE' in os.environ:
    external_iface = os.environ['EXTERNAL_IFACE']

class SGXLKLTestCase():
    socat_process = None

    def setup_iptables(self):
        cmds = ["sudo ip tuntap add dev sgxlkl_tap0 mode tap user `whoami`",
                "sudo ip link set dev sgxlkl_tap0 up",
                "sudo ip addr add dev sgxlkl_tap0 10.0.1.254/24",
                "sudo iptables -I FORWARD -i sgxlkl_tap0 -o "+external_iface+" -s 10.0.1.0/24 -m conntrack --ctstate NEW -j ACCEPT",
                "sudo iptables -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
                "sudo iptables -t nat -I POSTROUTING -o "+external_iface+" -j MASQUERADE"]
        for cmd in cmds:
            check_call(cmd, shell=True)

    def setup(self):
        self.setup_iptables()
        self.socat_process = Popen("exec socat -t10 TCP-LISTEN:1234,bind=10.0.1.254,reuseaddr,fork,range=10.0.1.0/8 UNIX-CLIENT:/var/run/aesmd/aesm.socket", shell=True)
        sleep(1)
        assert self.socat_process.poll() == None

    def teardown_iptables(self):
        cmds = ["sudo iptables -t nat -D POSTROUTING -o "+external_iface+" -j MASQUERADE",
                "sudo iptables -D FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
                "sudo iptables -D FORWARD -s 10.0.1.0/24 -i sgxlkl_tap0 -o "+external_iface+" -m conntrack --ctstate NEW -j ACCEPT",
                "sudo ip tuntap del dev sgxlkl_tap0 mode tap"]
        for cmd in cmds:
            check_call(split(cmd))

    def teardown(self):
        self.socat_process.terminate()
        self.socat_process = None
        self.teardown_iptables()

    def main(self):
        server_process = None
        cmd = 'exec sgxlkl/sgx-lkl/build/sgx-lkl-run sgxlkl/sgx-lkl/apps/ratls/sgxlkl-miniroot-fs.img /sgxlkl-wolfssl-ssl-server'
        env = dict(os.environ)
        env.update({'SGXLKL_TAP' : 'sgxlkl_tap0',
                    'SGXLKL_VERBOSE' : '1',
                    'RATLS_AESMD_IP' : '10.0.1.254'})
        server_process = Popen(cmd, env=env, shell=True)
        sleep(10)
        assert server_process.poll() == None
        check_call(split('./openssl-client -p 11111 -h 10.0.1.1'))
        server_process.terminate()
        sleep(1)
        assert server_process.poll() != None
| [] | [] | ["EXTERNAL_IFACE"] | [] | ["EXTERNAL_IFACE"] | python | 1 | 0 | |
examples/variables-de-entorno/environment-variables.go | // [Environment variables](http://es.wikipedia.org/wiki/Variable_de_entorno)
// are a universal mechanism for [passing configuration data to our
// programs](http://www.12factor.net/config).
// Below we look at how to set, get, and list environment variables.
package main

import "os"
import "strings"
import "fmt"

func main() {
	// To set a variable/value pair, use `os.Setenv`. To get the value of an
	// environment variable, use `os.Getenv`; the latter returns an empty
	// string if the variable is not defined in the environment.
	os.Setenv("FOO", "1")
	fmt.Println("FOO:", os.Getenv("FOO"))
	fmt.Println("BAR:", os.Getenv("BAR"))

	// Use `os.Environ` to list all variable/value pairs present in the
	// environment. This function returns a slice of strings in the form
	// `NAME=value`. You can use `strings.Split` or `strings.SplitN` on these
	// values to separate the variable name from its value. The following
	// example prints the names of all variables defined in the environment.
	fmt.Println()
	for _, e := range os.Environ() {
		pair := strings.Split(e, "=")
		fmt.Println(pair[0])
	}
}
| ["\"FOO\"", "\"BAR\""] | [] | ["BAR", "FOO"] | [] | ["BAR", "FOO"] | go | 2 | 0 | |
test/kubernetes/tools.go | package kubernetes
import (
"bufio"
"errors"
"fmt"
"net"
"os"
"os/exec"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/coredns/caddy"
"github.com/coredns/coredns/plugin/test"
ctest "github.com/coredns/coredns/test"
"github.com/miekg/dns"
// Load all managed plugins in github.com/coredns/coredns
_ "github.com/coredns/coredns/core/plugin"
)
// DoIntegrationTest executes a test case
func DoIntegrationTest(tc test.Case, namespace string) (*dns.Msg, error) {
digCmd := "dig -t " + dns.TypeToString[tc.Qtype] + " " + tc.Qname + " +search +showsearch +time=10 +tries=6"
// attach to client and execute query.
var cmdout string
var err error
tries := 3
for {
cmdout, err = Kubectl("-n " + namespace + " exec " + clientName + " -- " + digCmd)
if err == nil {
break
}
tries = tries - 1
if tries == 0 {
return nil, errors.New("failed to execute query '" + digCmd + "' got error: '" + err.Error() + "'")
}
time.Sleep(500 * time.Millisecond)
}
results, err := ParseDigResponse(cmdout)
if err != nil {
return nil, errors.New("failed to parse result: (" + err.Error() + ")" + cmdout)
}
if len(results) != 1 {
resultStr := ""
for i, r := range results {
resultStr += fmt.Sprintf("\nResponse %v\n", i) + r.String()
}
return nil, errors.New("expected 1 query attempt, observed " + strconv.Itoa(len(results)) + resultStr)
}
return results[0], nil
}
// DoIntegrationTests executes test cases
func DoIntegrationTests(t *testing.T, testCases []test.Case, namespace string) {
err := StartClientPod(namespace)
if err != nil {
t.Fatalf("failed to start client pod: %s", err)
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("%s %s", tc.Qname, dns.TypeToString[tc.Qtype]), func(t *testing.T) {
res, err := DoIntegrationTest(tc, namespace)
if err != nil {
t.Errorf(err.Error())
}
test.CNAMEOrder(res)
sort.Sort(test.RRSet(tc.Answer))
sort.Sort(test.RRSet(tc.Ns))
sort.Sort(test.RRSet(tc.Extra))
if err := test.SortAndCheck(res, tc); err != nil {
t.Error(err)
}
if t.Failed() {
t.Errorf("coredns log: %s", CorednsLogs())
}
})
}
}
// StartClientPod starts a dns client pod in the namespace
func StartClientPod(namespace string) error {
_, err := Kubectl("-n " + namespace + " run " + clientName + " --image=infoblox/dnstools --restart=Never -- -c 'while [ 1 ]; do sleep 100; done'")
if err != nil {
// ignore error (pod already running)
return nil
}
maxWait := 60 // 60 seconds
for {
o, _ := Kubectl("-n " + namespace + " get pod " + clientName)
if strings.Contains(o, "Running") {
return nil
}
time.Sleep(time.Second)
maxWait = maxWait - 1
if maxWait == 0 {
break
}
}
return errors.New("timeout waiting for " + clientName + " to be ready")
}
// WaitForClientPodRecord waits for the client pod A record to be served by CoreDNS
func WaitForClientPodRecord(namespace string) error {
maxWait := 120 // 120 seconds
for {
dashedip, err := Kubectl("-n " + namespace + " get pods -o wide " + clientName + " | grep " + clientName + " | awk '{print $6}' | tr . - | tr -d '\n'")
if err == nil && dashedip != "" {
digcmd := "dig -t a " + dashedip + "." + namespace + ".pod.cluster.local. +short | tr -d '\n'"
digout, err := Kubectl("-n " + namespace + " exec " + clientName + " -- " + digcmd)
if err == nil && digout != "" {
return nil
}
}
// wait and try again until timeout
time.Sleep(time.Second)
maxWait = maxWait - 1
if maxWait == 0 {
break
}
}
return errors.New("timeout waiting for " + clientName + " A record.")
}
// UpstreamServer starts a local instance of coredns with the given zone file
func UpstreamServer(t *testing.T, zone, zoneFile string) (func(), *caddy.Instance, string) {
upfile, rmFunc, err := test.TempFile(os.TempDir(), zoneFile)
if err != nil {
t.Fatalf("could not create file for CNAME upstream lookups: %s", err)
}
upstreamCorefile := `.:0 {
file ` + upfile + ` ` + zone + `
bind ` + locaIP().String() + `
}`
server, udp, _, err := ctest.CoreDNSServerAndPorts(upstreamCorefile)
if err != nil {
t.Fatalf("could not get CoreDNS serving instance: %s", err)
}
return rmFunc, server, udp
}
// localIP returns the local system's first ipv4 non-loopback address
func locaIP() net.IP {
addrs, err := net.InterfaceAddrs()
if err != nil {
return nil
}
for _, addr := range addrs {
ip, _, _ := net.ParseCIDR(addr.String())
ip = ip.To4()
if ip == nil || ip.IsLoopback() {
continue
}
return ip
}
return nil
}
// LoadCorefile calls LoadCorefileAndZonefile without a zone file
func LoadCorefile(corefile string) error {
return LoadCorefileAndZonefile(corefile, "", true)
}
// LoadCorefileAndZonefile constructs a configmap defining files for the corefile and zone,
// If restart is true, restarts the coredns pod to load the new configmap, and waits for the coredns pod to be ready.
func LoadCorefileAndZonefile(corefile, zonefile string, restart bool) error {
// apply configmap yaml
yamlString := configmap + "\n"
yamlString += " Corefile: |\n" + prepForConfigMap(corefile)
yamlString += " Zonefile: |\n" + prepForConfigMap(zonefile)
file, rmFunc, err := test.TempFile(os.TempDir(), yamlString)
if err != nil {
return err
}
defer rmFunc()
_, err = Kubectl("apply -f " + file)
if err != nil {
return err
}
if restart {
// force coredns pod reload the config
Kubectl("-n kube-system delete pods -l k8s-app=kube-dns")
return WaitReady(30)
}
return nil
}
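// LoadKubednsConfigmap applies a kube-dns configmap containing the given stubDomains and upstreamNameservers data.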
func LoadKubednsConfigmap(stubdata, upstreamdata string) error {
//apply configmap yaml
yamlString := KubednsConfigmap + "\n"
yamlString += " upstreamNameservers: |\n" + prepForConfigMap(upstreamdata)
yamlString += " stubDomains: |\n" + prepForConfigMap(stubdata)
file, rmFunc, err := test.TempFile(os.TempDir(), yamlString)
if err != nil {
return err
}
defer rmFunc()
_, err = Kubectl("apply -f " + file)
if err != nil {
return err
}
return nil
}
// WaitReady waits for 1 coredns to be ready or times out after maxWait seconds with an error
func WaitReady(maxWait int) error {
return WaitNReady(maxWait, 1)
}
// WaitNReady waits for n coredns pods to be ready or times out after maxWait seconds with an error
func WaitNReady(maxWait, n int) error {
for {
o, _ := Kubectl("-n kube-system get pods -l k8s-app=kube-dns -o jsonpath='{.items[*].status.containerStatuses[*].ready}'")
if strings.Count(o, "true") == n {
break
}
time.Sleep(time.Second)
maxWait = maxWait - 1
if maxWait == 0 {
logs := CorednsLogs()
return errors.New("timeout waiting for coredns to be ready. coredns log: " + logs)
}
}
return nil
}
// CorednsLogs returns the current coredns log
func CorednsLogs() string {
name, _ := Kubectl("-n kube-system get pods -l k8s-app=kube-dns | grep coredns | cut -f1 -d' ' | tr -d '\n'")
logs, _ := Kubectl("-n kube-system logs " + name)
return logs
}
// prepForConfigMap returns a config prepared for inclusion in a configmap definition
func prepForConfigMap(config string) string {
var configOut string
lines := strings.Split(config, "\n")
for _, line := range lines {
// replace all tabs with spaces
line = strings.Replace(line, "\t", " ", -1)
// indent line with 4 additional spaces
configOut += " " + line + "\n"
}
return configOut
}
// CoreDNSPodIPs return the ips of all coredns pods
func CoreDNSPodIPs() ([]string, error) {
lines, err := Kubectl("-n kube-system get pods -l k8s-app=kube-dns -o wide | awk '{print $6}' | tail -n+2")
if err != nil {
return nil, err
}
var ips []string
for _, l := range strings.Split(lines, "\n") {
p := net.ParseIP(l)
if p == nil {
continue
}
ips = append(ips, p.String())
}
return ips, nil
}
// HasResourceRestarted verifies if any of the specified containers in the kube-system namespace has restarted.
func HasResourceRestarted(label string) (bool, error) {
// magic number
wait := 5
for {
hasRestarted := false
restartCount, err := Kubectl(fmt.Sprintf("-n kube-system get pods -l %s -ojsonpath='{.items[*].status.containerStatuses[0].restartCount}'", label))
if err != nil {
return false, err
}
individualCount := strings.Split(restartCount, " ")
for _, count := range individualCount {
if count != "0" {
hasRestarted = true
}
}
if hasRestarted {
break
}
time.Sleep(time.Second)
wait--
if wait == 0 {
return false, nil
}
}
return true, nil
}
// FetchDockerContainerID fetches the docker container ID from the container name
func FetchDockerContainerID(containerName string) (string, error) {
containerID, err := exec.Command("sh", "-c", fmt.Sprintf("docker ps -aqf \"name=%s\"", containerName)).CombinedOutput()
if err != nil {
return "", errors.New("error executing docker command to fetch container ID")
}
if containerID == nil {
return "", errors.New("no containerID found")
}
return strings.TrimSpace(string(containerID)), nil
}
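// ScrapeMetrics fetches the CoreDNS Prometheus metrics by curling the metrics endpoint from inside the kind control-plane container.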
func ScrapeMetrics(t *testing.T) []byte {
containerID, err := FetchDockerContainerID("kind-control-plane")
if err != nil {
t.Fatalf("docker container ID not found, err: %s", err)
}
ips, err := CoreDNSPodIPs()
if err != nil {
t.Errorf("could not get coredns pod ip: %v", err)
}
if len(ips) != 1 {
t.Errorf("expected 1 pod ip, found: %v", len(ips))
}
ip := ips[0]
cmd := fmt.Sprintf("docker exec -i %s /bin/sh -c \"curl -s http://%s:9153/metrics\"", containerID, ip)
mf, err := exec.Command("sh", "-c", cmd).CombinedOutput()
if err != nil {
t.Errorf("error while trying to run command in docker container: %s %v", err, mf)
}
if len(mf) == 0 {
t.Errorf("unable to scrape metrics from %v", ip)
}
return mf
}
// Kubectl executes the kubectl command with the given arguments
func Kubectl(args string) (result string, err error) {
kctl := os.Getenv("KUBECTL")
if kctl == "" {
kctl = "kubectl"
}
cmdOut, err := exec.Command("sh", "-c", kctl+" "+args).CombinedOutput()
if err != nil {
return "", errors.New("got error '" + string(cmdOut) + "' for command " + kctl + " " + args)
}
return string(cmdOut), nil
}
// ParseDigResponse parses dig-like command output and returns a dns.Msg
func ParseDigResponse(r string) ([]*dns.Msg, error) {
s := bufio.NewScanner(strings.NewReader(r))
var msgs []*dns.Msg
var err error
for err == nil {
m, err := parseDig(s)
if err != nil {
break
}
if m == nil {
return nil, errors.New("Unexpected nil message")
}
msgs = append(msgs, m)
}
if len(msgs) == 0 {
return nil, err
}
return msgs, nil
}
// parseDig parses a single dig-like response and returns a dns.Msg
func parseDig(s *bufio.Scanner) (*dns.Msg, error) {
m := new(dns.Msg)
err := parseDigHeader(s, m)
if err != nil {
return nil, err
}
err = parseDigQuestion(s, m)
if err != nil {
return nil, err
}
err = parseDigSections(s, m)
if err != nil {
return nil, err
}
return m, nil
}
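// parseDigHeader scans to the ";; ->>HEADER<<- " line and fills in the opcode, status (rcode) and id of the message.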
func parseDigHeader(s *bufio.Scanner, m *dns.Msg) error {
headerSection := ";; ->>HEADER<<- "
for {
if strings.HasPrefix(s.Text(), headerSection) {
break
}
if !s.Scan() {
return errors.New("header section not found")
}
}
l := s.Text()
l = strings.Replace(l, headerSection, "", 1)
nvps := strings.Split(l, ", ")
for _, nvp := range nvps {
nva := strings.Split(nvp, ": ")
if nva[0] == "opcode" {
m.Opcode = invertIntMap(dns.OpcodeToString)[nva[1]]
}
if nva[0] == "status" {
m.Rcode = invertIntMap(dns.RcodeToString)[nva[1]]
}
if nva[0] == "id" {
i, err := strconv.Atoi(nva[1])
if err != nil {
return err
}
m.MsgHdr.Id = uint16(i)
}
}
return nil
}
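// parseDigQuestion scans to the question section and sets the query name and type on the message.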
func parseDigQuestion(s *bufio.Scanner, m *dns.Msg) error {
for {
if strings.HasPrefix(s.Text(), ";; QUESTION SECTION:") {
break
}
if !s.Scan() {
return errors.New("question section not found")
}
}
s.Scan()
l := s.Text()
l = strings.TrimLeft(l, ";")
fields := strings.Fields(l)
m.SetQuestion(fields[0], invertUint16Map(dns.TypeToString)[fields[2]])
return nil
}
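// parseDigSections reads the answer, authority and additional sections into the message.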
func parseDigSections(s *bufio.Scanner, m *dns.Msg) error {
var section string
for s.Scan() {
if strings.HasSuffix(s.Text(), " SECTION:") {
section = strings.Fields(s.Text())[1]
continue
}
if s.Text() == "" {
continue
}
if strings.HasPrefix(s.Text(), ";;") {
break
}
r, err := dns.NewRR(s.Text())
if err != nil {
return err
}
if section == "ANSWER" {
m.Answer = append(m.Answer, r)
}
if section == "AUTHORITY" {
m.Ns = append(m.Ns, r)
}
if section == "ADDITIONAL" {
m.Extra = append(m.Extra, r)
}
}
return nil
}
func invertIntMap(m map[int]string) map[string]int {
n := make(map[string]int)
for k, v := range m {
n[v] = k
}
return n
}
func invertUint16Map(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16)
for k, v := range m {
n[v] = k
}
return n
}
// configmap is the header used for defining the coredns configmap
const (
configmap = `apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:`
// KubednsConfigmap is the header used for defining the kube-dns configmap
KubednsConfigmap = `apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
data:
`
// ExampleNet is an example upstream zone file
ExampleNet = `; example.net. test file for cname tests
example.net. IN SOA ns.example.net. admin.example.net. 2015082541 7200 3600 1209600 3600
example.net. IN A 13.14.15.16
`
clientName = "coredns-test-client"
CoreDNSLabel = "k8s-app=kube-dns"
APIServerLabel = "component=kube-apiserver"
)
| ["\"KUBECTL\""] | [] | ["KUBECTL"] | [] | ["KUBECTL"] | go | 1 | 0 | |
pkg/log/log.go | package log
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/jesseduffield/lazydocker/pkg/config"
"github.com/sirupsen/logrus"
)
// NewLogger returns a new logger
func NewLogger(config *config.AppConfig, rollrusHook string) *logrus.Entry {
var log *logrus.Logger
if config.Debug || os.Getenv("DEBUG") == "TRUE" {
log = newDevelopmentLogger(config)
} else {
log = newProductionLogger()
}
// highly recommended: tail -f development.log | humanlog
// https://github.com/aybabtme/humanlog
log.Formatter = &logrus.JSONFormatter{}
return log.WithFields(logrus.Fields{
"debug": config.Debug,
"version": config.Version,
"commit": config.Commit,
"buildDate": config.BuildDate,
})
}
func getLogLevel() logrus.Level {
strLevel := os.Getenv("LOG_LEVEL")
level, err := logrus.ParseLevel(strLevel)
if err != nil {
return logrus.DebugLevel
}
return level
}
func newDevelopmentLogger(config *config.AppConfig) *logrus.Logger {
log := logrus.New()
log.SetLevel(getLogLevel())
file, err := os.OpenFile(filepath.Join(config.ConfigDir, "development.log"), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o666)
if err != nil {
fmt.Println("unable to log to file")
os.Exit(1)
}
log.SetOutput(file)
return log
}
func newProductionLogger() *logrus.Logger {
log := logrus.New()
log.Out = ioutil.Discard
log.SetLevel(logrus.ErrorLevel)
return log
}
| ["\"DEBUG\"", "\"LOG_LEVEL\""] | [] | ["LOG_LEVEL", "DEBUG"] | [] | ["LOG_LEVEL", "DEBUG"] | go | 2 | 0 | |
authentication/models/core.py | import pytz
from datetime import datetime
from typing import Optional, List

from tortoise import models, fields
from tortoise.manager import Manager
from limeutils import modstr

from app.authentication.models.manager import ActiveManager


class DTMixin(object):
    deleted_at = fields.DatetimeField(null=True)
    updated_at = fields.DatetimeField(auto_now=True)
    created_at = fields.DatetimeField(auto_now_add=True)


class SharedMixin(object):
    full = Manager()

    def to_dict(self, exclude: Optional[List[str]] = None):
        d = {}
        exclude = ['created_at', 'deleted_at', 'updated_at'] if exclude is None else exclude
        for field in self._meta.db_fields:  # noqa
            if hasattr(self, field) and field not in exclude:
                d[field] = getattr(self, field)
        return d

    async def soft_delete(self):
        self.deleted_at = datetime.now(tz=pytz.UTC)  # noqa
        await self.save(update_fields=['deleted_at'])  # noqa


class Option(SharedMixin, models.Model):
    name = fields.CharField(max_length=20)
    value = fields.CharField(max_length=191)
    user = fields.ForeignKeyField('models.UserMod', related_name='options', null=True)
    is_active = fields.BooleanField(default=True)
    admin_only = fields.BooleanField(default=False)
    deleted_at = fields.DatetimeField(null=True)
    updated_at = fields.DatetimeField(auto_now=True)

    full = Manager()

    class Meta:
        table = 'core_option'
        manager = ActiveManager()

    def __str__(self):
        return modstr(self, 'name')


class Taxonomy(DTMixin, SharedMixin, models.Model):
    name = fields.CharField(max_length=191)
    type = fields.CharField(max_length=20)
    sort = fields.SmallIntField(default=100)
    author = fields.ForeignKeyField('models.UserMod', related_name='tax_of_author')
    parent = fields.ForeignKeyField('models.Taxonomy', related_name='tax_of_parent')

    class Meta:
        table = 'core_taxonomy'
        manager = ActiveManager()

    def __str__(self):
        return modstr(self, 'name')


# # class HashMod(SharedMixin, models.Model):
# #     user = fields.ForeignKeyField('models.UserMod', related_name='hashes')
# #     hash = fields.CharField(max_length=199, index=True)
# #     use_type = fields.CharField(max_length=20)
# #     expires = fields.DatetimeField(null=True)
# #     created_at = fields.DatetimeField(auto_now_add=True)
# #
# #     class Meta:
# #         table = 'auth_hash'
# #
# #     def __str__(self):
# #         return modstr(self, 'hash')


class TokenMod(models.Model):
    token = fields.CharField(max_length=128, unique=True)
    expires = fields.DatetimeField(index=True)
    is_blacklisted = fields.BooleanField(default=False)
    author = fields.ForeignKeyField('models.UserMod', on_delete=fields.CASCADE,
                                    related_name='author_tokens')

    full = Manager()

    class Meta:
        table = 'auth_token'
        manager = ActiveManager()

    def __str__(self):
        return modstr(self, 'token') | [] | [] | [] | [] | [] | python | null | null | null |
source/lambda/firehose_topic_proxy/test/test_lambda_function.py | #!/usr/bin/env python
######################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import json
import unittest
from unittest.mock import patch

import boto3
import pytest
from moto import mock_kinesis


def create_s3_delivery_stream(client, stream_name):
    return client.create_delivery_stream(
        DeliveryStreamName=stream_name,
        DeliveryStreamType="DirectPut",
        ExtendedS3DestinationConfiguration={
            "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(123456789012),
            "BucketARN": "arn:aws:s3:::kinesis-test",
            "Prefix": "myFolder/",
            "CompressionFormat": "UNCOMPRESSED",
            "DataFormatConversionConfiguration": {
                "Enabled": True,
                "InputFormatConfiguration": {"Deserializer": {"HiveJsonSerDe": {}}},
                "OutputFormatConfiguration": {"Serializer": {"ParquetSerDe": {"Compression": "UNCOMPRESSED"}}},
                "SchemaConfiguration": {
                    "DatabaseName": "socialmediadb",
                    "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(123456789012),
                    "TableName": "topics",
                },
            },
        },
    )


@mock_kinesis
def test_lambda_function_with_topic_event():
    firehose = boto3.client("firehose", region_name="us-east-1")
    create_s3_delivery_stream(firehose, "Topics")

    from lambda_function import handler

    topic_event = {
        "version": "0",
        "id": "de55e880-0f1d-4b1d-982e-23ed13e45aaa",
        "detail-type": "topics",
        "source": "com.analyze.topic.inference.topics",
        "account": "FAKEACCOUNT",
        "time": "2020-06-24T17:16:02Z",
        "region": "us-west-2",
        "resources": [],
        "detail": {
            "000": [
                {
                    "job_id": "1234567890123456789012345",
                    "job_timestamp": "2020-06-26T19:05:16.785Z",
                    "topic": "000",
                    "term": "health",
                    "weight": "0.09484477",
                },
                {
                    "job_id": "1234567890123456789012345",
                    "job_timestamp": "2020-06-26T19:05:16.785Z",
                    "topic": "000",
                    "term": "walk",
                    "weight": "0.020982718",
                },
                {
                    "job_id": "1234567890123456789012345",
                    "job_timestamp": "2020-06-26T19:05:16.785Z",
                    "topic": "000",
                    "term": "place",
                    "weight": "0.004689377",
                    "created_at": "2020-06-24",
                },
                {
                    "job_id": "1234567890123456789012345",
                    "job_timestamp": "2020-06-26T19:05:16.785Z",
                    "topic": "000",
                    "term": "like",
                    "weight": "0.0056834435",
                },
            ],
            "001": [
                {
                    "job_id": "1234567890123456789012345",
                    "job_timestamp": "2020-06-26T19:05:16.785Z",
                    "topic": "001",
                    "term": "fun",
                    "weight": "0.13023746",
                },
                {
                    "job_id": "1234567890123456789012345",
                    "job_timestamp": "2020-06-26T19:05:16.785Z",
                    "topic": "001",
                    "term": "movie",
                    "weight": "0.002189455",
                },
                {
                    "job_id": "1234567890123456789012345",
                    "job_timestamp": "2020-06-26T19:05:16.785Z",
                    "topic": "001",
                    "term": "song",
                    "weight": "0.002034978",
                },
            ],
        },
    }

    with patch.dict(
        "os.environ",
        {"TOPICS_NS": "com.analyze.topic.inference.topics", "TOPIC_MAPPINGS_NS": "com.analyze.inference.mappings"},
    ):
        handler(topic_event, None)


@mock_kinesis
def test_lambda_function_with_mapping_event():
    firehose = boto3.client("firehose", region_name="us-east-1")
    create_s3_delivery_stream(firehose, "TopicMappings")

    from lambda_function import handler

    mapping_event = {
        "version": "0",
        "id": "b2123492-5ecc-1a7a-33b6-58e9798e9a27",
        "detail-type": "mappings",
        "source": "com.analyze.topic.inference.mappings",
        "account": "FAKEACCOUNT",
        "time": "2020-06-24T17:16:05Z",
        "region": "us-west-2",
        "resources": [],
        "detail": {
            "platform": "twitter",
            "job_id": "1234567890123456789012345",
            "job_timestamp": "2020-06-26T19:05:16.785Z",
            "id_str": "1274357316737957888",
            "topic": "000",
        },
    }

    with patch.dict(
        "os.environ",
        {"TOPICS_NS": "com.analyze.inference.topics", "TOPIC_MAPPINGS_NS": "com.analyze.topic.inference.mappings"},
    ):
        handler(mapping_event, None)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
test/testdata/verify/apps/grpcserver/main.go | package main
import (
"context"
"fmt"
"log"
"net"
"os"
pb "github.com/gebv/grpc-conn-err-human-msg/api/services/simple"
"google.golang.org/grpc"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
addr := ":" + port
lis, err := net.Listen("tcp", addr)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
log.Println("Listen:", addr)
var opts []grpc.ServerOption
grpcServer := grpc.NewServer(opts...)
pb.RegisterSimpleServiceServer(grpcServer, &SimpleServer{})
grpcServer.Serve(lis)
}
type SimpleServer struct {
pb.UnsafeSimpleServiceServer
}
var _ pb.SimpleServiceServer = (*SimpleServer)(nil)
func (s *SimpleServer) Echo(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) {
log.Println("simple/SimpleServer.Echo: Request")
return &pb.EchoResponse{
Out: fmt.Sprintf("in:%q", req.GetIn()),
}, nil
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
internal/database/dbtesting/dbtesting.go | // Package dbtesting provides database test helpers.
package dbtesting
import (
"context"
"database/sql"
"fmt"
"hash/crc32"
"hash/fnv"
"io"
"log"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"github.com/cockroachdb/errors"
"github.com/sourcegraph/sourcegraph/internal/database/dbconn"
)
// MockHashPassword if non-nil is used instead of database.hashPassword. This is useful
// when running tests since we can use a faster implementation.
var (
MockHashPassword func(password string) (sql.NullString, error)
MockValidPassword func(hash, password string) bool
)
func useFastPasswordMocks() {
// We can't care about security in tests, we care about speed.
MockHashPassword = func(password string) (sql.NullString, error) {
h := fnv.New64()
_, _ = io.WriteString(h, password)
return sql.NullString{Valid: true, String: strconv.FormatUint(h.Sum64(), 16)}, nil
}
MockValidPassword = func(hash, password string) bool {
h := fnv.New64()
_, _ = io.WriteString(h, password)
return hash == strconv.FormatUint(h.Sum64(), 16)
}
}
// BeforeTest functions are called before each test is run (by SetupGlobalTestDB).
var BeforeTest []func()
var (
connectOnce sync.Once
connectErr error
)
// setupGlobalTestDB creates a temporary test DB handle, sets
// `dbconn.Global` to it and sets up other test configuration.
func setupGlobalTestDB(t testing.TB) {
useFastPasswordMocks()
if testing.Short() {
t.Skip()
}
connectOnce.Do(func() {
connectErr = initTest()
})
if connectErr != nil {
// only ignore connection errors if not on CI
if os.Getenv("CI") == "" {
t.Skip("Could not connect to DB", connectErr)
}
t.Fatal("Could not connect to DB", connectErr)
}
for _, f := range BeforeTest {
f()
}
emptyDBPreserveSchema(t, dbconn.Global)
}
// GetDB calls setupGlobalTestDB and returns dbconn.Global.
// It is meant to ease the migration away from dbconn.Global.
//
// New callers and callers actually wishing to migrate fully away from a global DB connection
// should use the new ../dbtest package instead of this one.
func GetDB(t testing.TB) *sql.DB {
setupGlobalTestDB(t)
return dbconn.Global
}
func emptyDBPreserveSchema(t testing.TB, d *sql.DB) {
_, err := d.Exec(`SELECT * FROM schema_migrations`)
if err != nil {
t.Fatalf("Table schema_migrations not found: %v", err)
}
var conds []string
conds = append(conds, fmt.Sprintf("table_name != '%s'", dbconn.Frontend.MigrationsTable))
conds = append(conds, fmt.Sprintf("table_name != '%s'", dbconn.CodeIntel.MigrationsTable))
rows, err := d.Query("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' AND " + strings.Join(conds, " AND "))
if err != nil {
t.Fatal(err)
}
var tables []string
for rows.Next() {
var table string
if err := rows.Scan(&table); err != nil {
t.Fatal(err)
}
tables = append(tables, table)
}
if err := rows.Close(); err != nil {
t.Fatal(err)
}
if err := rows.Err(); err != nil {
t.Fatal(err)
}
if testing.Verbose() {
t.Logf("Truncating all %d tables", len(tables))
}
_, err = d.Exec("TRUNCATE " + strings.Join(tables, ", ") + " RESTART IDENTITY")
if err != nil {
t.Fatal(err)
}
}
// initTest creates a test database (dropping it if it already exists), and
// configures this package to use it. It is called by integration tests (in a
// package init func) that need to use a real database.
func initTest() error {
dbname, err := dbName()
if err != nil {
return err
}
if os.Getenv("TEST_SKIP_DROP_DB_BEFORE_TESTS") == "" {
// When running the database-backcompat.sh tests, we need to *keep* the DB around because it has
// the new schema produced by the new version. If we dropped the DB here, then we'd recreate
// it at the OLD schema, which is not desirable because we need to run tests against the NEW
// schema. Thus database-backcompat.sh sets TEST_SKIP_DROP_DB_BEFORE_TESTS=true.
out, err := exec.Command("dropdb", "--if-exists", dbname).CombinedOutput()
if err != nil {
return errors.Errorf("dropdb --if-exists failed: %v\n%s", err, string(out))
}
}
out, err := exec.Command("createdb", dbname).CombinedOutput()
if err != nil {
if strings.Contains(string(out), "already exists") {
log.Printf("DB %s exists already (run `dropdb %s` to delete and force re-creation)", dbname, dbname)
} else {
return errors.Errorf("createdb failed: %v\n%s", err, string(out))
}
}
opts := dbconn.Opts{DSN: "dbname=" + dbname, DBName: dbname, AppName: "tests"}
if err := dbconn.SetupGlobalConnection(opts); err != nil {
return err
}
for _, database := range []*dbconn.Database{
dbconn.Frontend,
dbconn.CodeIntel,
} {
if err := dbconn.MigrateDB(dbconn.Global, database); err != nil {
return err
}
}
return nil
}
// dbName generates a unique name for the package currently being tested.
func dbName() (string, error) {
pkg := testPkgName()
if pkg == "" {
return "", errors.New("dbtesting: could not detect test package")
}
// Postgres identifier limit is 64. Conservatively shorten name if bigger
// than 32.
if len(pkg) > 32 {
pkg = fmt.Sprintf("%X-%s", crc32.ChecksumIEEE([]byte(pkg)), pkg[len(pkg)-32:])
}
return "sourcegraph-test-" + strings.ReplaceAll(pkg, "/", "-"), nil
}
// testPkgName finds the relative name of the sourcegraph package being tested
// by inspecting the call stack. If it fails, it returns an empty string.
func testPkgName() string {
pc := make([]uintptr, 20)
n := runtime.Callers(1, pc)
if n == 0 {
return ""
}
pc = pc[:n]
frames := runtime.CallersFrames(pc)
modulePrefix := "github.com/sourcegraph/sourcegraph/"
pkg := ""
var (
frame runtime.Frame
more = true
)
// Look for last function name that looks like a sourcegraph test
for more {
frame, more = frames.Next()
// Example name of a function we are looking for and the example pkg
//
// github.com/sourcegraph/sourcegraph/cmd/frontend/backend.TestGetFirstServiceVersion
// =>
// cmd/frontend/backend
testNameIdx := strings.Index(frame.Function, ".Test")
if testNameIdx < 0 {
continue
}
if !strings.HasPrefix(frame.Function, modulePrefix) {
continue
}
pkg = frame.Function[len(modulePrefix):testNameIdx]
}
return pkg
}
// MockDB implements the dbutil.DB interface and is intended to be used
// in tests that require the database handle but never call it.
type MockDB struct {
T *testing.T
}
func (db *MockDB) QueryContext(ctx context.Context, q string, args ...interface{}) (*sql.Rows, error) {
if db.T != nil {
db.T.Fatal("mock db methods are not supposed to be called")
}
panic("mock db methods are not supposed to be called")
}
func (db *MockDB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
if db.T != nil {
db.T.Fatal("mock db methods are not supposed to be called")
}
panic("mock db methods are not supposed to be called")
}
func (db *MockDB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
if db.T != nil {
db.T.Fatal("mock db methods are not supposed to be called")
}
panic("mock db methods are not supposed to be called")
}
| ["\"CI\"", "\"TEST_SKIP_DROP_DB_BEFORE_TESTS\""] | [] | ["TEST_SKIP_DROP_DB_BEFORE_TESTS", "CI"] | [] | ["TEST_SKIP_DROP_DB_BEFORE_TESTS", "CI"] | go | 2 | 0 | |
hugolib/testhelpers_test.go | package hugolib
import (
"path/filepath"
"testing"
"bytes"
"fmt"
"regexp"
"strings"
"text/template"
"github.com/sanity-io/litter"
jww "github.com/spf13/jwalterweatherman"
"github.com/gohugoio/hugo/config"
"github.com/gohugoio/hugo/deps"
"github.com/spf13/afero"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/tpl"
"github.com/spf13/viper"
"io/ioutil"
"os"
"log"
"github.com/gohugoio/hugo/hugofs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const ()
type sitesBuilder struct {
Cfg config.Provider
Fs *hugofs.Fs
T testing.TB
logger *jww.Notepad
dumper litter.Options
// Aka the Hugo server mode.
running bool
H *HugoSites
theme string
// Default toml
configFormat string
// Default is empty.
// TODO(bep) revisit this and consider always setting it to something.
// Consider this in relation to using the BaseFs.PublishFs to all publishing.
workingDir string
// Base data/content
contentFilePairs []string
templateFilePairs []string
i18nFilePairs []string
dataFilePairs []string
// Additional data/content.
// As in "use the base, but add these on top".
contentFilePairsAdded []string
templateFilePairsAdded []string
i18nFilePairsAdded []string
dataFilePairsAdded []string
}
func newTestSitesBuilder(t testing.TB) *sitesBuilder {
v := viper.New()
fs := hugofs.NewMem(v)
litterOptions := litter.Options{
HidePrivateFields: true,
StripPackageNames: true,
Separator: " ",
}
return &sitesBuilder{T: t, Fs: fs, configFormat: "toml", dumper: litterOptions}
}
func (s *sitesBuilder) Running() *sitesBuilder {
s.running = true
return s
}
func (s *sitesBuilder) WithLogger(logger *jww.Notepad) *sitesBuilder {
s.logger = logger
return s
}
func (s *sitesBuilder) WithWorkingDir(dir string) *sitesBuilder {
s.workingDir = dir
return s
}
func (s *sitesBuilder) WithConfigTemplate(data interface{}, format, configTemplate string) *sitesBuilder {
if format == "" {
format = "toml"
}
templ, err := template.New("test").Parse(configTemplate)
if err != nil {
s.Fatalf("Template parse failed: %s", err)
}
var b bytes.Buffer
templ.Execute(&b, data)
return s.WithConfigFile(format, b.String())
}
func (s *sitesBuilder) WithViper(v *viper.Viper) *sitesBuilder {
loadDefaultSettingsFor(v)
s.Cfg = v
return s
}
func (s *sitesBuilder) WithConfigFile(format, conf string) *sitesBuilder {
writeSource(s.T, s.Fs, "config."+format, conf)
s.configFormat = format
return s
}
func (s *sitesBuilder) WithThemeConfigFile(format, conf string) *sitesBuilder {
if s.theme == "" {
s.theme = "test-theme"
}
filename := filepath.Join("themes", s.theme, "config."+format)
writeSource(s.T, s.Fs, filename, conf)
return s
}
func (s *sitesBuilder) WithSimpleConfigFile() *sitesBuilder {
var config = `
baseURL = "http://example.com/"
`
return s.WithConfigFile("toml", config)
}
func (s *sitesBuilder) WithDefaultMultiSiteConfig() *sitesBuilder {
var defaultMultiSiteConfig = `
baseURL = "http://example.com/blog"
paginate = 1
disablePathToLower = true
defaultContentLanguage = "en"
defaultContentLanguageInSubdir = true
[permalinks]
other = "/somewhere/else/:filename"
[blackfriday]
angledQuotes = true
[Taxonomies]
tag = "tags"
[Languages]
[Languages.en]
weight = 10
title = "In English"
languageName = "English"
[Languages.en.blackfriday]
angledQuotes = false
[[Languages.en.menu.main]]
url = "/"
name = "Home"
weight = 0
[Languages.fr]
weight = 20
title = "Le Français"
languageName = "Français"
[Languages.fr.Taxonomies]
plaque = "plaques"
[Languages.nn]
weight = 30
title = "På nynorsk"
languageName = "Nynorsk"
paginatePath = "side"
[Languages.nn.Taxonomies]
lag = "lag"
[[Languages.nn.menu.main]]
url = "/"
name = "Heim"
weight = 1
[Languages.nb]
weight = 40
title = "På bokmål"
languageName = "Bokmål"
paginatePath = "side"
[Languages.nb.Taxonomies]
lag = "lag"
`
return s.WithConfigFile("toml", defaultMultiSiteConfig)
}
func (s *sitesBuilder) WithContent(filenameContent ...string) *sitesBuilder {
s.contentFilePairs = append(s.contentFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithContentAdded(filenameContent ...string) *sitesBuilder {
s.contentFilePairsAdded = append(s.contentFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithTemplates(filenameContent ...string) *sitesBuilder {
s.templateFilePairs = append(s.templateFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithTemplatesAdded(filenameContent ...string) *sitesBuilder {
s.templateFilePairsAdded = append(s.templateFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithData(filenameContent ...string) *sitesBuilder {
s.dataFilePairs = append(s.dataFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithDataAdded(filenameContent ...string) *sitesBuilder {
s.dataFilePairsAdded = append(s.dataFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithI18n(filenameContent ...string) *sitesBuilder {
s.i18nFilePairs = append(s.i18nFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithI18nAdded(filenameContent ...string) *sitesBuilder {
s.i18nFilePairsAdded = append(s.i18nFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) writeFilePairs(folder string, filenameContent []string) *sitesBuilder {
if len(filenameContent)%2 != 0 {
s.Fatalf("expect filenameContent for %q in pairs (%d)", folder, len(filenameContent))
}
for i := 0; i < len(filenameContent); i += 2 {
filename, content := filenameContent[i], filenameContent[i+1]
target := folder
// TODO(bep) clean up this magic.
if strings.HasPrefix(filename, folder) {
target = ""
}
if s.workingDir != "" {
target = filepath.Join(s.workingDir, target)
}
writeSource(s.T, s.Fs, filepath.Join(target, filename), content)
}
return s
}
func (s *sitesBuilder) CreateSites() *sitesBuilder {
s.addDefaults()
s.writeFilePairs("content", s.contentFilePairs)
s.writeFilePairs("content", s.contentFilePairsAdded)
s.writeFilePairs("layouts", s.templateFilePairs)
s.writeFilePairs("layouts", s.templateFilePairsAdded)
s.writeFilePairs("data", s.dataFilePairs)
s.writeFilePairs("data", s.dataFilePairsAdded)
s.writeFilePairs("i18n", s.i18nFilePairs)
s.writeFilePairs("i18n", s.i18nFilePairsAdded)
if s.Cfg == nil {
cfg, configFiles, err := LoadConfig(ConfigSourceDescriptor{Fs: s.Fs.Source, Filename: "config." + s.configFormat})
if err != nil {
s.Fatalf("Failed to load config: %s", err)
}
expectedConfigs := 1
if s.theme != "" {
expectedConfigs = 2
}
require.Equal(s.T, expectedConfigs, len(configFiles), fmt.Sprintf("Configs: %v", configFiles))
s.Cfg = cfg
}
sites, err := NewHugoSites(deps.DepsCfg{Fs: s.Fs, Cfg: s.Cfg, Logger: s.logger, Running: s.running})
if err != nil {
s.Fatalf("Failed to create sites: %s", err)
}
s.H = sites
return s
}
func (s *sitesBuilder) Build(cfg BuildCfg) *sitesBuilder {
return s.build(cfg, false)
}
func (s *sitesBuilder) BuildFail(cfg BuildCfg) *sitesBuilder {
return s.build(cfg, true)
}
func (s *sitesBuilder) build(cfg BuildCfg, shouldFail bool) *sitesBuilder {
if s.H == nil {
s.CreateSites()
}
err := s.H.Build(cfg)
if err != nil && !shouldFail {
s.Fatalf("Build failed: %s", err)
} else if err == nil && shouldFail {
s.Fatalf("Expected error")
}
return s
}
func (s *sitesBuilder) addDefaults() {
var (
contentTemplate = `---
title: doc1
weight: 1
tags:
- tag1
date: "2018-02-28"
---
# doc1
*some "content"*
{{< shortcode >}}
{{< lingo >}}
`
defaultContent = []string{
"content/sect/doc1.en.md", contentTemplate,
"content/sect/doc1.fr.md", contentTemplate,
"content/sect/doc1.nb.md", contentTemplate,
"content/sect/doc1.nn.md", contentTemplate,
}
defaultTemplates = []string{
"_default/single.html", "Single: {{ .Title }}|{{ i18n \"hello\" }}|{{.Lang}}|{{ .Content }}",
"_default/list.html", "{{ $p := .Paginator }}List Page {{ $p.PageNumber }}: {{ .Title }}|{{ i18n \"hello\" }}|{{ .Permalink }}|Pager: {{ template \"_internal/pagination.html\" . }}",
"index.html", "{{ $p := .Paginator }}Default Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}",
"index.fr.html", "{{ $p := .Paginator }}French Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}",
// Shortcodes
"shortcodes/shortcode.html", "Shortcode: {{ i18n \"hello\" }}",
// A shortcode in multiple languages
"shortcodes/lingo.html", "LingoDefault",
"shortcodes/lingo.fr.html", "LingoFrench",
}
defaultI18n = []string{
"en.yaml", `
hello:
other: "Hello"
`,
"fr.yaml", `
hello:
other: "Bonjour"
`,
}
defaultData = []string{
"hugo.toml", "slogan = \"Hugo Rocks!\"",
}
)
if len(s.contentFilePairs) == 0 {
s.writeFilePairs("content", defaultContent)
}
if len(s.templateFilePairs) == 0 {
s.writeFilePairs("layouts", defaultTemplates)
}
if len(s.dataFilePairs) == 0 {
s.writeFilePairs("data", defaultData)
}
if len(s.i18nFilePairs) == 0 {
s.writeFilePairs("i18n", defaultI18n)
}
}
func (s *sitesBuilder) Fatalf(format string, args ...interface{}) {
Fatalf(s.T, format, args...)
}
func Fatalf(t testing.TB, format string, args ...interface{}) {
trace := strings.Join(assert.CallerInfo(), "\n\r\t\t\t")
format = format + "\n%s"
args = append(args, trace)
t.Fatalf(format, args...)
}
func (s *sitesBuilder) AssertFileContent(filename string, matches ...string) {
content := readDestination(s.T, s.Fs, filename)
for _, match := range matches {
if !strings.Contains(content, match) {
s.Fatalf("No match for %q in content for %s\n%q", match, filename, content)
}
}
}
func (s *sitesBuilder) AssertObject(expected string, object interface{}) {
got := s.dumper.Sdump(object)
expected = strings.TrimSpace(expected)
if expected != got {
fmt.Println(got)
diff := helpers.DiffStrings(expected, got)
s.Fatalf("diff:\n%s\nexpected\n%s\ngot\n%s", diff, expected, got)
}
}
func (s *sitesBuilder) AssertFileContentRe(filename string, matches ...string) {
content := readDestination(s.T, s.Fs, filename)
for _, match := range matches {
r := regexp.MustCompile(match)
if !r.MatchString(content) {
s.Fatalf("No match for %q in content for %s\n%q", match, filename, content)
}
}
}
func (s *sitesBuilder) CheckExists(filename string) bool {
return destinationExists(s.Fs, filepath.Clean(filename))
}
type testHelper struct {
Cfg config.Provider
Fs *hugofs.Fs
T testing.TB
}
func (th testHelper) assertFileContent(filename string, matches ...string) {
filename = th.replaceDefaultContentLanguageValue(filename)
content := readDestination(th.T, th.Fs, filename)
for _, match := range matches {
match = th.replaceDefaultContentLanguageValue(match)
require.True(th.T, strings.Contains(content, match), fmt.Sprintf("File no match for\n%q in\n%q:\n%s", strings.Replace(match, "%", "%%", -1), filename, strings.Replace(content, "%", "%%", -1)))
}
}
func (th testHelper) assertFileContentRegexp(filename string, matches ...string) {
filename = th.replaceDefaultContentLanguageValue(filename)
content := readDestination(th.T, th.Fs, filename)
for _, match := range matches {
match = th.replaceDefaultContentLanguageValue(match)
r := regexp.MustCompile(match)
require.True(th.T, r.MatchString(content), fmt.Sprintf("File no match for\n%q in\n%q:\n%s", strings.Replace(match, "%", "%%", -1), filename, strings.Replace(content, "%", "%%", -1)))
}
}
func (th testHelper) assertFileNotExist(filename string) {
exists, err := helpers.Exists(filename, th.Fs.Destination)
require.NoError(th.T, err)
require.False(th.T, exists)
}
func (th testHelper) replaceDefaultContentLanguageValue(value string) string {
defaultInSubDir := th.Cfg.GetBool("defaultContentLanguageInSubDir")
replace := th.Cfg.GetString("defaultContentLanguage") + "/"
if !defaultInSubDir {
value = strings.Replace(value, replace, "", 1)
}
return value
}
func newTestPathSpec(fs *hugofs.Fs, v *viper.Viper) *helpers.PathSpec {
l := helpers.NewDefaultLanguage(v)
ps, _ := helpers.NewPathSpec(fs, l)
return ps
}
func newTestDefaultPathSpec() *helpers.PathSpec {
v := viper.New()
// Easier to reason about in tests.
v.Set("disablePathToLower", true)
v.Set("contentDir", "content")
fs := hugofs.NewDefault(v)
ps, _ := helpers.NewPathSpec(fs, v)
return ps
}
func newTestCfg() (*viper.Viper, *hugofs.Fs) {
v := viper.New()
fs := hugofs.NewMem(v)
v.SetFs(fs.Source)
loadDefaultSettingsFor(v)
// Default is false, but true is easier to use as default in tests
v.Set("defaultContentLanguageInSubdir", true)
return v, fs
}
// newTestSite creates a new site in the English language with in-memory Fs.
// The site will have a template system loaded and ready to use.
// Note: This is only used in single site tests.
func newTestSite(t testing.TB, configKeyValues ...interface{}) *Site {
cfg, fs := newTestCfg()
for i := 0; i < len(configKeyValues); i += 2 {
cfg.Set(configKeyValues[i].(string), configKeyValues[i+1])
}
d := deps.DepsCfg{Language: helpers.NewLanguage("en", cfg), Fs: fs, Cfg: cfg}
s, err := NewSiteForCfg(d)
if err != nil {
Fatalf(t, "Failed to create Site: %s", err)
}
return s
}
func newTestSitesFromConfig(t testing.TB, afs afero.Fs, tomlConfig string, layoutPathContentPairs ...string) (testHelper, *HugoSites) {
if len(layoutPathContentPairs)%2 != 0 {
Fatalf(t, "Layouts must be provided in pairs")
}
writeToFs(t, afs, "config.toml", tomlConfig)
cfg, err := LoadConfigDefault(afs)
require.NoError(t, err)
fs := hugofs.NewFrom(afs, cfg)
th := testHelper{cfg, fs, t}
for i := 0; i < len(layoutPathContentPairs); i += 2 {
writeSource(t, fs, layoutPathContentPairs[i], layoutPathContentPairs[i+1])
}
h, err := NewHugoSites(deps.DepsCfg{Fs: fs, Cfg: cfg})
require.NoError(t, err)
return th, h
}
func newTestSitesFromConfigWithDefaultTemplates(t testing.TB, tomlConfig string) (testHelper, *HugoSites) {
return newTestSitesFromConfig(t, afero.NewMemMapFs(), tomlConfig,
"layouts/_default/single.html", "Single|{{ .Title }}|{{ .Content }}",
"layouts/_default/list.html", "List|{{ .Title }}|{{ .Content }}",
"layouts/_default/terms.html", "Terms List|{{ .Title }}|{{ .Content }}",
)
}
func newDebugLogger() *jww.Notepad {
return jww.NewNotepad(jww.LevelDebug, jww.LevelError, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
}
func newErrorLogger() *jww.Notepad {
return jww.NewNotepad(jww.LevelError, jww.LevelError, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
}
func newWarningLogger() *jww.Notepad {
return jww.NewNotepad(jww.LevelWarn, jww.LevelError, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
}
func createWithTemplateFromNameValues(additionalTemplates ...string) func(templ tpl.TemplateHandler) error {
return func(templ tpl.TemplateHandler) error {
for i := 0; i < len(additionalTemplates); i += 2 {
err := templ.AddTemplate(additionalTemplates[i], additionalTemplates[i+1])
if err != nil {
return err
}
}
return nil
}
}
func buildSingleSite(t testing.TB, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site {
return buildSingleSiteExpected(t, false, depsCfg, buildCfg)
}
func buildSingleSiteExpected(t testing.TB, expectBuildError bool, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site {
h, err := NewHugoSites(depsCfg)
require.NoError(t, err)
require.Len(t, h.Sites, 1)
if expectBuildError {
require.Error(t, h.Build(buildCfg))
return nil
}
require.NoError(t, h.Build(buildCfg))
return h.Sites[0]
}
func writeSourcesToSource(t *testing.T, base string, fs *hugofs.Fs, sources ...[2]string) {
for _, src := range sources {
writeSource(t, fs, filepath.Join(base, src[0]), src[1])
}
}
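// dumpPages prints a one-line summary of each page (kind, title, permalink, path and sections) for debugging.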
func dumpPages(pages ...*Page) {
for i, p := range pages {
fmt.Printf("%d: Kind: %s Title: %-10s RelPermalink: %-10s Path: %-10s sections: %s Len Sections(): %d\n",
i+1,
p.Kind, p.title, p.RelPermalink(), p.Path(), p.sections, len(p.Sections()))
}
}
func isCI() bool {
return os.Getenv("CI") != ""
}
| ["\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 | |
backend/bubbles/wsgi.py | """
WSGI config for bubbles project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bubbles.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
bybit_trash/api_bybit.py | import os
import time
import bybit
import config
from termcolor import colored

# Get environment variables
api_key = os.environ.get('BYBIT_KEY')
api_secret = os.environ.get('BYBIT_SECRET')

client = bybit.bybit(test=False, api_key=api_key, api_secret=api_secret)
live_trade = config.live_trade


def get_timestamp(recent):
    return int(time.time()) - recent


def position_information(pair):
    return client.LinearPositions.LinearPositions_myPosition(symbol=pair).result()[0].get('result')  # [0_or_1].get('symbol')


print(position_information("BTCUSDT")[0])


def LONG_SIDE(response):
    if response[0].get('size') > 0: return "LONGING"
    elif response[0].get('size') == 0: return "NO_POSITION"


def SHORT_SIDE(response):
    if response[1].get('size') > 0 : return "SHORTING"
    elif response[1].get('size') == 0: return "NO_POSITION"


def change_leverage(pair, leverage):
    if live_trade: client.LinearPositions.LinearPositions_saveLeverage(symbol=pair, buy_leverage=leverage, sell_leverage=leverage).result()


def change_margin_to_ISOLATED(pair, leverage):
    if live_trade: client.LinearPositions.LinearPositions_switchIsolated(symbol=pair,is_isolated=True, buy_leverage=leverage, sell_leverage=leverage).result()


def change_margin_to_CROSSED(pair, leverage):
    if live_trade: client.LinearPositions.LinearPositions_switchIsolated(symbol=pair,is_isolated=False, buy_leverage=leverage, sell_leverage=leverage).result()


def disable_auto_add_margin(pair):
    client.LinearPositions.LinearPositions_setAutoAddMargin(symbol=pair, side="Buy", auto_add_margin=False).result()
    client.LinearPositions.LinearPositions_setAutoAddMargin(symbol=pair, side="Sell", auto_add_margin=False).result()


def market_open_long(pair, quantity):
    if live_trade:
        client.LinearOrder.LinearOrder_new(symbol=pair,
                                           qty=quantity,
                                           side="Buy",
                                           order_type="Market",
                                           time_in_force="ImmediateOrCancel",
                                           reduce_only=False,
                                           close_on_trigger=False).result()
    print(colored("🚀 GO_LONG 🚀", "green"))


def market_open_short(pair, quantity):
    if live_trade:
        client.LinearOrder.LinearOrder_new(symbol=pair,
                                           qty=quantity,
                                           side="Sell",
                                           order_type="Market",
                                           time_in_force="ImmediateOrCancel",
                                           reduce_only=False,
                                           close_on_trigger=False).result()
    print(colored("💥 GO_SHORT 💥", "red"))


def market_close_long(pair, response):
    if live_trade:
        client.LinearOrder.LinearOrder_new(symbol=pair,
                                           qty=response[0].get('size'),
                                           side="Sell",
                                           order_type="Market",
                                           time_in_force="ImmediateOrCancel",
                                           reduce_only=True,
                                           close_on_trigger=False).result()
    print("💰 CLOSE_LONG 💰")


def market_close_short(pair, response):
    if live_trade:
        client.LinearOrder.LinearOrder_new(symbol=pair,
                                           qty=response[1].get('size'),
                                           side="Buy",
                                           order_type="Market",
                                           time_in_force="ImmediateOrCancel",
                                           reduce_only=True,
                                           close_on_trigger=False).result()
    print("💰 CLOSE_SHORT 💰")


def test_trailing_stop(pair, quantity):
    client.Positions.Positions_tradingStop(symbol="BTCUSD",
                                           take_profit="0",
                                           stop_loss="9110",
                                           trailing_stop="0",
                                           new_trailing_active="0").result() | [] | [] | ["BYBIT_SECRET", "BYBIT_KEY"] | [] | ["BYBIT_SECRET", "BYBIT_KEY"] | python | 2 | 0 | |
cmd/postgres_exporter/postgres_exporter.go | package main
import (
"crypto/sha256"
"database/sql"
"errors"
"fmt"
"io/ioutil"
"math"
"net/http"
"net/url"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/blang/semver"
"github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/yaml.v2"
)
// Version is set during build to the git describe version
// (semantic version)-(commitish) form.
var Version = "0.0.1"
var (
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").Envar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String()
metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").Envar("PG_EXPORTER_WEB_TELEMETRY_PATH").String()
disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool()
disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool()
autoDiscoverDatabases = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically.").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool()
queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String()
excludeDatabases = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String()
)
// Metric name parts.
const (
// Namespace for all metrics.
namespace = "pg"
// Subsystems.
exporter = "exporter"
// Metric label used for static string data that's handy to send to Prometheus
// e.g. version
staticLabelName = "static"
// Metric label used for server identification.
serverLabelName = "server"
)
// ColumnUsage should be one of several enum values which describe how a
// queried row is to be converted to a Prometheus metric.
type ColumnUsage int
// nolint: golint
const (
DISCARD ColumnUsage = iota // Ignore this column
LABEL ColumnUsage = iota // Use this column as a label
COUNTER ColumnUsage = iota // Use this column as a counter
GAUGE ColumnUsage = iota // Use this column as a gauge
MAPPEDMETRIC ColumnUsage = iota // Use this column with the supplied mapping of text values
DURATION ColumnUsage = iota // This column should be interpreted as a text duration (and converted to milliseconds)
)
// UnmarshalYAML implements the yaml.Unmarshaller interface.
func (cu *ColumnUsage) UnmarshalYAML(unmarshal func(interface{}) error) error {
var value string
if err := unmarshal(&value); err != nil {
return err
}
columnUsage, err := stringToColumnUsage(value)
if err != nil {
return err
}
*cu = columnUsage
return nil
}
// MappingOptions is a copy of ColumnMapping used only for parsing
type MappingOptions struct {
Usage string `yaml:"usage"`
Description string `yaml:"description"`
Mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC
SupportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD).
}
// nolint: golint
type Mapping map[string]MappingOptions
// nolint: golint
type UserQuery struct {
Query string `yaml:"query"`
Metrics []Mapping `yaml:"metrics"`
CacheSeconds uint64 `yaml:"cache_seconds"` // Number of seconds to cache the namespace result metrics for.
}
// nolint: golint
type UserQueries map[string]UserQuery
// Regex used to get the "short-version" from the postgres version field.
var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`)
var lowestSupportedVersion = semver.MustParse("9.1.0")
// Parses the version of postgres into the short version string we can use to
// match behaviors.
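// For example, a version() string such as
// "PostgreSQL 10.4 on x86_64-pc-linux-gnu, compiled by gcc ..." (illustrative)
// yields the semver 10.4.0 via the first capture group of versionRegex.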
func parseVersion(versionString string) (semver.Version, error) {
submatches := versionRegex.FindStringSubmatch(versionString)
if len(submatches) > 1 {
return semver.ParseTolerant(submatches[1])
}
return semver.Version{},
errors.New(fmt.Sprintln("Could not find a postgres version in string:", versionString))
}
// ColumnMapping is the user-friendly representation of a prometheus descriptor map
type ColumnMapping struct {
usage ColumnUsage `yaml:"usage"`
description string `yaml:"description"`
mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC
supportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD).
}
// UnmarshalYAML implements yaml.Unmarshaller
func (cm *ColumnMapping) UnmarshalYAML(unmarshal func(interface{}) error) error {
type plain ColumnMapping
return unmarshal((*plain)(cm))
}
// intermediateMetricMap holds the partially loaded metric map parsing.
// This is mainly so we can pass cacheSeconds around.
type intermediateMetricMap struct {
columnMappings map[string]ColumnMapping
cacheSeconds uint64
}
// MetricMapNamespace groups metric maps under a shared set of labels.
type MetricMapNamespace struct {
labels []string // Label names for this namespace
columnMappings map[string]MetricMap // Column mappings in this namespace
cacheSeconds uint64 // Number of seconds this metric namespace can be cached. 0 disables.
}
// MetricMap stores the prometheus metric description which a given column will
// be mapped to by the collector
type MetricMap struct {
discard bool // Should metric be discarded during mapping?
vtype prometheus.ValueType // Prometheus valuetype
desc *prometheus.Desc // Prometheus descriptor
conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64
}
// ErrorConnectToServer is a connection to PgSQL server error
type ErrorConnectToServer struct {
Msg string
}
// Error returns error
func (e *ErrorConnectToServer) Error() string {
return e.Msg
}
// TODO: revisit this with the semver system
func dumpMaps() {
// TODO: make this function part of the exporter
for name, cmap := range builtinMetricMaps {
query, ok := queryOverrides[name]
if !ok {
fmt.Println(name)
} else {
for _, queryOverride := range query {
fmt.Println(name, queryOverride.versionRange, queryOverride.query)
}
}
for column, details := range cmap.columnMappings {
fmt.Printf(" %-40s %v\n", column, details)
}
fmt.Println()
}
}
var builtinMetricMaps = map[string]intermediateMetricMap{
"pg_stat_database": {
map[string]ColumnMapping{
"datid": {LABEL, "OID of a database", nil, nil},
"datname": {LABEL, "Name of this database", nil, nil},
"numbackends": {GAUGE, "Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", nil, nil},
"xact_commit": {COUNTER, "Number of transactions in this database that have been committed", nil, nil},
"xact_rollback": {COUNTER, "Number of transactions in this database that have been rolled back", nil, nil},
"blks_read": {COUNTER, "Number of disk blocks read in this database", nil, nil},
"blks_hit": {COUNTER, "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", nil, nil},
"tup_returned": {COUNTER, "Number of rows returned by queries in this database", nil, nil},
"tup_fetched": {COUNTER, "Number of rows fetched by queries in this database", nil, nil},
"tup_inserted": {COUNTER, "Number of rows inserted by queries in this database", nil, nil},
"tup_updated": {COUNTER, "Number of rows updated by queries in this database", nil, nil},
"tup_deleted": {COUNTER, "Number of rows deleted by queries in this database", nil, nil},
"conflicts": {COUNTER, "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", nil, nil},
"temp_files": {COUNTER, "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", nil, nil},
"temp_bytes": {COUNTER, "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", nil, nil},
"deadlocks": {COUNTER, "Number of deadlocks detected in this database", nil, nil},
"blk_read_time": {COUNTER, "Time spent reading data file blocks by backends in this database, in milliseconds", nil, nil},
"blk_write_time": {COUNTER, "Time spent writing data file blocks by backends in this database, in milliseconds", nil, nil},
"stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil},
},
0,
},
"pg_stat_database_conflicts": {
map[string]ColumnMapping{
"datid": {LABEL, "OID of a database", nil, nil},
"datname": {LABEL, "Name of this database", nil, nil},
"confl_tablespace": {COUNTER, "Number of queries in this database that have been canceled due to dropped tablespaces", nil, nil},
"confl_lock": {COUNTER, "Number of queries in this database that have been canceled due to lock timeouts", nil, nil},
"confl_snapshot": {COUNTER, "Number of queries in this database that have been canceled due to old snapshots", nil, nil},
"confl_bufferpin": {COUNTER, "Number of queries in this database that have been canceled due to pinned buffers", nil, nil},
"confl_deadlock": {COUNTER, "Number of queries in this database that have been canceled due to deadlocks", nil, nil},
},
0,
},
"pg_locks": {
map[string]ColumnMapping{
"datname": {LABEL, "Name of this database", nil, nil},
"mode": {LABEL, "Name of lock mode", nil, nil},
"type": {LABEL, "Type of locked object", nil, nil},
"count": {GAUGE, "Number of locks", nil, nil},
},
0,
},
"pg_stat_replication": {
map[string]ColumnMapping{
"procpid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange("<9.2.0")},
"pid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange(">=9.2.0")},
"usesysid": {DISCARD, "OID of the user logged into this WAL sender process", nil, nil},
"usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil},
"application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil},
"client_addr": {LABEL, "IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil},
"client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil},
"client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil},
"backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil},
"backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil},
"state": {LABEL, "Current WAL sender state", nil, nil},
"sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange("<10.0.0")},
"write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange("<10.0.0")},
"flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange("<10.0.0")},
"replay_location": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange("<10.0.0")},
"sent_lsn": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange(">=10.0.0")},
"write_lsn": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")},
"flush_lsn": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")},
"replay_lsn": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange(">=10.0.0")},
"sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil},
"sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil},
"slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, semver.MustParseRange(">=9.2.0")},
"plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil},
"slot_type": {DISCARD, "The slot type - physical or logical", nil, nil},
"datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
"database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
"active": {DISCARD, "True if this slot is currently actively being used", nil, nil},
"active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil},
"xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. VACUUM cannot remove tuples deleted by any later transaction", nil, nil},
"catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil},
"restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil},
"pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil},
"pg_current_wal_lsn": {DISCARD, "pg_current_xlog_location", nil, semver.MustParseRange(">=10.0.0")},
"pg_current_wal_lsn_bytes": {GAUGE, "WAL position in bytes", nil, semver.MustParseRange(">=10.0.0")},
"pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=9.2.0 <10.0.0")},
"pg_wal_lsn_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=10.0.0")},
"confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil},
"write_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")},
"flush_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level remote_flush incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")},
"replay_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")},
},
0,
},
"pg_stat_activity": {
map[string]ColumnMapping{
"datname": {LABEL, "Name of this database", nil, nil},
"state": {LABEL, "connection state", nil, semver.MustParseRange(">=9.2.0")},
"count": {GAUGE, "number of connections in this state", nil, nil},
"max_tx_duration": {GAUGE, "max duration in seconds any active transaction has been running", nil, nil},
},
0,
},
}
// OverrideQuery's are run in place of simple namespace lookups, and provide
// advanced functionality. But they have a tendency to be postgres version specific.
// There aren't too many versions, so we simply store customized versions using
// the semver matching we do for columns.
type OverrideQuery struct {
versionRange semver.Range
query string
}
// Overriding queries for namespaces above.
// TODO: validate this is a closed set in tests, and there are no overlaps
var queryOverrides = map[string][]OverrideQuery{
"pg_locks": {
{
semver.MustParseRange(">0.0.0"),
`SELECT pg_database.datname,tmp.mode,tmp2.locktype as type,COALESCE(count,0) as count
FROM
(
VALUES ('accesssharelock'),
('rowsharelock'),
('rowexclusivelock'),
('shareupdateexclusivelock'),
('sharelock'),
('sharerowexclusivelock'),
('exclusivelock'),
('accessexclusivelock')
) AS tmp(mode) CROSS JOIN pg_database
LEFT JOIN
(SELECT database, lower(mode) AS mode, locktype, count(*) AS count
FROM pg_locks WHERE database IS NOT NULL
GROUP BY database, lower(mode), locktype
) AS tmp2
ON tmp.mode=tmp2.mode and pg_database.oid = tmp2.database ORDER BY 1`,
},
},
"pg_stat_replication": {
{
semver.MustParseRange(">=10.0.0"),
`
SELECT *,
(case pg_is_in_recovery() when 't' then null else pg_current_wal_lsn() end) AS pg_current_wal_lsn,
(case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), pg_lsn('0/0'))::float end) AS pg_current_wal_lsn_bytes,
(case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn)::float end) AS pg_wal_lsn_diff
FROM pg_stat_replication
`,
},
{
semver.MustParseRange(">=9.2.0 <10.0.0"),
`
SELECT *,
(case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location,
(case pg_is_in_recovery() when 't' then null else pg_xlog_location_diff(pg_current_xlog_location(), replay_location)::float end) AS pg_xlog_location_diff
FROM pg_stat_replication
`,
},
{
semver.MustParseRange("<9.2.0"),
`
SELECT *,
(case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location
FROM pg_stat_replication
`,
},
},
"pg_stat_activity": {
// This query only works on PostgreSQL 9.2 and newer (see the version range below).
{
semver.MustParseRange(">=9.2.0"),
`
SELECT
pg_database.datname,
tmp.state,
COALESCE(count,0) as count,
COALESCE(max_tx_duration,0) as max_tx_duration
FROM
(
VALUES ('active'),
('idle'),
('idle in transaction'),
('idle in transaction (aborted)'),
('fastpath function call'),
('disabled')
) AS tmp(state) CROSS JOIN pg_database
LEFT JOIN
(
SELECT
datname,
state,
count(*) AS count,
MAX(EXTRACT(EPOCH FROM now() - xact_start))::float AS max_tx_duration
FROM pg_stat_activity GROUP BY datname,state) AS tmp2
ON tmp.state = tmp2.state AND pg_database.datname = tmp2.datname
`,
},
{
semver.MustParseRange("<9.2.0"),
`
SELECT
datname,
'unknown' AS state,
COALESCE(count(*),0) AS count,
COALESCE(MAX(EXTRACT(EPOCH FROM now() - xact_start))::float,0) AS max_tx_duration
FROM pg_stat_activity GROUP BY datname
`,
},
},
}
// Convert the query override file to the version-specific query override file
// for the exporter.
func makeQueryOverrideMap(pgVersion semver.Version, queryOverrides map[string][]OverrideQuery) map[string]string {
resultMap := make(map[string]string)
for name, overrideDef := range queryOverrides {
// Find a matching semver. We make it an error to have overlapping
// ranges at test-time, so only 1 should ever match.
matched := false
for _, queryDef := range overrideDef {
if queryDef.versionRange(pgVersion) {
resultMap[name] = queryDef.query
matched = true
break
}
}
if !matched {
log.Warnln("No query matched override for", name, "- disabling metric space.")
resultMap[name] = ""
}
}
return resultMap
}
func parseUserQueries(content []byte) (map[string]intermediateMetricMap, map[string]string, error) {
var userQueries UserQueries
err := yaml.Unmarshal(content, &userQueries)
if err != nil {
return nil, nil, err
}
// Stores the loaded map representation
metricMaps := make(map[string]intermediateMetricMap)
newQueryOverrides := make(map[string]string)
for metric, specs := range userQueries {
log.Debugln("New user metric namespace from YAML:", metric, "Will cache results for:", specs.CacheSeconds)
newQueryOverrides[metric] = specs.Query
metricMap, ok := metricMaps[metric]
if !ok {
// Namespace for metric not found - add it.
newMetricMap := make(map[string]ColumnMapping)
metricMap = intermediateMetricMap{
columnMappings: newMetricMap,
cacheSeconds: specs.CacheSeconds,
}
metricMaps[metric] = metricMap
}
for _, metric := range specs.Metrics {
for name, mappingOption := range metric {
var columnMapping ColumnMapping
tmpUsage, _ := stringToColumnUsage(mappingOption.Usage)
columnMapping.usage = tmpUsage
columnMapping.description = mappingOption.Description
// TODO: we should support the metric_mapping option here (it is currently dropped)
columnMapping.mapping = nil
// Should we support this for users?
columnMapping.supportedVersions = nil
metricMap.columnMappings[name] = columnMapping
}
}
}
return metricMaps, newQueryOverrides, nil
}
// Add queries to the builtinMetricMaps and queryOverrides maps. Added queries do not
// respect version requirements, because it is assumed that the user knows
// what they are doing with their version of postgres.
//
// This function modifies metricMap and queryOverrideMap to contain the new
// queries.
// TODO: test code for all ColumnUsage values.
// TODO: the YAML this supports is "non-standard" - we should move away from it.
func addQueries(content []byte, pgVersion semver.Version, server *Server) error {
metricMaps, newQueryOverrides, err := parseUserQueries(content)
if err != nil {
return err
}
// Convert the loaded metric map into exporter representation
partialExporterMap := makeDescMap(pgVersion, server.labels, metricMaps)
// Merge the two maps (which are now quite flattened)
for k, v := range partialExporterMap {
_, found := server.metricMap[k]
if found {
log.Debugln("Overriding metric", k, "from user YAML file.")
} else {
log.Debugln("Adding new metric", k, "from user YAML file.")
}
server.metricMap[k] = v
}
// Merge the query override map
for k, v := range newQueryOverrides {
_, found := server.queryOverrides[k]
if found {
log.Debugln("Overriding query override", k, "from user YAML file.")
} else {
log.Debugln("Adding new query override", k, "from user YAML file.")
}
server.queryOverrides[k] = v
}
return nil
}
// Turn the MetricMap column mapping into a prometheus descriptor mapping.
func makeDescMap(pgVersion semver.Version, serverLabels prometheus.Labels, metricMaps map[string]intermediateMetricMap) map[string]MetricMapNamespace {
var metricMap = make(map[string]MetricMapNamespace)
for namespace, intermediateMappings := range metricMaps {
thisMap := make(map[string]MetricMap)
// Collect the variable labels for this namespace (columns with LABEL usage)
var variableLabels []string
for columnName, columnMapping := range intermediateMappings.columnMappings {
if columnMapping.usage == LABEL {
variableLabels = append(variableLabels, columnName)
}
}
for columnName, columnMapping := range intermediateMappings.columnMappings {
// Check column version compatibility for the current map
// Force to discard if not compatible.
if columnMapping.supportedVersions != nil {
if !columnMapping.supportedVersions(pgVersion) {
// It's very useful to be able to see what columns are being
// rejected.
log.Debugln(columnName, "is being forced to discard due to version incompatibility.")
thisMap[columnName] = MetricMap{
discard: true,
conversion: func(_ interface{}) (float64, bool) {
return math.NaN(), true
},
}
continue
}
}
// Determine how to convert the column based on its usage.
// nolint: dupl
switch columnMapping.usage {
case DISCARD, LABEL:
thisMap[columnName] = MetricMap{
discard: true,
conversion: func(_ interface{}) (float64, bool) {
return math.NaN(), true
},
}
case COUNTER:
thisMap[columnName] = MetricMap{
vtype: prometheus.CounterValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in)
},
}
case GAUGE:
thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in)
},
}
case MAPPEDMETRIC:
thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) {
text, ok := in.(string)
if !ok {
return math.NaN(), false
}
val, ok := columnMapping.mapping[text]
if !ok {
return math.NaN(), false
}
return val, true
},
}
case DURATION:
thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) {
var durationString string
switch t := in.(type) {
case []byte:
durationString = string(t)
case string:
durationString = t
default:
log.Errorln("DURATION conversion metric was not a string")
return math.NaN(), false
}
if durationString == "-1" {
return math.NaN(), false
}
d, err := time.ParseDuration(durationString)
if err != nil {
log.Errorln("Failed converting result to metric:", columnName, in, err)
return math.NaN(), false
}
return float64(d / time.Millisecond), true
},
}
}
}
metricMap[namespace] = MetricMapNamespace{variableLabels, thisMap, intermediateMappings.cacheSeconds}
}
return metricMap
}
// convert a string to the corresponding ColumnUsage
func stringToColumnUsage(s string) (ColumnUsage, error) {
var u ColumnUsage
var err error
switch s {
case "DISCARD":
u = DISCARD
case "LABEL":
u = LABEL
case "COUNTER":
u = COUNTER
case "GAUGE":
u = GAUGE
case "MAPPEDMETRIC":
u = MAPPEDMETRIC
case "DURATION":
u = DURATION
default:
err = fmt.Errorf("wrong ColumnUsage given : %s", s)
}
return u, err
}
// Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. string and []byte
// types are parsed as float64 where possible; otherwise they map to NaN and !ok.
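// Illustrative conversions (derived from the cases below): int64(42) -> 42.0,
// []byte("3.14") -> 3.14, bool(true) -> 1.0, nil -> NaN with ok=true, and a
// non-numeric string such as "backend" -> NaN with ok=false.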
func dbToFloat64(t interface{}) (float64, bool) {
switch v := t.(type) {
case int64:
return float64(v), true
case float64:
return v, true
case time.Time:
return float64(v.Unix()), true
case []byte:
// Try and convert to string and then parse to a float64
strV := string(v)
result, err := strconv.ParseFloat(strV, 64)
if err != nil {
log.Infoln("Could not parse []byte:", err)
return math.NaN(), false
}
return result, true
case string:
result, err := strconv.ParseFloat(v, 64)
if err != nil {
log.Infoln("Could not parse string:", err)
return math.NaN(), false
}
return result, true
case bool:
if v {
return 1.0, true
}
return 0.0, true
case nil:
return math.NaN(), true
default:
return math.NaN(), false
}
}
// Convert database.sql to string for Prometheus labels. Null types are mapped to empty strings.
func dbToString(t interface{}) (string, bool) {
switch v := t.(type) {
case int64:
return fmt.Sprintf("%v", v), true
case float64:
return fmt.Sprintf("%v", v), true
case time.Time:
return fmt.Sprintf("%v", v.Unix()), true
case nil:
return "", true
case []byte:
// Try and convert to string
return string(v), true
case string:
return v, true
case bool:
if v {
return "true", true
}
return "false", true
default:
return "", false
}
}
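// parseFingerprint reduces a connection string to a "host:port" identifier.
// For example (illustrative values), both
// "postgres://user:secret@db.example.com:5433/mydb?sslmode=disable" and
// "host=db.example.com port=5433 user=user password=secret" fingerprint to
// "db.example.com:5433"; missing parts default to "localhost" and "5432".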
func parseFingerprint(url string) (string, error) {
dsn, err := pq.ParseURL(url)
if err != nil {
dsn = url
}
pairs := strings.Split(dsn, " ")
kv := make(map[string]string, len(pairs))
for _, pair := range pairs {
splitted := strings.SplitN(pair, "=", 2)
if len(splitted) != 2 {
return "", fmt.Errorf("malformed dsn %q", dsn)
}
kv[splitted[0]] = splitted[1]
}
var fingerprint string
if host, ok := kv["host"]; ok {
fingerprint += host
} else {
fingerprint += "localhost"
}
if port, ok := kv["port"]; ok {
fingerprint += ":" + port
} else {
fingerprint += ":5432"
}
return fingerprint, nil
}
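// loggableDSN masks credentials before a DSN is written to the logs. For
// example (illustrative), "postgres://user:secret@host:5432/db" becomes
// "postgres://user:PASSWORD_REMOVED@host:5432/db".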
func loggableDSN(dsn string) string {
pDSN, err := url.Parse(dsn)
if err != nil {
return "could not parse DATA_SOURCE_NAME"
}
// Blank user info if not nil
if pDSN.User != nil {
pDSN.User = url.UserPassword(pDSN.User.Username(), "PASSWORD_REMOVED")
}
return pDSN.String()
}
type cachedMetrics struct {
metrics []prometheus.Metric
lastScrape time.Time
}
// Server describes a connection to Postgres.
// Also it contains metrics map and query overrides.
type Server struct {
db *sql.DB
labels prometheus.Labels
master bool
// Last version used to calculate metric map. If mismatch on scrape,
// then maps are recalculated.
lastMapVersion semver.Version
// Currently active metric map
metricMap map[string]MetricMapNamespace
// Currently active query overrides
queryOverrides map[string]string
mappingMtx sync.RWMutex
// Currently cached metrics
metricCache map[string]cachedMetrics
cacheMtx sync.Mutex
}
// ServerOpt configures a server.
type ServerOpt func(*Server)
// ServerWithLabels configures a set of labels.
func ServerWithLabels(labels prometheus.Labels) ServerOpt {
return func(s *Server) {
for k, v := range labels {
s.labels[k] = v
}
}
}
// NewServer establishes a new connection using DSN.
func NewServer(dsn string, opts ...ServerOpt) (*Server, error) {
fingerprint, err := parseFingerprint(dsn)
if err != nil {
return nil, err
}
db, err := sql.Open("postgres", dsn)
if err != nil {
return nil, err
}
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
log.Infof("Established new database connection to %q.", fingerprint)
s := &Server{
db: db,
master: false,
labels: prometheus.Labels{
serverLabelName: fingerprint,
},
metricCache: make(map[string]cachedMetrics),
}
for _, opt := range opts {
opt(s)
}
return s, nil
}
// Close disconnects from Postgres.
func (s *Server) Close() error {
return s.db.Close()
}
// Ping checks connection availability and possibly invalidates the connection if it fails.
func (s *Server) Ping() error {
if err := s.db.Ping(); err != nil {
if cerr := s.Close(); cerr != nil {
log.Errorf("Error while closing non-pinging DB connection to %q: %v", s, cerr)
}
return err
}
return nil
}
// String returns server's fingerprint.
func (s *Server) String() string {
return s.labels[serverLabelName]
}
// Scrape loads metrics.
func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error {
s.mappingMtx.RLock()
defer s.mappingMtx.RUnlock()
var err error
if (!disableSettingsMetrics && !*autoDiscoverDatabases) || (!disableSettingsMetrics && *autoDiscoverDatabases && s.master) {
if err = querySettings(ch, s); err != nil {
err = fmt.Errorf("error retrieving settings: %s", err)
}
}
errMap := queryNamespaceMappings(ch, s)
if len(errMap) > 0 {
err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap))
}
return err
}
// Servers contains a collection of servers to Postgres.
type Servers struct {
m sync.Mutex
servers map[string]*Server
opts []ServerOpt
}
// NewServers creates a collection of servers to Postgres.
func NewServers(opts ...ServerOpt) *Servers {
return &Servers{
servers: make(map[string]*Server),
opts: opts,
}
}
// GetServer returns established connection from a collection.
func (s *Servers) GetServer(dsn string) (*Server, error) {
s.m.Lock()
defer s.m.Unlock()
var err error
var ok bool
errCount := 0 // start at zero because we increment before doing work
retries := 3
var server *Server
for {
if errCount++; errCount > retries {
return nil, err
}
server, ok = s.servers[dsn]
if !ok {
server, err = NewServer(dsn, s.opts...)
if err != nil {
time.Sleep(time.Duration(errCount) * time.Second)
continue
}
s.servers[dsn] = server
}
if err = server.Ping(); err != nil {
delete(s.servers, dsn)
time.Sleep(time.Duration(errCount) * time.Second)
continue
}
break
}
return server, nil
}
// Close disconnects from all known servers.
func (s *Servers) Close() {
s.m.Lock()
defer s.m.Unlock()
for _, server := range s.servers {
if err := server.Close(); err != nil {
log.Errorf("failed to close connection to %q: %v", server, err)
}
}
}
// Exporter collects Postgres metrics. It implements prometheus.Collector.
type Exporter struct {
// Holds a reference to the built-in column mappings. Currently this is for testing purposes
// only, since it just points to the global.
builtinMetricMaps map[string]intermediateMetricMap
disableDefaultMetrics, disableSettingsMetrics, autoDiscoverDatabases bool
excludeDatabases []string
dsn []string
userQueriesPath string
constantLabels prometheus.Labels
duration prometheus.Gauge
error prometheus.Gauge
psqlUp prometheus.Gauge
userQueriesError *prometheus.GaugeVec
totalScrapes prometheus.Counter
// servers are used to allow re-using the DB connection between scrapes.
// servers contains metrics map and query overrides.
servers *Servers
}
// ExporterOpt configures Exporter.
type ExporterOpt func(*Exporter)
// DisableDefaultMetrics configures default metrics export.
func DisableDefaultMetrics(b bool) ExporterOpt {
return func(e *Exporter) {
e.disableDefaultMetrics = b
}
}
// DisableSettingsMetrics configures pg_settings export.
func DisableSettingsMetrics(b bool) ExporterOpt {
return func(e *Exporter) {
e.disableSettingsMetrics = b
}
}
// AutoDiscoverDatabases allows scraping all databases on a database server.
func AutoDiscoverDatabases(b bool) ExporterOpt {
return func(e *Exporter) {
e.autoDiscoverDatabases = b
}
}
// ExcludeDatabases allows filtering out results from AutoDiscoverDatabases.
func ExcludeDatabases(s string) ExporterOpt {
return func(e *Exporter) {
e.excludeDatabases = strings.Split(s, ",")
}
}
// WithUserQueriesPath configures user's queries path.
func WithUserQueriesPath(p string) ExporterOpt {
return func(e *Exporter) {
e.userQueriesPath = p
}
}
// WithConstantLabels configures constant labels.
func WithConstantLabels(s string) ExporterOpt {
return func(e *Exporter) {
e.constantLabels = parseConstLabels(s)
}
}
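// parseConstLabels turns the --constantLabels flag value into a label set.
// For example (illustrative values), "cluster=prod, dc=eu-west-1" parses to
// prometheus.Labels{"cluster": "prod", "dc": "eu-west-1"}; malformed pairs are
// logged and skipped, and empty keys or values are skipped.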
func parseConstLabels(s string) prometheus.Labels {
labels := make(prometheus.Labels)
s = strings.TrimSpace(s)
if len(s) == 0 {
return labels
}
parts := strings.Split(s, ",")
for _, p := range parts {
keyValue := strings.Split(strings.TrimSpace(p), "=")
if len(keyValue) != 2 {
log.Errorf(`Wrong constant labels format %q, should be "key=value"`, p)
continue
}
key := strings.TrimSpace(keyValue[0])
value := strings.TrimSpace(keyValue[1])
if key == "" || value == "" {
continue
}
labels[key] = value
}
return labels
}
// NewExporter returns a new PostgreSQL exporter for the provided DSN.
func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter {
e := &Exporter{
dsn: dsn,
builtinMetricMaps: builtinMetricMaps,
}
for _, opt := range opts {
opt(e)
}
e.setupInternalMetrics()
e.setupServers()
return e
}
func (e *Exporter) setupServers() {
e.servers = NewServers(ServerWithLabels(e.constantLabels))
}
func (e *Exporter) setupInternalMetrics() {
e.duration = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "last_scrape_duration_seconds",
Help: "Duration of the last scrape of metrics from PostgreSQL.",
ConstLabels: e.constantLabels,
})
e.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "scrapes_total",
Help: "Total number of times PostgreSQL was scraped for metrics.",
ConstLabels: e.constantLabels,
})
e.error = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).",
ConstLabels: e.constantLabels,
})
e.psqlUp = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).",
ConstLabels: e.constantLabels,
})
e.userQueriesError = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "user_queries_load_error",
Help: "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).",
ConstLabels: e.constantLabels,
}, []string{"filename", "hashsum"})
}
// Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
// We cannot know in advance what metrics the exporter will generate
// from Postgres. So we use the poor man's describe method: Run a collect
// and send the descriptors of all the collected metrics. The problem
// here is that we need to connect to the Postgres DB. If it is currently
// unavailable, the descriptors will be incomplete. Since this is a
// stand-alone exporter and not used as a library within other code
// implementing additional metrics, the worst that can happen is that we
// don't detect inconsistent metrics created by this exporter
// itself. Also, a change in the monitored Postgres instance may change the
// exported metrics during the runtime of the exporter.
metricCh := make(chan prometheus.Metric)
doneCh := make(chan struct{})
go func() {
for m := range metricCh {
ch <- m.Desc()
}
close(doneCh)
}()
e.Collect(metricCh)
close(metricCh)
<-doneCh
}
// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.scrape(ch)
ch <- e.duration
ch <- e.totalScrapes
ch <- e.error
ch <- e.psqlUp
e.userQueriesError.Collect(ch)
}
func newDesc(subsystem, name, help string, labels prometheus.Labels) *prometheus.Desc {
return prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, name),
help, nil, labels,
)
}
func queryDatabases(server *Server) ([]string, error) {
rows, err := server.db.Query("SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false AND datname != current_database()") // nolint: safesql
if err != nil {
return nil, fmt.Errorf("Error retrieving databases: %v", err)
}
defer rows.Close() // nolint: errcheck
var databaseName string
result := make([]string, 0)
for rows.Next() {
err = rows.Scan(&databaseName)
if err != nil {
return nil, errors.New(fmt.Sprintln("Error retrieving rows:", err))
}
result = append(result, databaseName)
}
return result, nil
}
// Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal.
func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) {
// Check for a query override for this namespace
query, found := server.queryOverrides[namespace]
// Was this query disabled (i.e. nothing sensible can be queried on this
// version of PostgreSQL)?
if query == "" && found {
// Return success (no pertinent data)
return []prometheus.Metric{}, []error{}, nil
}
// Don't fail on a bad scrape of one metric
var rows *sql.Rows
var err error
if !found {
// I've no idea how to avoid this properly at the moment, but this is
// an admin tool so you're not injecting SQL right?
rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql
} else {
rows, err = server.db.Query(query) // nolint: safesql
}
if err != nil {
return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err)
}
defer rows.Close() // nolint: errcheck
var columnNames []string
columnNames, err = rows.Columns()
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err))
}
// Make a lookup map for the column indices
var columnIdx = make(map[string]int, len(columnNames))
for i, n := range columnNames {
columnIdx[n] = i
}
var columnData = make([]interface{}, len(columnNames))
var scanArgs = make([]interface{}, len(columnNames))
for i := range columnData {
scanArgs[i] = &columnData[i]
}
nonfatalErrors := []error{}
metrics := make([]prometheus.Metric, 0)
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
}
// Get the label values for this row.
labels := make([]string, len(mapping.labels))
for idx, label := range mapping.labels {
labels[idx], _ = dbToString(columnData[columnIdx[label]])
}
// Loop over column names, and match to scan data. Unknown columns
// will be filled with an untyped metric number *if* they can be
// converted to float64s. NULLs are allowed and treated as NaN.
for idx, columnName := range columnNames {
var metric prometheus.Metric
if metricMapping, ok := mapping.columnMappings[columnName]; ok {
// Is this a metricy metric?
if metricMapping.discard {
continue
}
value, ok := dbToFloat64(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx])))
continue
}
// Generate the metric
metric = prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
} else {
// Unknown metric. Report as untyped if scan to float64 works, else note an error too.
metricLabel := fmt.Sprintf("%s_%s", namespace, columnName)
desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, server.labels)
// It's not an error to fail here, since the values are
// unexpected anyway.
value, ok := dbToFloat64(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unparseable column type - discarding: ", namespace, columnName, err)))
continue
}
metric = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...)
}
metrics = append(metrics, metric)
}
}
return metrics, nonfatalErrors, nil
}
// Iterate through all the namespace mappings in the exporter and run their
// queries.
func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error {
// Return a map of namespace -> errors
namespaceErrors := make(map[string]error)
scrapeStart := time.Now()
for namespace, mapping := range server.metricMap {
log.Debugln("Querying namespace: ", namespace)
scrapeMetric := false
// Check if the metric is cached
server.cacheMtx.Lock()
cachedMetric, found := server.metricCache[namespace]
server.cacheMtx.Unlock()
// If found, check if needs refresh from cache
if found {
if scrapeStart.Sub(cachedMetric.lastScrape).Seconds() > float64(mapping.cacheSeconds) {
scrapeMetric = true
}
} else {
scrapeMetric = true
}
var metrics []prometheus.Metric
var nonFatalErrors []error
var err error
if scrapeMetric {
metrics, nonFatalErrors, err = queryNamespaceMapping(server, namespace, mapping)
} else {
metrics = cachedMetric.metrics
}
// Serious error - a namespace disappeared
if err != nil {
namespaceErrors[namespace] = err
log.Infoln(err)
}
// Non-serious errors - likely version or parsing problems.
if len(nonFatalErrors) > 0 {
for _, err := range nonFatalErrors {
log.Infoln(err.Error())
}
}
// Emit the metrics into the channel
for _, metric := range metrics {
ch <- metric
}
if scrapeMetric {
// Only cache if metric is meaningfully cacheable
if mapping.cacheSeconds > 0 {
server.cacheMtx.Lock()
server.metricCache[namespace] = cachedMetrics{
metrics: metrics,
lastScrape: scrapeStart,
}
server.cacheMtx.Unlock()
}
}
}
return namespaceErrors
}
// Check and update the exporters query maps if the version has changed.
func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error {
log.Debugf("Querying Postgres Version on %q", server)
versionRow := server.db.QueryRow("SELECT version();")
var versionString string
err := versionRow.Scan(&versionString)
if err != nil {
return fmt.Errorf("Error scanning version string on %q: %v", server, err)
}
semanticVersion, err := parseVersion(versionString)
if err != nil {
return fmt.Errorf("Error parsing version string on %q: %v", server, err)
}
if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) {
log.Warnf("PostgreSQL version is lower on %q then our lowest supported version! Got %s minimum supported is %s.", server, semanticVersion, lowestSupportedVersion)
}
// Check if semantic version changed and recalculate maps if needed.
if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil {
log.Infof("Semantic Version Changed on %q: %s -> %s", server, server.lastMapVersion, semanticVersion)
server.mappingMtx.Lock()
if e.disableDefaultMetrics || (!e.disableDefaultMetrics && e.autoDiscoverDatabases && !server.master) {
server.metricMap = make(map[string]MetricMapNamespace)
server.queryOverrides = make(map[string]string)
} else {
server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps)
server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
}
server.lastMapVersion = semanticVersion
if e.userQueriesPath != "" {
// Clear the metric while a reload is happening
e.userQueriesError.Reset()
// Calculate the hashsum of the userQueries
userQueriesData, err := ioutil.ReadFile(e.userQueriesPath)
if err != nil {
log.Errorln("Failed to reload user queries:", e.userQueriesPath, err)
e.userQueriesError.WithLabelValues(e.userQueriesPath, "").Set(1)
} else {
hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData))
if err := addQueries(userQueriesData, semanticVersion, server); err != nil {
log.Errorln("Failed to reload user queries:", e.userQueriesPath, err)
e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1)
} else {
// Mark user queries as successfully loaded
e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(0)
}
}
}
server.mappingMtx.Unlock()
}
// Output the version as a special metric
versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName),
"Version string as reported by postgres", []string{"version", "short_version"}, server.labels)
if !e.disableDefaultMetrics && (server.master && e.autoDiscoverDatabases) {
ch <- prometheus.MustNewConstMetric(versionDesc,
prometheus.UntypedValue, 1, versionString, semanticVersion.String())
}
return nil
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
defer func(begun time.Time) {
e.duration.Set(time.Since(begun).Seconds())
}(time.Now())
e.totalScrapes.Inc()
dsns := e.dsn
if e.autoDiscoverDatabases {
dsns = e.discoverDatabaseDSNs()
}
var errorsCount int
var connectionErrorsCount int
for _, dsn := range dsns {
if err := e.scrapeDSN(ch, dsn); err != nil {
errorsCount++
log.Errorf(err.Error())
if _, ok := err.(*ErrorConnectToServer); ok {
connectionErrorsCount++
}
}
}
switch {
case connectionErrorsCount >= len(dsns):
e.psqlUp.Set(0)
default:
e.psqlUp.Set(1) // Didn't fail, can mark connection as up for this scrape.
}
switch errorsCount {
case 0:
e.error.Set(0)
default:
e.error.Set(1)
}
}
func (e *Exporter) discoverDatabaseDSNs() []string {
dsns := make(map[string]struct{})
for _, dsn := range e.dsn {
parsedDSN, err := url.Parse(dsn)
if err != nil {
log.Errorf("Unable to parse DSN (%s): %v", loggableDSN(dsn), err)
continue
}
dsns[dsn] = struct{}{}
server, err := e.servers.GetServer(dsn)
if err != nil {
log.Errorf("Error opening connection to database (%s): %v", loggableDSN(dsn), err)
continue
}
server.master = true
databaseNames, err := queryDatabases(server)
if err != nil {
log.Errorf("Error querying databases (%s): %v", loggableDSN(dsn), err)
continue
}
for _, databaseName := range databaseNames {
if contains(e.excludeDatabases, databaseName) {
continue
}
parsedDSN.Path = databaseName
dsns[parsedDSN.String()] = struct{}{}
}
}
result := make([]string, len(dsns))
index := 0
for dsn := range dsns {
result[index] = dsn
index++
}
return result
}
func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error {
server, err := e.servers.GetServer(dsn)
if err != nil {
return &ErrorConnectToServer{fmt.Sprintf("Error opening connection to database (%s): %s", loggableDSN(dsn), err.Error())}
}
// Check if map versions need to be updated
if err := e.checkMapVersions(ch, server); err != nil {
log.Warnln("Proceeding with outdated query maps, as the Postgres version could not be determined:", err)
}
return server.Scrape(ch, e.disableSettingsMetrics)
}
// getDataSources tries to build the list of data sources (DSNs).
// DATA_SOURCE_NAME always wins so we do not break older versions;
// reading secrets from files wins over secrets in environment variables:
// DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS}
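// For example (illustrative values), with
//
//	DATA_SOURCE_URI="localhost:5432/postgres?sslmode=disable"
//	DATA_SOURCE_USER_FILE=/run/secrets/pg_user
//	DATA_SOURCE_PASS_FILE=/run/secrets/pg_pass
//
// and no DATA_SOURCE_NAME set, a single DSN of the form
// "postgresql://<user>:<pass>@localhost:5432/postgres?sslmode=disable" is built.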
func getDataSources() []string {
var dsn = os.Getenv("DATA_SOURCE_NAME")
if len(dsn) == 0 {
var user string
var pass string
if len(os.Getenv("DATA_SOURCE_USER_FILE")) != 0 {
fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_USER_FILE"))
if err != nil {
panic(err)
}
user = strings.TrimSpace(string(fileContents))
} else {
user = os.Getenv("DATA_SOURCE_USER")
}
if len(os.Getenv("DATA_SOURCE_PASS_FILE")) != 0 {
fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_PASS_FILE"))
if err != nil {
panic(err)
}
pass = strings.TrimSpace(string(fileContents))
} else {
pass = os.Getenv("DATA_SOURCE_PASS")
}
ui := url.UserPassword(user, pass).String()
uri := os.Getenv("DATA_SOURCE_URI")
dsn = "postgresql://" + ui + "@" + uri
return []string{dsn}
}
return strings.Split(dsn, ",")
}
func contains(a []string, x string) bool {
for _, n := range a {
if x == n {
return true
}
}
return false
}
func main() {
kingpin.Version(fmt.Sprintf("postgres_exporter %s (built with %s)\n", Version, runtime.Version()))
log.AddFlags(kingpin.CommandLine)
kingpin.Parse()
// landingPage contains the HTML served at '/'.
// TODO: Make this nicer and more informative.
var landingPage = []byte(`<html>
<head><title>Postgres exporter</title></head>
<body>
<h1>Postgres exporter</h1>
<p><a href='` + *metricPath + `'>Metrics</a></p>
</body>
</html>
`)
if *onlyDumpMaps {
dumpMaps()
return
}
dsn := getDataSources()
if len(dsn) == 0 {
log.Fatal("couldn't find environment variables describing the datasource to use")
}
exporter := NewExporter(dsn,
DisableDefaultMetrics(*disableDefaultMetrics),
DisableSettingsMetrics(*disableSettingsMetrics),
AutoDiscoverDatabases(*autoDiscoverDatabases),
WithUserQueriesPath(*queriesPath),
WithConstantLabels(*constantLabelsList),
ExcludeDatabases(*excludeDatabases),
)
defer func() {
exporter.servers.Close()
}()
prometheus.MustRegister(exporter)
http.Handle(*metricPath, promhttp.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html; charset=UTF-8") // nolint: errcheck
w.Write(landingPage) // nolint: errcheck
})
log.Infof("Starting Server: %s", *listenAddress)
log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
| [
"\"DATA_SOURCE_NAME\"",
"\"DATA_SOURCE_USER_FILE\"",
"\"DATA_SOURCE_USER_FILE\"",
"\"DATA_SOURCE_USER\"",
"\"DATA_SOURCE_PASS_FILE\"",
"\"DATA_SOURCE_PASS_FILE\"",
"\"DATA_SOURCE_PASS\"",
"\"DATA_SOURCE_URI\""
] | [] | [
"DATA_SOURCE_PASS",
"DATA_SOURCE_USER",
"DATA_SOURCE_NAME",
"DATA_SOURCE_PASS_FILE",
"DATA_SOURCE_USER_FILE",
"DATA_SOURCE_URI"
] | [] | ["DATA_SOURCE_PASS", "DATA_SOURCE_USER", "DATA_SOURCE_NAME", "DATA_SOURCE_PASS_FILE", "DATA_SOURCE_USER_FILE", "DATA_SOURCE_URI"] | go | 6 | 0 | |
codegen/linker_test.go | package codegen
import (
"os"
"strings"
"testing"
)
func TestLinkFailed(t *testing.T) {
l := newDefaultLinker("")
err := l.link("dummy", []string{"not-exist.o"})
if err == nil {
t.Fatalf("No error occurred")
}
msg := err.Error()
if !strings.Contains(msg, "Linker command failed: ") {
t.Fatalf("Unexpected error message '%s'", msg)
}
}
func TestMultiGOPATH(t *testing.T) {
gopath := os.Getenv("GOPATH")
defer os.Setenv("GOPATH", gopath)
os.Setenv("GOPATH", "unknown-path:"+gopath)
l := newDefaultLinker("")
err := l.link("dummy", []string{"not-exist.o"})
if !strings.Contains(err.Error(), "Linker command failed: ") {
t.Fatalf("Unexpected error message '%s'", err.Error())
}
}
func TestRuntimeNotFound(t *testing.T) {
gopath := os.Getenv("GOPATH")
defer os.Setenv("GOPATH", gopath)
os.Setenv("GOPATH", "/unknown/path/to/somewhere")
l := newDefaultLinker("")
err := l.link("dummy", []string{"not-exist.o"})
if !strings.Contains(err.Error(), "Runtime library (gocamlrt.a) was not found") {
t.Fatalf("Unexpected error message '%s'", err.Error())
}
}
func TestCustomizeLinkerCommand(t *testing.T) {
saved := os.Getenv("GOCAML_LINKER_CMD")
defer os.Setenv("GOCAML_LINKER_CMD", saved)
os.Setenv("GOCAML_LINKER_CMD", "linker-command-for-test")
l := newDefaultLinker("")
if l.linkerCmd != "linker-command-for-test" {
t.Fatalf("Wanted 'linker-command-for-test' as linker command but had '%s'", l.linkerCmd)
}
}
| [
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOCAML_LINKER_CMD\""
] | [] | [
"GOPATH",
"GOCAML_LINKER_CMD"
] | [] | ["GOPATH", "GOCAML_LINKER_CMD"] | go | 2 | 0 | |
config/config.go | // Package config contains a Config struct for kites.
package config
import (
"errors"
"fmt"
"net"
"net/http"
"net/http/cookiejar"
"os"
"strconv"
"time"
"github.com/koding/kite/kitekey"
"github.com/koding/kite/protocol"
jwt "github.com/dgrijalva/jwt-go"
"github.com/gorilla/websocket"
"github.com/igm/sockjs-go/sockjs"
)
// cookiejar.New never returns a non-nil error in its current implementation,
// even though its signature declares one, so it is safe to ignore the error here.
var CookieJar, _ = cookiejar.New(nil)
// Config is passed to kite.New when creating a new instance.
type Config struct {
// Options for Kite
Username string // Username to set when registering to Kontrol.
Environment string // Kite environment to set when registering to Kontrol.
Region string // Kite region to set when registering to Kontrol.
Id string // Kite ID to use when registering to Kontrol.
KiteKey string // The kite.key value to use for "kiteKey" authentication.
DisableAuthentication bool // Do not require authentication for requests.
DisableConcurrency bool // Do not process messages concurrently.
Transport Transport // SockJS transport to use.
IP string // IP of the kite server.
Port int // Port number of the kite server.
// VerifyFunc is used to verify the public key of the signed token.
//
// If the pub key is not to be trusted, the function must return
// kite.ErrKeyNotTrusted error.
//
// If nil, the default verify is used. By default the public key
// is verified by calling Kontrol and the result cached for
// VerifyTTL seconds if KontrolVerify is true. Otherwise
// only public keys that are the same as the KontrolKey one are
// accepted.
VerifyFunc func(pub string) error
// VerifyTTL is used to control time after result of a single
// VerifyFunc's call expires.
//
// When <0, the result is not cached.
//
// When 0, the default value of 300s is used.
VerifyTTL time.Duration
// VerifyAudienceFunc is used to verify the audience of JWT token.
//
// If nil, the default audience verify function is used which
// expects the aud to be a kite path that matches the username,
// environment and name of the client.
VerifyAudienceFunc func(client *protocol.Kite, aud string) error
// SockJS server / client connection configuration details.
// XHR is a HTTP client used for polling on responses for a XHR transport.
//
// Required.
XHR *http.Client
// Timeout specified max time waiting for the following operations to complete:
//
// - polling on an XHR connection
// - default timeout for certain kite requests (Kontrol API)
// - HTTP heartbeats and register method
//
// NOTE: Ensure the Timeout is higher than SockJS.HeartbeatDelay, otherwise
// XHR connections may get randomly closed.
//
// TODO(rjeczalik): Make kite heartbeats configurable as well.
Timeout time.Duration
// Client is a HTTP client used for issuing HTTP register request and
// HTTP heartbeats.
Client *http.Client
// Websocket is used for creating a client for a websocket transport.
//
// If a custom one is used, ensure any complementary field is also
// set in sockjs.WebSocketUpgrader value (for server connections).
//
// Required.
Websocket *websocket.Dialer
// SockJS are used to configure SockJS handler.
//
// Required.
SockJS *sockjs.Options
// Serve serves HTTP requests with the given handler for connections
// coming from the given listener.
//
// If Serve is nil, http.Serve is used by default.
Serve func(net.Listener, http.Handler) error
KontrolURL string
KontrolKey string
KontrolUser string
}
// DefaultConfig contains the default settings.
var DefaultConfig = &Config{
Username: "unknown",
Environment: "unknown",
Region: "unknown",
IP: "0.0.0.0",
Port: 0,
Transport: Auto,
Timeout: 15 * time.Second,
XHR: &http.Client{
Jar: CookieJar,
},
Client: &http.Client{
Timeout: 15 * time.Second,
Jar: CookieJar,
},
Websocket: &websocket.Dialer{
HandshakeTimeout: 15 * time.Second,
Jar: CookieJar,
},
SockJS: &sockjs.Options{
Websocket: sockjs.DefaultOptions.Websocket,
JSessionID: sockjs.DefaultOptions.JSessionID,
SockJSURL: sockjs.DefaultOptions.SockJSURL,
HeartbeatDelay: 10 * time.Second, // better fit for AWS ELB; empirically picked
DisconnectDelay: 10 * time.Second, // >= Timeout
ResponseLimit: sockjs.DefaultOptions.ResponseLimit,
},
}
// New returns a new Config initialized with defaults.
func New() *Config {
return DefaultConfig.Copy()
}
// NewFromKiteKey parses the given kite key file and gives a new Config value.
func NewFromKiteKey(file string) (*Config, error) {
key, err := kitekey.ParseFile(file)
if err != nil {
return nil, err
}
var c Config
if err := c.ReadToken(key); err != nil {
return nil, err
}
return &c, nil
}
func Get() (*Config, error) {
c := New()
if err := c.ReadKiteKey(); err != nil {
return nil, err
}
if err := c.ReadEnvironmentVariables(); err != nil {
return nil, err
}
return c, nil
}
func MustGet() *Config {
c, err := Get()
if err != nil {
fmt.Printf("Cannot read kite.key: %s\n", err.Error())
os.Exit(1)
}
return c
}
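// ReadEnvironmentVariables overrides config fields from KITE_* environment
// variables. For example (illustrative values only):
//
//	KITE_USERNAME=alice KITE_ENVIRONMENT=production KITE_PORT=4000 KITE_TIMEOUT=30s ./mykite
//
// An invalid KITE_PORT returns an error; unparseable durations are ignored.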
func (c *Config) ReadEnvironmentVariables() error {
var err error
if username := os.Getenv("KITE_USERNAME"); username != "" {
c.Username = username
}
if environment := os.Getenv("KITE_ENVIRONMENT"); environment != "" {
c.Environment = environment
}
if region := os.Getenv("KITE_REGION"); region != "" {
c.Region = region
}
if ip := os.Getenv("KITE_IP"); ip != "" {
c.IP = ip
}
if port := os.Getenv("KITE_PORT"); port != "" {
c.Port, err = strconv.Atoi(port)
if err != nil {
return err
}
}
if kontrolURL := os.Getenv("KITE_KONTROL_URL"); kontrolURL != "" {
c.KontrolURL = kontrolURL
}
if transportName := os.Getenv("KITE_TRANSPORT"); transportName != "" {
transport, ok := Transports[transportName]
if !ok {
return fmt.Errorf("transport '%s' doesn't exists", transportName)
}
c.Transport = transport
}
if ttl, err := time.ParseDuration(os.Getenv("KITE_VERIFY_TTL")); err == nil {
c.VerifyTTL = ttl
}
if timeout, err := time.ParseDuration(os.Getenv("KITE_TIMEOUT")); err == nil {
c.Timeout = timeout
c.Client.Timeout = timeout
}
if timeout, err := time.ParseDuration(os.Getenv("KITE_HANDSHAKE_TIMEOUT")); err == nil {
c.Websocket.HandshakeTimeout = timeout
}
return nil
}
// ReadKiteKey parsed the user's kite key and returns a new Config.
func (c *Config) ReadKiteKey() error {
key, err := kitekey.Parse()
if err != nil {
return err
}
return c.ReadToken(key)
}
// ReadToken reads Kite Claims from JWT token and uses them to initialize Config.
func (c *Config) ReadToken(key *jwt.Token) error {
c.KiteKey = key.Raw
claims, ok := key.Claims.(*kitekey.KiteClaims)
if !ok {
return errors.New("no claims found")
}
c.Username = claims.Subject
c.KontrolUser = claims.Issuer
c.Id = claims.Id // jti is used for jwt's but let's also use it for kite ID
c.KontrolURL = claims.KontrolURL
c.KontrolKey = claims.KontrolKey
return nil
}
// Copy returns a new copy of the config object.
func (c *Config) Copy() *Config {
copy := *c
if c.XHR != nil {
xhr := *copy.XHR
copy.XHR = &xhr
}
if c.Client != nil {
client := *copy.Client
copy.Client = &client
}
if c.Websocket != nil {
ws := *copy.Websocket
copy.Websocket = &ws
}
return &copy
}
| [
"\"KITE_USERNAME\"",
"\"KITE_ENVIRONMENT\"",
"\"KITE_REGION\"",
"\"KITE_IP\"",
"\"KITE_PORT\"",
"\"KITE_KONTROL_URL\"",
"\"KITE_TRANSPORT\"",
"\"KITE_VERIFY_TTL\"",
"\"KITE_TIMEOUT\"",
"\"KITE_HANDSHAKE_TIMEOUT\""
] | [] | [
"KITE_PORT",
"KITE_REGION",
"KITE_KONTROL_URL",
"KITE_USERNAME",
"KITE_VERIFY_TTL",
"KITE_TIMEOUT",
"KITE_HANDSHAKE_TIMEOUT",
"KITE_IP",
"KITE_ENVIRONMENT",
"KITE_TRANSPORT"
] | [] | ["KITE_PORT", "KITE_REGION", "KITE_KONTROL_URL", "KITE_USERNAME", "KITE_VERIFY_TTL", "KITE_TIMEOUT", "KITE_HANDSHAKE_TIMEOUT", "KITE_IP", "KITE_ENVIRONMENT", "KITE_TRANSPORT"] | go | 10 | 0 | |
serveup/settings.py | """
Django settings for serveup project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import json
from serveup.getip import IP
from uuid import uuid4
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.environ.get('BASE_DIR', os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
with open(os.path.join(BASE_DIR, 'settings.json'), 'r') as settings_file:
settings = json.load(settings_file)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = settings.get('secret_key', os.environ.get('SECRET_KEY'))
# SECURITY WARNING: don't run with debug turned on in production!
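# Note: any non-empty string in the DEBUG environment variable (even "False") evaluates as truthy here.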
DEBUG = os.environ.get('DEBUG', False)
ALLOWED_HOSTS = [IP, 'localhost']
PORT = settings.get('port', os.environ.get('PORT', 8080))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tables',
'visitors',
'menu',
'orders',
'serve_admin',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'serveup.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'serveup.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
WHITENOISE_AUTOREFRESH = True
# One time admin token created on server start
ADMIN_TOKEN = uuid4().hex
| [] | [] | [
"PORT",
"SECRET_KEY",
"BASE_DIR",
"DEBUG"
] | [] | ["PORT", "SECRET_KEY", "BASE_DIR", "DEBUG"] | python | 4 | 0 | |
soracom/generated/cmd/sims_delete_tag.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// SimsDeleteTagCmdSimId holds value of 'sim_id' option
var SimsDeleteTagCmdSimId string
// SimsDeleteTagCmdTagName holds value of 'tag_name' option
var SimsDeleteTagCmdTagName string
func init() {
SimsDeleteTagCmd.Flags().StringVar(&SimsDeleteTagCmdSimId, "sim-id", "", TRAPI("Sim Id of the target SIM."))
SimsDeleteTagCmd.Flags().StringVar(&SimsDeleteTagCmdTagName, "tag-name", "", TRAPI("Tag name to be deleted. (This will be part of a URL path, so it needs to be percent-encoded. In JavaScript, specify the name after it has been encoded using encodeURIComponent().)"))
SimsCmd.AddCommand(SimsDeleteTagCmd)
}
// SimsDeleteTagCmd defines 'delete-tag' subcommand
var SimsDeleteTagCmd = &cobra.Command{
Use: "delete-tag",
Short: TRAPI("/sims/{sim_id}/tags/{tag_name}:delete:summary"),
Long: TRAPI(`/sims/{sim_id}/tags/{tag_name}:delete:description`),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unexpected arguments passed => %v", args)
}
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectSimsDeleteTagCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectSimsDeleteTagCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("sim_id", "sim-id", "path", parsedBody, SimsDeleteTagCmdSimId)
if err != nil {
return nil, err
}
err = checkIfRequiredStringParameterIsSupplied("tag_name", "tag-name", "path", parsedBody, SimsDeleteTagCmdTagName)
if err != nil {
return nil, err
}
return &apiParams{
method: "DELETE",
path: buildPathForSimsDeleteTagCmd("/sims/{sim_id}/tags/{tag_name}"),
query: buildQueryForSimsDeleteTagCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForSimsDeleteTagCmd(path string) string {
escapedSimId := url.PathEscape(SimsDeleteTagCmdSimId)
path = strReplace(path, "{"+"sim_id"+"}", escapedSimId, -1)
escapedTagName := url.PathEscape(SimsDeleteTagCmdTagName)
path = strReplace(path, "{"+"tag_name"+"}", escapedTagName, -1)
return path
}
func buildQueryForSimsDeleteTagCmd() url.Values {
result := url.Values{}
return result
}
| [
"\"SORACOM_VERBOSE\""
] | [] | [
"SORACOM_VERBOSE"
] | [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
poc/modules/hostfile_windows.go | // +build windows
package modules
import (
"os"
)
var hostsFilePath = os.Getenv("SystemRoot") + `\System32\drivers\etc\hosts`
| [
"\"SystemRoot\""
] | [] | [
"SystemRoot"
] | [] | ["SystemRoot"] | go | 1 | 0 | |
examples/v2/authCodeFlow/main.go | package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"github.com/FuzzyStatic/blizzard/v2"
"github.com/FuzzyStatic/blizzard/v2/oauth"
"golang.org/x/oauth2"
)
var (
clientID string
clientSecret string
myDomain string
cfg oauth2.Config
blizz *blizzard.Client
)
func homepage(w http.ResponseWriter, r *http.Request) {
fmt.Println("Homepage Hit!")
http.Redirect(w, r, cfg.AuthCodeURL("my_random_state"), http.StatusFound)
}
func authorize(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
state := r.Form.Get("state")
if state != "my_random_state" {
http.Error(w, "State invalid", http.StatusBadRequest)
return
}
code := r.Form.Get("code")
if code == "" {
http.Error(w, "Code not found", http.StatusBadRequest)
return
}
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
e := json.NewEncoder(w)
e.SetIndent("", " ")
err = e.Encode(*token)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
validation, _, err := blizz.TokenValidation(context.Background(), token)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("%+v\n", validation)
userInfo, _, err := blizz.UserInfoHeader(token)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("%+v\n", userInfo)
}
func init() {
clientID = os.Getenv("CLIENT_ID")
if clientID == "" {
log.Fatal("Set the environment variable CLIENT_ID before retrying.")
}
clientSecret = os.Getenv("CLIENT_SECRET")
if clientSecret == "" {
log.Fatal("Set the environment variable CLIENT_SECRET before retrying.")
}
myDomain = os.Getenv("MY_DOMAIN")
if myDomain == "" {
log.Fatal("Set the environment variable MY_DOMAIN before retrying.")
}
}
func main() {
blizz = blizzard.NewClient(clientID, clientSecret, blizzard.US, blizzard.EnUS)
cfg = blizz.AuthorizeConfig(fmt.Sprintf("http://%s:9094/oauth2", myDomain), oauth.ProfileD3, oauth.ProfileSC2, oauth.ProfileWoW)
http.HandleFunc("/", homepage)
http.HandleFunc("/oauth2", authorize)
// We start up our Client on port 9094
log.Println("Client is running at 9094 port.")
log.Fatal(http.ListenAndServe(":9094", nil))
}
| [
"\"CLIENT_ID\"",
"\"CLIENT_SECRET\"",
"\"MY_DOMAIN\""
] | [] | [
"MY_DOMAIN",
"CLIENT_SECRET",
"CLIENT_ID"
] | [] | ["MY_DOMAIN", "CLIENT_SECRET", "CLIENT_ID"] | go | 3 | 0 | |
tests/trainer/legacy_deprecate_flow_log_tests/test_trainer_steps_result_return.py | """
Tests to ensure that the training loop works with a dict
"""
import os
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.core.step_result import TrainResult
from tests.base import EvalModelTemplate
from tests.base.deterministic_model import DeterministicModel
from pytorch_lightning.utilities.exceptions import MisconfigurationException
# test with train_step_end
# add logging + row interval tests
def test_training_step_result_log_step_only(tmpdir):
"""
Tests that only training_step can be used with TrainResult
Makes sure that things are routed to pbar, loggers and loss accordingly
Makes sure pbar and logs happen on step only when requested
"""
# enable internal debugging actions
os.environ['PL_DEV_DEBUG'] = '1'
model = DeterministicModel()
model.training_step = model.training_step_result_log_step_only
model.training_step_end = None
model.training_epoch_end = None
model.val_dataloader = None
batches = 3
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=batches,
limit_val_batches=batches,
log_every_n_steps=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
# make sure correct steps were called
assert model.training_step_called
assert not model.training_step_end_called
assert not model.training_epoch_end_called
# make sure correct metrics are logged (one per batch step as requested)
assert len(trainer.dev_debugger.logged_metrics) == batches
for batch_idx, logged_metrics in enumerate(trainer.dev_debugger.logged_metrics):
assert logged_metrics[f'step_log_and_pbar_acc1_b{batch_idx}'] == 11.0
assert logged_metrics[f'step_log_acc2_b{batch_idx}'] == 12.0
assert f'step_pbar_acc3_b{batch_idx}' not in logged_metrics
assert len(logged_metrics) == 4
# make sure we are using the correct metrics for callbacks
assert len(trainer.logger_connector.callback_metrics) == 11
assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
# make sure pbar metrics are correct ang log metrics did not leak
for batch_idx in range(batches):
assert trainer.logger_connector.progress_bar_metrics[f'step_log_and_pbar_acc1_b{batch_idx}'] == 11
assert trainer.logger_connector.progress_bar_metrics[f'step_pbar_acc3_b{batch_idx}'] == 13
assert f'step_log_acc2_b{batch_idx}' not in trainer.logger_connector.progress_bar_metrics
# make sure training outputs what is expected
for batch_idx, batch in enumerate(model.train_dataloader()):
break
out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
assert out.signal == 0
assert out.batch_log_metrics[f'step_log_and_pbar_acc1_b{batch_idx}'] == 11.0
assert out.batch_log_metrics[f'step_log_acc2_b{batch_idx}'] == 12.0
train_step_out = out.training_step_output_for_epoch_end
assert len(train_step_out) == 1
train_step_out = train_step_out[0][0]
assert isinstance(train_step_out, TrainResult)
assert 'minimize' in train_step_out
assert f'step_log_and_pbar_acc1_b{batch_idx}' in train_step_out
assert f'step_log_acc2_b{batch_idx}' in train_step_out
# make sure the optimizer closure returns the correct things
opt_closure_result = trainer.train_loop.training_step_and_backward(
batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_training_step_result_log_epoch_only(tmpdir):
"""
Tests that only training_step can be used with TrainResult
Makes sure that things are routed to pbar, loggers and loss accordingly
Makes sure pbar and logs happen on epoch only when requested
"""
# enable internal debugging actions
os.environ['PL_DEV_DEBUG'] = '1'
model = DeterministicModel()
model.training_step = model.training_step_result_log_epoch_only
model.training_step_end = None
model.training_epoch_end = None
model.val_dataloader = None
epochs = 3
batches = 2
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=batches,
limit_val_batches=batches,
log_every_n_steps=1,
max_epochs=epochs,
weights_summary=None,
)
trainer.fit(model)
# make sure correct steps were called
assert model.training_step_called
assert not model.training_step_end_called
assert not model.training_epoch_end_called
assert len(trainer.logger_connector.callback_metrics) == 11
# make sure correct metrics are logged (one per batch step as requested)
assert len(trainer.dev_debugger.logged_metrics) == epochs
epoch_metrics = trainer.dev_debugger.logged_metrics
assert len(epoch_metrics) == epochs
for batch_idx, logged_metrics in enumerate(epoch_metrics):
assert logged_metrics[f'epoch_log_and_pbar_acc1_e{batch_idx}'] == 14.0
assert logged_metrics[f'epoch_log_acc2_e{batch_idx}'] == 15.0
assert f'epoch_pbar_acc3_e{batch_idx}' not in logged_metrics
assert len(logged_metrics) == 4
# make sure we are using the correct metrics for callbacks
assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
# make sure pbar metrics are correct ang log metrics did not leak
for epoch_idx in range(epochs):
assert trainer.logger_connector.progress_bar_metrics[f'epoch_log_and_pbar_acc1_e{epoch_idx}'] == 14
assert trainer.logger_connector.progress_bar_metrics[f'epoch_pbar_acc3_e{epoch_idx}'] == 16
assert f'epoch_log_acc2_e{epoch_idx}' not in trainer.logger_connector.progress_bar_metrics
# make sure training outputs what is expected
for batch_idx, batch in enumerate(model.train_dataloader()):
break
out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
assert out.signal == 0
assert len(out.batch_log_metrics) == 0
train_step_out = out.training_step_output_for_epoch_end
assert len(train_step_out) == 1
train_step_out = train_step_out[0][0]
assert isinstance(train_step_out, TrainResult)
assert 'minimize' in train_step_out
assert f'epoch_log_and_pbar_acc1_e{trainer.current_epoch}' in train_step_out
assert f'epoch_log_acc2_e{trainer.current_epoch}' in train_step_out
# make sure the optimizer closure returns the correct things
opt_closure_result = trainer.train_loop.training_step_and_backward(
batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_training_step_result_log_step_and_epoch(tmpdir):
"""
Tests that only training_step can be used with TrainResult
Makes sure that things are routed to pbar, loggers and loss accordingly
Makes sure pbar and logs happen on step and epoch when requested
"""
# enable internal debugging actions
os.environ['PL_DEV_DEBUG'] = '1'
model = DeterministicModel()
model.training_step = model.training_step_result_log_epoch_and_step
model.training_step_end = None
model.training_epoch_end = None
model.val_dataloader = None
epochs = 3
batches = 2
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=batches,
limit_val_batches=batches,
log_every_n_steps=1,
max_epochs=epochs,
weights_summary=None,
)
trainer.fit(model)
# make sure correct steps were called
assert model.training_step_called
assert not model.training_step_end_called
assert not model.training_epoch_end_called
assert len(trainer.logger_connector.callback_metrics) == 11
# make sure correct metrics are logged (one per batch step as requested)
assert len(trainer.dev_debugger.logged_metrics) == (epochs * batches) + epochs
epoch_metrics = trainer.dev_debugger.logged_metrics
epoch_idx = -1
for i_start in range(0, len(epoch_metrics), batches + 1):
epoch_idx += 1
epoch_outputs = epoch_metrics[i_start: i_start + batches + 1]
mean_vals = {
'epoch_step_epoch_log_and_pbar_acc1': [],
'epoch_step_epoch_log_acc2': []
}
# make sure each batch logged the expected value
for batch_idx in range(len(epoch_outputs) - 1):
logged_metrics = epoch_outputs[batch_idx]
expected_val_1 = (5 + batch_idx) * (epoch_idx + 1)
expected_val_2 = (6 + batch_idx) * (epoch_idx + 1)
mean_vals['epoch_step_epoch_log_and_pbar_acc1'].append(torch.tensor(expected_val_1).float())
mean_vals['epoch_step_epoch_log_acc2'].append(torch.tensor(expected_val_2).float())
assert logged_metrics['step_epoch_log_and_pbar_acc1_step'] == expected_val_1
assert logged_metrics['step_epoch_log_acc2_step'] == expected_val_2
assert 'step_epoch_pbar_acc3' not in logged_metrics
assert len(logged_metrics) == 6
# make sure the metrics for the epoch end are actual means (the default reduce fx) or all the batches
epoch_end_metrics = epoch_outputs[-1]
eval_1 = torch.stack(mean_vals['epoch_step_epoch_log_and_pbar_acc1']).mean()
eval_2 = torch.stack(mean_vals['epoch_step_epoch_log_acc2']).mean()
assert epoch_end_metrics['step_epoch_log_and_pbar_acc1_epoch'] == eval_1
assert epoch_end_metrics['step_epoch_log_acc2_epoch'] == eval_2
assert 'step_epoch_pbar_acc3' not in epoch_end_metrics
assert len(logged_metrics) == 6
# make sure we are using the correct metrics for callbacks
assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
# -------------------------------
# VERIFY PBAR METRICS
# -------------------------------
# make sure pbar metrics are correct ang log metrics did not leak
all_pbar_metrics = trainer.dev_debugger.pbar_added_metrics
assert len(all_pbar_metrics) == (epochs * batches) + epochs
epoch_idx = -1
for i_start in range(0, len(all_pbar_metrics), batches + 1):
epoch_idx += 1
epoch_outputs = all_pbar_metrics[i_start: i_start + batches + 1]
mean_vals = {
'epoch_step_epoch_log_and_pbar_acc1': [],
'epoch_step_epoch_pbar_acc3': []
}
# make sure each batch logged the expected value
for batch_idx in range(len(epoch_outputs) - 1):
logged_metrics = epoch_outputs[batch_idx]
expected_val_1 = (5 + batch_idx) * (epoch_idx + 1)
expected_val_2 = (7 + batch_idx) * (epoch_idx + 1)
mean_vals['epoch_step_epoch_log_and_pbar_acc1'].append(torch.tensor(expected_val_1).float())
mean_vals['epoch_step_epoch_pbar_acc3'].append(torch.tensor(expected_val_2).float())
assert logged_metrics['step_epoch_log_and_pbar_acc1_step'] == expected_val_1
assert logged_metrics['step_epoch_pbar_acc3_step'] == expected_val_2
assert 'epoch_log_acc2_step' not in logged_metrics
assert len(logged_metrics) == 5
# make sure the metrics for the epoch end are actual means (the default reduce fx) or all the batches
epoch_end_metrics = epoch_outputs[-1]
eval_1 = torch.stack(mean_vals['epoch_step_epoch_log_and_pbar_acc1']).mean()
eval_2 = torch.stack(mean_vals['epoch_step_epoch_pbar_acc3']).mean()
assert epoch_end_metrics['step_epoch_log_and_pbar_acc1_epoch'] == eval_1
assert epoch_end_metrics['step_epoch_pbar_acc3_epoch'] == eval_2
assert 'epoch_log_acc2_step' not in epoch_end_metrics
assert len(logged_metrics) == 5
# -----------------------------------------
# make sure training outputs what is expected
# -----------------------------------------
for batch_idx, batch in enumerate(model.train_dataloader()):
break
out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
assert out.signal == 0
assert len(out.batch_log_metrics) == 4
train_step_out = out.training_step_output_for_epoch_end
assert len(train_step_out) == 1
train_step_out = train_step_out[0][0]
assert isinstance(train_step_out, TrainResult)
assert 'minimize' in train_step_out
assert 'step_epoch_log_and_pbar_acc1_step' in train_step_out
assert 'step_epoch_log_acc2_step' in train_step_out
assert 'step_epoch_log_and_pbar_acc1_epoch' in train_step_out
assert 'step_epoch_log_acc2_epoch' in train_step_out
# make sure the optimizer closure returns the correct things
opt_closure_result = trainer.train_loop.training_step_and_backward(
batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_training_step_epoch_end_result(tmpdir):
"""
Makes sure training_step and epoch_end can be used with Results (without batch_end)
"""
os.environ['PL_DEV_DEBUG'] = '1'
model = DeterministicModel()
model.training_step = model.training_step_result_log_epoch_and_step
model.training_epoch_end = model.training_epoch_end_return_for_log_epoch_and_step
model.val_dataloader = None
batches = 3
epochs = 1
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=epochs,
log_every_n_steps=1,
limit_train_batches=batches,
weights_summary=None,
)
trainer.fit(model)
assert len(trainer.logger_connector.callback_metrics) == 17
# make sure correct steps were called
assert model.training_step_called
assert not model.training_step_end_called
assert model.training_epoch_end_called
# make sure correct metrics were logged
logged_metrics = trainer.dev_debugger.logged_metrics
assert len(logged_metrics) == (epochs * batches) + epochs
last_logged = logged_metrics[-1]
assert last_logged['step_epoch_log_and_pbar_acc1_epoch'] == 210.0
assert last_logged['step_epoch_log_acc2_epoch'] == 336.0
assert last_logged['epoch_end_log_acc_epoch'] == 1212.0
assert last_logged['epoch_end_log_pbar_acc_epoch'] == 1214.0
assert 'epoch_end_pbar_acc' not in last_logged
# make sure pbar metrics are correct
logged_pbar = trainer.dev_debugger.pbar_added_metrics
assert len(logged_pbar) == (epochs * batches) + epochs
assert trainer.logger_connector.progress_bar_metrics['step_epoch_log_and_pbar_acc1_epoch'] == 210.0
assert trainer.logger_connector.progress_bar_metrics['step_epoch_log_and_pbar_acc1_step'] == 7.0
assert trainer.logger_connector.progress_bar_metrics['step_epoch_pbar_acc3_epoch'] == 504.0
assert trainer.logger_connector.progress_bar_metrics['epoch_end_pbar_acc_epoch'] == 1213.0
assert trainer.logger_connector.progress_bar_metrics['epoch_end_log_pbar_acc_epoch'] == 1214.0
assert 'epoch_end_log_acc' not in trainer.logger_connector.progress_bar_metrics
assert 'log_acc2' not in trainer.logger_connector.progress_bar_metrics
# make sure callback metrics didn't change
assert trainer.logger_connector.callback_metrics['checkpoint_on'] == 171
# -----------------------------------------
# make sure training outputs what is expected
# -----------------------------------------
for batch_idx, batch in enumerate(model.train_dataloader()):
break
out = trainer.train_loop.run_training_batch(batch, batch_idx, 0)
assert out.signal == 0
assert len(out.batch_log_metrics) == 4
train_step_out = out.training_step_output_for_epoch_end
assert len(train_step_out) == 1
train_step_out = train_step_out[0][0]
assert isinstance(train_step_out, TrainResult)
assert 'minimize' in train_step_out
assert 'step_epoch_log_and_pbar_acc1_step' in train_step_out
assert 'step_epoch_log_and_pbar_acc1_epoch' in train_step_out
assert 'step_epoch_log_acc2_step' in train_step_out
assert 'step_epoch_log_acc2_epoch' in train_step_out
# make sure the optimizer closure returns the correct things
opt_closure_result = trainer.train_loop.training_step_and_backward(
batch, batch_idx, 0, trainer.optimizers[0], trainer.hiddens)
assert opt_closure_result['loss'] == (42.0 * 3) + (15.0 * 3)
def test_no_auto_callbacks_with_train_loop_only(tmpdir):
"""
Make sure early stop + checkpoint work with only a train loop
"""
os.environ['PL_DEV_DEBUG'] = '1'
model = DeterministicModel()
model.training_step = model.training_step_no_default_callbacks_for_train_loop
model.training_epoch_end = None
model.val_dataloader = None
batches = 3
epochs = 3
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=epochs,
log_every_n_steps=1,
limit_train_batches=batches,
weights_summary=None,
)
trainer.fit(model)
assert len(trainer.logger_connector.callback_metrics) == 1
all_losses = trainer.dev_debugger.saved_train_losses
assert len(all_losses) == batches * epochs
assert trainer.checkpoint_callback.monitor == 'checkpoint_on'
assert trainer.early_stop_callback is None
trainer = Trainer(
default_root_dir=tmpdir,
early_stop_callback=True,
max_epochs=epochs,
log_every_n_steps=1,
limit_train_batches=batches,
weights_summary=None,
)
trainer.fit(model)
assert trainer.early_stop_callback.monitor == 'early_stop_on'
def test_no_callbacks_with_train_loop_only(tmpdir):
"""
Make sure early stop + checkpoint work with only a train loop
"""
os.environ['PL_DEV_DEBUG'] = '1'
model = DeterministicModel()
model.training_step = model.training_step_no_callbacks_result_obj
model.training_epoch_end = None
model.val_dataloader = None
batches = 3
epochs = 3
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=epochs,
log_every_n_steps=1,
limit_train_batches=batches,
weights_summary=None,
)
trainer.fit(model)
all_losses = trainer.dev_debugger.saved_train_losses
assert len(all_losses) == batches * epochs
assert trainer.early_stop_callback is None
assert len(trainer.dev_debugger.checkpoint_callback_history) == 3
assert len(trainer.dev_debugger.early_stopping_history) == 0
def test_use_callbacks_with_train_loop_only(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
model = DeterministicModel()
model.training_step = model.training_step_result_log_epoch_and_step_for_callbacks
model.training_epoch_end = None
model.val_dataloader = None
batches = 3
epochs = 300
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=epochs,
early_stop_callback=True,
log_every_n_steps=1,
limit_train_batches=batches,
weights_summary=None,
)
trainer.fit(model)
num_expected_epochs = 10
# ----------------------------------
# VERIFY EARLY STOPPING BEHAVIOR
# ----------------------------------
# with train loop only it happens on every epoch
early_stop_vals = trainer.dev_debugger.early_stopping_history
assert len(early_stop_vals) == num_expected_epochs
min_val = min([x['best'] for x in early_stop_vals])
assert min_val == 171 + 9
all_losses = trainer.dev_debugger.saved_train_losses
from collections import Counter
batch_idxs = Counter([x['batch_idx'] for x in all_losses])
for i, val in batch_idxs.items():
assert val == num_expected_epochs
assert i in [0, 1, 2]
# ----------------------------------
# VERIFY CHECKPOINTING BEHAVIOR
# ----------------------------------
ckpt_vals = trainer.dev_debugger.checkpoint_callback_history
assert len(ckpt_vals) == 5, '5 ckpts should have been saved'
for ckpt_val, expected_epoch in zip(ckpt_vals, [0, 1, 2, 3, 6]):
assert ckpt_val['epoch'] == expected_epoch
assert ckpt_val['monitor'] == 'checkpoint_on'
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_full_train_loop_with_results_obj_dp(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
batches = 10
epochs = 3
model = EvalModelTemplate()
model.validation_step = None
model.test_step = None
model.training_step = model.training_step_full_loop_result_obj_dp
model.training_step_end = model.training_step_end_full_loop_result_obj_dp
model.training_epoch_end = model.training_epoch_end_full_loop_result_obj_dp
model.val_dataloader = None
model.test_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
distributed_backend='dp',
gpus=[0, 1],
max_epochs=epochs,
early_stop_callback=True,
log_every_n_steps=2,
limit_train_batches=batches,
weights_summary=None,
)
trainer.fit(model)
# make sure we saw all the correct keys
seen_keys = set()
for metric in trainer.dev_debugger.logged_metrics:
seen_keys.update(metric.keys())
assert 'train_step_metric' in seen_keys
assert 'train_step_end_metric' in seen_keys
assert 'train_epoch_end_metric_epoch' in seen_keys
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_loop_steps_only_dp(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
batches = 10
epochs = 3
model = EvalModelTemplate()
model.validation_step = None
model.test_step = None
model.training_step = model.training_step_result_obj_dp
model.training_step_end = None
model.training_epoch_end = None
model.validation_step = model.validation_step_result_obj_dp
model.validation_step_end = None
model.validation_epoch_end = None
model.test_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
distributed_backend='dp',
gpus=[0, 1],
max_epochs=epochs,
early_stop_callback=True,
log_every_n_steps=2,
limit_train_batches=batches,
weights_summary=None,
)
trainer.fit(model)
assert model.training_step_called
assert model.validation_step_called
def test_result_map(tmpdir):
result = TrainResult()
result.log_dict({'x1': torch.tensor(1), 'x2': torch.tensor(2)})
result.rename_keys({'x1': 'y1', 'x2': 'y2'})
assert 'x1' not in result
assert 'x2' not in result
assert 'y1' in result
assert 'y2' in result
def test_result_monitor_warnings(tmpdir):
"""
Tests that we warn when the monitor key is changed and we use Results obj
"""
model = EvalModelTemplate()
model.test_step = None
model.training_step = model.training_step_result_obj
model.training_step_end = None
model.training_epoch_end = None
model.validation_step = model.validation_step_result_obj
model.validation_step_end = None
model.validation_epoch_end = None
model.test_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
early_stop_callback=True,
log_every_n_steps=2,
limit_train_batches=2,
weights_summary=None,
checkpoint_callback=ModelCheckpoint(monitor='not_checkpoint_on')
)
# warn that the key was changed but metric was not found
with pytest.raises(MisconfigurationException, match="not found in the returned metrics"):
trainer.fit(model)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
log_every_n_steps=2,
limit_train_batches=2,
weights_summary=None,
early_stop_callback=EarlyStopping(monitor='not_val_loss')
)
with pytest.raises(RuntimeError, match=r'.*Early stopping conditioned on metric `not_val_loss` which is not*'):
trainer.fit(model)
def test_eval_loop_return_none(tmpdir):
"""
Tests that the fit loop completes when validation_epoch_end returns None
"""
model = EvalModelTemplate()
model.test_step = None
model.training_step = model.training_step_result_obj
model.training_step_end = None
model.training_epoch_end = None
model.validation_step = model.validation_step_result_obj
model.validation_step_end = None
model.validation_epoch_end = model.validation_epoch_end_return_none
model.test_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
log_every_n_steps=2,
limit_train_batches=2,
weights_summary=None,
)
trainer.fit(model)
| [] | [] | [
"PL_DEV_DEBUG"
] | [] | ["PL_DEV_DEBUG"] | python | 1 | 0 | |
lithops/storage/storage.py |
# (C) Copyright IBM Corp. 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import logging
import itertools
import importlib
from lithops.version import __version__
from lithops.config import CACHE_DIR, RUNTIMES_PREFIX, JOBS_PREFIX, TEMP_PREFIX
from lithops.utils import is_lithops_function
from lithops.storage.utils import create_status_key, create_output_key, \
status_key_suffix, init_key_suffix, CloudObject, StorageNoSuchKeyError
logger = logging.getLogger(__name__)
class Storage:
"""
A Storage object is used by the partitioner and other components to access
the underlying storage backend without exposing the implementation details.
"""
def __init__(self, storage_config=None, lithops_config=None, storage_backend=None, executor_id=None):
self._created_cobjects_n = itertools.count()
if storage_config:
self.storage_config = storage_config
self.backend = self.storage_config['backend']
self.bucket = self.storage_config['bucket']
if 'user_agent' not in self.storage_config[self.backend]:
self.storage_config[self.backend]['user_agent'] = 'lithops/{}'.format(__version__)
try:
module_location = 'lithops.storage.backends.{}'.format(self.backend)
sb_module = importlib.import_module(module_location)
StorageBackend = getattr(sb_module, 'StorageBackend')
self.storage_handler = StorageBackend(self.storage_config[self.backend],
bucket=self.bucket,
executor_id=executor_id)
except Exception as e:
raise NotImplementedError("An exception was produced trying to create the "
"'{}' storage backend: {}".format(self.backend, e))
else:
self.lithops_config = lithops_config
self.backend = storage_backend
self.bucket = lithops_config['lithops']['storage_bucket']
try:
module_location = 'lithops.storage.backends.{}'.format(self.backend)
sb_module = importlib.import_module(module_location)
storage_config = self.lithops_config[self.backend]
storage_config['user_agent'] = 'lithops/{}'.format(__version__)
StorageBackend = getattr(sb_module, 'StorageBackend')
self.storage_handler = StorageBackend(storage_config)
except Exception as e:
raise NotImplementedError("An exception was produced trying to create the "
"'{}' storage backend: {}".format(self.backend, e))
def get_client(self):
return self.storage_handler.get_client()
def put_object(self, bucket_name, key, data):
return self.storage_handler.put_object(bucket_name, key, data)
def get_object(self, bucket_name, key, stream=False, extra_get_args={}):
return self.storage_handler.get_object(bucket_name, key, stream, extra_get_args)
def head_object(self, bucket_name, key):
return self.storage_handler.head_object(bucket_name, key)
def delete_object(self, bucket_name, key):
return self.storage_handler.delete_object(bucket_name, key)
def delete_objects(self, bucket_name, key_list):
return self.storage_handler.delete_objects(bucket_name, key_list)
def bucket_exists(self, bucket_name):
return self.storage_handler.bucket_exists(bucket_name)
def head_bucket(self, bucket_name):
return self.storage_handler.head_bucket(bucket_name)
def list_objects(self, bucket_name, prefix=None):
return self.storage_handler.list_objects(bucket_name, prefix)
def list_keys(self, bucket_name, prefix=None):
return self.storage_handler.list_keys(bucket_name, prefix)
def put_cobject(self, body, bucket=None, key=None):
"""
Put CloudObject into storage.
:param body: data content
:param bucket: destination bucket
:param key: destination key
:return: CloudObject instance
"""
prefix = os.environ.get('LITHOPS_EXECUTION_ID', '')
coid = hex(next(self._created_cobjects_n))[2:]
name = '{}/cloudobject_{}'.format(prefix, coid)
key = key or '/'.join([TEMP_PREFIX, name])
bucket = bucket or self.bucket
self.storage_handler.put_object(bucket, key, body)
return CloudObject(self.backend, bucket, key)
def get_cobject(self, cloudobject=None, bucket=None, key=None, stream=False):
"""
Get CloudObject from storage.
:param cloudobject: CloudObject instance
:param bucket: destination bucket
:param key: destination key
:return: body text
"""
if cloudobject:
if cloudobject.backend == self.backend:
bucket = cloudobject.bucket
key = cloudobject.key
return self.storage_handler.get_object(bucket, key, stream=stream)
else:
raise Exception("CloudObject: Invalid Storage backend")
elif (bucket and key) or key:
bucket = bucket or self.bucket
return self.storage_handler.get_object(bucket, key, stream=stream)
else:
return None
def delete_cobject(self, cloudobject=None, bucket=None, key=None):
"""
Delete CloudObject from storage.
:param cloudobject: CloudObject instance
:param bucket: destination bucket
:param key: destination key
:return: None
"""
if cloudobject:
if cloudobject.backend == self.backend:
bucket = cloudobject.bucket
key = cloudobject.key
return self.storage_handler.delete_object(bucket, key)
else:
raise Exception("CloudObject: Invalid Storage backend")
elif (bucket and key) or key:
bucket = bucket or self.bucket
return self.storage_handler.delete_object(bucket, key)
else:
return None
def delete_cobjects(self, cloudobjects):
"""
Delete a list of CloudObjects from storage.
:param cloudobjects: list of CloudObject instances
:return: None
"""
cobjs = {}
for co in cloudobjects:
if co.backend not in cobjs:
cobjs[co.backend] = {}
if co.bucket not in cobjs[co.backend]:
cobjs[co.backend][co.bucket] = []
cobjs[co.backend][co.bucket].append(co.key)
for backend in cobjs:
if backend == self.backend:
for bucket in cobjs[backend]:
self.storage_handler.delete_objects(bucket, cobjs[backend][bucket])
else:
raise Exception("CloudObject: Invalid Storage backend")
class InternalStorage:
"""
An InternalStorage object is used by executors and other components to access
the underlying storage backend without exposing the implementation details.
"""
def __init__(self, storage_config, executor_id=None):
self.storage_config = storage_config
self.backend = self.storage_config['backend']
self.bucket = self.storage_config['bucket']
self.storage = Storage(storage_config=storage_config, executor_id=executor_id)
def get_client(self):
return self.storage.get_client()
def get_storage_config(self):
"""
Retrieves the configuration of this storage handler.
:return: storage configuration
"""
return self.storage_config
def put_data(self, key, data):
"""
Put data object into storage.
:param key: data key
:param data: data content
:return: None
"""
return self.storage.put_object(self.bucket, key, data)
def put_func(self, key, func):
"""
Put serialized function into storage.
:param key: function key
:param func: serialized function
:return: None
"""
return self.storage.put_object(self.bucket, key, func)
def get_data(self, key, stream=False, extra_get_args={}):
"""
Get data object from storage.
:param key: data key
:return: data content
"""
return self.storage.get_object(self.bucket, key, stream, extra_get_args)
def get_func(self, key):
"""
Get serialized function from storage.
:param key: function key
:return: serialized function
"""
return self.storage.get_object(self.bucket, key)
def get_job_status(self, executor_id, job_id):
"""
Get the status of a callset.
:param executor_id: executor's ID
:param job_id: job's ID
:return: Two sets holding the call IDs of running and done calls.
"""
callset_prefix = '/'.join([JOBS_PREFIX, executor_id, job_id])
keys = self.storage.list_keys(self.bucket, callset_prefix)
running_keys = [k[len(JOBS_PREFIX)+1:-len(init_key_suffix)].rsplit("/", 3)
for k in keys if init_key_suffix in k]
running_callids = [((k[0], k[1], k[2]), k[3]) for k in running_keys]
done_keys = [k for k in keys if status_key_suffix in k]
done_callids = [tuple(k[len(JOBS_PREFIX)+1:].rsplit("/", 3)[:3]) for k in done_keys]
return set(running_callids), set(done_callids)
def get_call_status(self, executor_id, job_id, call_id):
"""
Get status of a call.
:param executor_id: executor ID of the call
:param call_id: call ID of the call
:return: A dictionary containing call's status, or None if no updated status
"""
status_key = create_status_key(JOBS_PREFIX, executor_id, job_id, call_id)
try:
data = self.storage.get_object(self.bucket, status_key)
return json.loads(data.decode('ascii'))
except StorageNoSuchKeyError:
return None
def get_call_output(self, executor_id, job_id, call_id):
"""
Get the output of a call.
:param executor_id: executor ID of the call
:param call_id: call ID of the call
:return: Output of the call.
"""
output_key = create_output_key(JOBS_PREFIX, executor_id, job_id, call_id)
try:
return self.storage.get_object(self.bucket, output_key)
except StorageNoSuchKeyError:
return None
def get_runtime_meta(self, key):
"""
Get the metadata given a runtime name.
:param key: name of the runtime
:return: runtime metadata
"""
path = [RUNTIMES_PREFIX, __version__, key+".meta.json"]
filename_local_path = os.path.join(CACHE_DIR, *path)
if os.path.exists(filename_local_path) and not is_lithops_function():
logger.debug("Runtime metadata found in local cache")
with open(filename_local_path, "r") as f:
runtime_meta = json.loads(f.read())
return runtime_meta
else:
logger.debug("Runtime metadata not found in local cache. Retrieving it from storage")
try:
obj_key = '/'.join(path).replace('\\', '/')
logger.debug('Trying to download runtime metadata from: {}://{}/{}'
.format(self.backend, self.bucket, obj_key))
json_str = self.storage.get_object(self.bucket, obj_key)
logger.debug('Runtime metadata found in storage')
runtime_meta = json.loads(json_str.decode("ascii"))
# Save runtime meta to cache
if not os.path.exists(os.path.dirname(filename_local_path)):
os.makedirs(os.path.dirname(filename_local_path))
with open(filename_local_path, "w") as f:
f.write(json.dumps(runtime_meta))
return runtime_meta
except StorageNoSuchKeyError:
logger.debug('Runtime metadata not found in storage')
raise Exception('The runtime {} is not installed.'.format(obj_key))
def put_runtime_meta(self, key, runtime_meta):
"""
Put the metadata for a given runtime.
:param key: name of the runtime
:param runtime_meta: runtime metadata
"""
path = [RUNTIMES_PREFIX, __version__, key+".meta.json"]
obj_key = '/'.join(path).replace('\\', '/')
logger.debug("Uploading runtime metadata to: {}://{}/{}"
.format(self.backend, self.bucket, obj_key))
self.storage.put_object(self.bucket, obj_key, json.dumps(runtime_meta))
if not is_lithops_function():
filename_local_path = os.path.join(CACHE_DIR, *path)
logger.debug("Storing runtime metadata into local cache: {}".format(filename_local_path))
if not os.path.exists(os.path.dirname(filename_local_path)):
os.makedirs(os.path.dirname(filename_local_path))
with open(filename_local_path, "w") as f:
f.write(json.dumps(runtime_meta))
def delete_runtime_meta(self, key):
"""
Delete the metadata of a given runtime.
:param key: name of the runtime
"""
path = [RUNTIMES_PREFIX, __version__, key+".meta.json"]
obj_key = '/'.join(path).replace('\\', '/')
filename_local_path = os.path.join(CACHE_DIR, *path)
if os.path.exists(filename_local_path):
os.remove(filename_local_path)
self.storage.delete_object(self.bucket, obj_key)
| [] | [] | [
"LITHOPS_EXECUTION_ID"
] | [] | ["LITHOPS_EXECUTION_ID"] | python | 1 | 0 | |
src/light-stemcell-builder/driver/create_ami_driver_test.go | package driver_test
import (
"fmt"
"light-stemcell-builder/config"
"light-stemcell-builder/driverset"
"light-stemcell-builder/resources"
"log"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/satori/go.uuid"
)
var _ = Describe("CreateAmiDriver", func() {
It("creates a bootable HVM AMI from an existing snapshot", func() {
logger := log.New(GinkgoWriter, "CreateAmiDriver - Bootable HVM Test: ", log.LstdFlags)
accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
Expect(accessKey).ToNot(BeEmpty(), "AWS_ACCESS_KEY_ID must be set")
secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
Expect(secretKey).ToNot(BeEmpty(), "AWS_SECRET_ACCESS_KEY must be set")
region := os.Getenv("AWS_REGION")
Expect(region).ToNot(BeEmpty(), "AWS_REGION must be set")
creds := config.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
Region: region,
}
snapshotID := os.Getenv("EBS_SNAPSHOT_ID")
Expect(snapshotID).ToNot(BeEmpty(), "EBS_SNAPSHOT_ID must be set")
amiDriverConfig := resources.AmiDriverConfig{SnapshotID: snapshotID}
amiUniqueID := strings.ToUpper(uuid.NewV4().String())
amiName := fmt.Sprintf("BOSH-%s", amiUniqueID)
amiDriverConfig.Name = amiName
amiDriverConfig.VirtualizationType = resources.HvmAmiVirtualization
amiDriverConfig.Accessibility = resources.PublicAmiAccessibility
amiDriverConfig.Description = "bosh cpi test ami"
ds := driverset.NewStandardRegionDriverSet(GinkgoWriter, creds)
amiDriver := ds.CreateAmiDriver()
ami, err := amiDriver.Create(amiDriverConfig)
Expect(err).ToNot(HaveOccurred())
Expect(ami.VirtualizationType).To(Equal(resources.HvmAmiVirtualization))
ec2Client := ec2.New(session.New(), &aws.Config{Region: aws.String(ami.Region)})
reqOutput, err := ec2Client.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{aws.String(ami.ID)}})
Expect(err).ToNot(HaveOccurred())
Expect(len(reqOutput.Images)).To(Equal(1))
Expect(*reqOutput.Images[0].Name).To(Equal(amiName))
Expect(*reqOutput.Images[0].Architecture).To(Equal(resources.AmiArchitecture))
Expect(*reqOutput.Images[0].VirtualizationType).To(Equal(ami.VirtualizationType))
Expect(*reqOutput.Images[0].EnaSupport).To(BeTrue())
Expect(*reqOutput.Images[0].SriovNetSupport).To(Equal("simple"))
Expect(*reqOutput.Images[0].Public).To(BeTrue())
instanceReservation, err := ec2Client.RunInstances(&ec2.RunInstancesInput{
ImageId: aws.String(ami.ID),
InstanceType: aws.String(ec2.InstanceTypeM3Medium),
MinCount: aws.Int64(1),
MaxCount: aws.Int64(1),
NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
&ec2.InstanceNetworkInterfaceSpecification{
DeviceIndex: aws.Int64(0),
AssociatePublicIpAddress: aws.Bool(true), // Associate a public address to avoid explicitly defining subnet information
},
},
})
Expect(err).ToNot(HaveOccurred())
instanceID := instanceReservation.Instances[0].InstanceId
logger.Printf("Created VM with instance ID: %s", *instanceID)
Eventually(func() error {
// there is a bug in the Instance Waiters where the status InvalidInstanceID.NotFound is not properly handled
// retry waiting in an Eventually block to work around this problem
return ec2Client.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{InstanceIds: []*string{instanceID}})
}, 15*time.Minute, 10*time.Second).Should(BeNil())
err = ec2Client.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{InstanceIds: []*string{instanceID}})
if err != nil {
logger.Printf("Encountered error waiting for VM to boot, retrying once: %s", err)
err = ec2Client.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{InstanceIds: []*string{instanceID}})
Expect(err).ToNot(HaveOccurred())
}
_, err = ec2Client.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{instanceID}}) // Ignore TerminateInstancesOutput
Expect(err).ToNot(HaveOccurred())
err = ec2Client.WaitUntilInstanceTerminated(&ec2.DescribeInstancesInput{InstanceIds: []*string{instanceID}})
Expect(err).ToNot(HaveOccurred())
_, err = ec2Client.DeregisterImage(&ec2.DeregisterImageInput{ImageId: &ami.ID}) // Ignore DeregisterImageOutput
Expect(err).ToNot(HaveOccurred())
})
It("creates a bootable PV AMI from an existing snapshot", func() {
logger := log.New(GinkgoWriter, "CreateAmiDriver - Bootable PV Test: ", log.LstdFlags)
accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
Expect(accessKey).ToNot(BeEmpty(), "AWS_ACCESS_KEY_ID must be set")
secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
Expect(secretKey).ToNot(BeEmpty(), "AWS_SECRET_ACCESS_KEY must be set")
region := os.Getenv("AWS_REGION")
Expect(region).ToNot(BeEmpty(), "AWS_REGION must be set")
creds := config.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
Region: region,
}
snapshotID := os.Getenv("EBS_SNAPSHOT_ID")
Expect(snapshotID).ToNot(BeEmpty(), "EBS_SNAPSHOT_ID must be set")
amiUniqueID := strings.ToUpper(uuid.NewV4().String())
amiName := fmt.Sprintf("BOSH-%s", amiUniqueID)
amiDriverConfig := resources.AmiDriverConfig{SnapshotID: snapshotID}
amiDriverConfig.VirtualizationType = resources.PvAmiVirtualization
amiDriverConfig.Accessibility = resources.PublicAmiAccessibility
amiDriverConfig.Name = amiName
amiDriverConfig.Description = "bosh cpi test ami"
ds := driverset.NewStandardRegionDriverSet(GinkgoWriter, creds)
amiDriver := ds.CreateAmiDriver()
ami, err := amiDriver.Create(amiDriverConfig)
Expect(err).ToNot(HaveOccurred())
ec2Client := ec2.New(session.New(), &aws.Config{Region: aws.String(region)})
reqOutput, err := ec2Client.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{aws.String(ami.ID)}})
Expect(err).ToNot(HaveOccurred())
Expect(len(reqOutput.Images)).To(Equal(1))
Expect(*reqOutput.Images[0].Architecture).To(Equal(resources.AmiArchitecture))
Expect(reqOutput.Images[0].SriovNetSupport).To(BeNil())
Expect(*reqOutput.Images[0].VirtualizationType).To(Equal(resources.PvAmiVirtualization))
Expect(*reqOutput.Images[0].Public).To(BeTrue())
instanceReservation, err := ec2Client.RunInstances(&ec2.RunInstancesInput{
ImageId: aws.String(ami.ID),
InstanceType: aws.String(ec2.InstanceTypeM3Medium),
MinCount: aws.Int64(1),
MaxCount: aws.Int64(1),
NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
&ec2.InstanceNetworkInterfaceSpecification{
DeviceIndex: aws.Int64(0),
AssociatePublicIpAddress: aws.Bool(true), // Associate a public address to avoid explicitly defining subnet information
},
},
})
Expect(err).ToNot(HaveOccurred())
instanceID := instanceReservation.Instances[0].InstanceId
logger.Printf("Created VM with instance ID: %s", *instanceID)
Eventually(func() error {
// there is a bug in the Instance Waiters where the status InvalidInstanceID.NotFound is not properly handled
// retry waiting in an Eventually block to work around this problem
return ec2Client.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{InstanceIds: []*string{instanceID}})
}, 15*time.Minute, 10*time.Second).Should(BeNil())
err = ec2Client.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{InstanceIds: []*string{instanceID}})
if err != nil {
logger.Printf("Encountered error waiting for VM to boot, retrying once: %s", err)
err = ec2Client.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{InstanceIds: []*string{instanceID}})
Expect(err).ToNot(HaveOccurred())
}
_, err = ec2Client.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{instanceID}}) // Ignore TerminateInstancesOutput
Expect(err).ToNot(HaveOccurred())
err = ec2Client.WaitUntilInstanceTerminated(&ec2.DescribeInstancesInput{InstanceIds: []*string{instanceID}})
Expect(err).ToNot(HaveOccurred())
_, err = ec2Client.DeregisterImage(&ec2.DeregisterImageInput{ImageId: &ami.ID}) // Ignore DeregisterImageOutput
Expect(err).ToNot(HaveOccurred())
})
})
| [
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_REGION\"",
"\"EBS_SNAPSHOT_ID\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_REGION\"",
"\"EBS_SNAPSHOT_ID\""
] | [] | [
"EBS_SNAPSHOT_ID",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION"
] | [] | ["EBS_SNAPSHOT_ID", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"] | go | 4 | 0 | |
main.go | package main
import (
"context"
"fmt"
"log"
"os"
"github.com/zmb3/spotify"
"golang.org/x/oauth2/clientcredentials"
)
// single song retriever
// playlist retriever
type playlist struct {
Songs []song
}
type song struct {
Artists []string `json:"artists"`
Name string `json:"name"`
Duration int `json:"duration"`
}
func msToSeconds(duration int) int {
trackSeconds := (duration / 1000)
return trackSeconds
}
func main() {
config := &clientcredentials.Config{
ClientID: os.Getenv("SPOTIFY_ID"),
ClientSecret: os.Getenv("SPOTIFY_SECRET"),
TokenURL: spotify.TokenURL,
}
token, err := config.Token(context.Background())
if err != nil {
log.Fatalf("couldn't get token: %v", err)
}
client := spotify.Authenticator{}.NewClient(token)
// NOTE: requires playlist ID
playlist, err := client.GetPlaylist("3CUKkyF0LGlg0HDSof03Cq")
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
return
}
for _, track := range playlist.Tracks.Tracks {
var artists []string
for _, artist := range track.Track.Artists {
artists = append(artists, artist.Name)
}
// calculate duration from int (ms) to 0:00 minute/seconds
trackDurationSeconds := msToSeconds(track.Track.Duration)
// remove 'original mix' and 'radio edit' from track name?
// TODO: add date added to playlist + album?
songPayload := &song{
Artists: artists,
Name: track.Track.Name,
// track duration in seconds
Duration: trackDurationSeconds,
}
fmt.Println(songPayload)
}
}
| [
"\"SPOTIFY_ID\"",
"\"SPOTIFY_SECRET\""
] | [] | [
"SPOTIFY_ID",
"SPOTIFY_SECRET"
] | [] | ["SPOTIFY_ID", "SPOTIFY_SECRET"] | go | 2 | 0 | |
PyObjCTest/test_nsnotificationqueue.py | import Foundation
from PyObjCTools.TestSupport import TestCase
class TestNSNotificationQueue(TestCase):
def testConstants(self):
self.assertEqual(Foundation.NSPostWhenIdle, 1)
self.assertEqual(Foundation.NSPostASAP, 2)
self.assertEqual(Foundation.NSPostNow, 3)
self.assertEqual(Foundation.NSNotificationNoCoalescing, 0)
self.assertEqual(Foundation.NSNotificationCoalescingOnName, 1)
self.assertEqual(Foundation.NSNotificationCoalescingOnSender, 2)
| [] | [] | [] | [] | [] | python | null | null | null |
plugins/commands.py | import os
import math
import json
import time
import shutil
import heroku3
import requests
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
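# The WEBHOOK environment variable selects between the sample config and the local config module.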
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from plugins.helpers import humanbytes
from database.filters_mdb import filter_stats
from database.users_mdb import add_user, find_user, all_users
@trojanz.on_message(filters.command('id') & (filters.private | filters.group))
async def showid(client, message):
chat_type = message.chat.type
if chat_type == "private":
user_id = message.chat.id
await message.reply_text(
f"Your ID : `{user_id}`",
parse_mode="md",
quote=True
)
elif (chat_type == "group") or (chat_type == "supergroup"):
user_id = message.from_user.id
chat_id = message.chat.id
if message.reply_to_message:
reply_id = f"Replied User ID : `{message.reply_to_message.from_user.id}`"
else:
reply_id = ""
await message.reply_text(
f"Your ID : `{user_id}`\nThis Group ID : `{chat_id}`\n\n{reply_id}",
parse_mode="md",
quote=True
)
@trojanz.on_message(filters.command('info') & (filters.private | filters.group))
async def showinfo(client, message):
try:
cmd, id = message.text.split(" ", 1)
except:
id = False
pass
if id:
if (len(id) == 10 or len(id) == 9):
try:
checkid = int(id)
except:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
else:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
if Config.SAVE_USER == "yes":
name, username, dcid = await find_user(str(id))
else:
try:
user = await client.get_users(int(id))
name = str(user.first_name + (user.last_name or ""))
username = user.username
dcid = user.dc_id
except:
name = False
pass
if not name:
await message.reply_text("__USER Details not found!!__", quote=True, parse_mode="md")
return
else:
if message.reply_to_message:
name = str(message.reply_to_message.from_user.first_name\
+ (message.reply_to_message.from_user.last_name or ""))
id = message.reply_to_message.from_user.id
username = message.reply_to_message.from_user.username
dcid = message.reply_to_message.from_user.dc_id
else:
name = str(message.from_user.first_name\
+ (message.from_user.last_name or ""))
id = message.from_user.id
username = message.from_user.username
dcid = message.from_user.dc_id
if not str(username) == "None":
user_name = f"@{username}"
else:
user_name = "none"
await message.reply_text(
f"<b>Name</b> : {name}\n\n"
f"<b>User ID</b> : <code>{id}</code>\n\n"
f"<b>Username</b> : {user_name}\n\n"
f"<b>Permanant USER link</b> : <a href='tg://user?id={id}'>Click here!</a>\n\n"
f"<b>DC ID</b> : {dcid}\n\n",
quote=True,
parse_mode="html"
)
@trojanz.on_message((filters.private | filters.group) & filters.command('status'))
async def bot_status(client,message):
if str(message.from_user.id) not in Config.AUTH_USERS:
return
chats, filter_count = await filter_stats()
if Config.SAVE_USER == "yes":
users = await all_users()
userstats = f"> __**{users} users have interacted with your bot!**__\n\n"
else:
userstats = ""
if Config.HEROKU_API_KEY:
try:
server = heroku3.from_key(Config.HEROKU_API_KEY)
user_agent = (
'Mozilla/5.0 (Linux; Android 10; SM-G975F) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.149 Mobile Safari/537.36'
)
accountid = server.account().id
headers = {
'User-Agent': user_agent,
'Authorization': f'Bearer {Config.HEROKU_API_KEY}',
'Accept': 'application/vnd.heroku+json; version=3.account-quotas',
}
path = "/accounts/" + accountid + "/actions/get-quota"
request = requests.get("https://api.heroku.com" + path, headers=headers)
if request.status_code == 200:
result = request.json()
total_quota = result['account_quota']
quota_used = result['quota_used']
quota_left = total_quota - quota_used
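# Heroku reports quota values in seconds; convert them to hours and days for display.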
total = math.floor(total_quota/3600)
used = math.floor(quota_used/3600)
hours = math.floor(quota_left/3600)
minutes = math.floor(quota_left/60 % 60)
days = math.floor(hours/24)
usedperc = math.floor(quota_used / total_quota * 100)
leftperc = math.floor(quota_left / total_quota * 100)
quota_details = f"""
**Heroku Account Status**
> __You have **{total} hours** of free dyno quota available each month.__
> __Dyno hours used this month__ ;
- **{used} hours** ( {usedperc}% )
> __Dyno hours remaining this month__ ;
- **{hours} hours** ( {leftperc}% )
- **Approximately {days} days!**
"""
else:
quota_details = ""
except:
print("Check your Heroku API key")
quota_details = ""
else:
quota_details = ""
uptime = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - Config.BOT_START_TIME))
try:
t, u, f = shutil.disk_usage(".")
total = humanbytes(t)
used = humanbytes(u)
free = humanbytes(f)
disk = "\n**Disk Details**\n\n" \
f"> USED : {used} / {total}\n" \
f"> FREE : {free}\n\n"
except:
disk = ""
await message.reply_text(
"**Current status of your bot!**\n\n"
f"> __**{filters}** filters across **{chats}** chats__\n\n"
f"{userstats}"
f"> __BOT Uptime__ : **{uptime}**\n\n"
f"{quota_details}"
f"{disk}",
quote=True,
parse_mode="md"
)
@trojanz.on_message(filters.command('start') & filters.private)
async def start(client, message):
await message.reply_text(
text=Script.START_MSG.format(message.from_user.mention),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("♻️ 𝙂𝙍𝙊𝙐𝙋", url="https://t.me/cinemazilla"),
InlineKeyboardButton("👨💻 𝙈𝘼𝙎𝙏𝙀𝙍", url="https://t.me/no_ones_like_me")
],
[
InlineKeyboardButton("💿 𝙅𝙊𝙄𝙉 𝙊𝙐𝙍 𝘾𝙃𝘼𝙉𝙉𝙀𝙇 💿", url="https://t.me/joinchat/CXRICR1ok3ViZjk9")
],
[
InlineKeyboardButton("🔐 𝘾𝙇𝙊𝙎𝙀", callback_data="close_data"),
InlineKeyboardButton("💡𝙃𝙀𝙇𝙋", callback_data="help_data"),
]
]
),
reply_to_message_id=message.message_id
)
if Config.SAVE_USER == "yes":
try:
await add_user(
str(message.from_user.id),
str(message.from_user.username),
str(message.from_user.first_name + " " + (message.from_user.last_name or "")),
str(message.from_user.dc_id)
)
except:
pass
@trojanz.on_message(filters.command('help') & filters.private)
async def help(client, message):
await message.reply_text(
text=Script.HELP_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("👰♀ 𝗮𝗯𝗼𝘂𝘁 𝗺𝗲", callback_data="about_data")
],
[
InlineKeyboardButton("🔙 𝗯𝗮𝗰𝗸", callback_data="start_data"),
InlineKeyboardButton("🔐 𝗰𝗹𝗼𝘀𝗲", callback_data="close_data"),
]
]
),
reply_to_message_id=message.message_id
)
@trojanz.on_message(filters.command('about') & filters.private)
async def about(client, message):
await message.reply_text(
text=Script.ABOUT_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("CLOSE", callback_data="close_data"),
]
]
),
reply_to_message_id=message.message_id
)
| [] | [] | [
"WEBHOOK"
] | [] | ["WEBHOOK"] | python | 1 | 0 | |
manager/controllers/motion/motion_controllers.go | // Copyright 2020 IBM Corp.
// SPDX-License-Identifier: Apache-2.0
package motion
import (
"os"
motionv1 "github.com/ibm/the-mesh-for-data/manager/apis/motion/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// This function sets up all motion controllers including the webhooks given a controller manager.
// Webhooks can be activated/deactivated using the ENABLE_WEBHOOKS environment variable.
// This currently includes:
// - a manager for BatchTransfers
// - a manager for StreamTransfers
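// Illustrative usage (not part of this package): webhooks can be switched off
// for local development by exporting the variable before starting the manager
// binary, e.g.
//
//	ENABLE_WEBHOOKS=false ./manager
//
// Any value other than the exact string "false" (including leaving the
// variable unset) keeps the webhooks enabled.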
func SetupMotionControllers(mgr manager.Manager) {
setupLog := ctrl.Log.WithName("setup")
if err := NewBatchTransferReconciler(mgr, "BatchTransferController").SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "BatchTransfer")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err := (&motionv1.BatchTransfer{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Captain")
os.Exit(1)
}
}
if err := NewStreamTransferReconciler(mgr, "StreamTransferController").SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "StreamTransfer")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err := (&motionv1.StreamTransfer{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "StreamTransfer")
os.Exit(1)
}
}
}
| [
"\"ENABLE_WEBHOOKS\"",
"\"ENABLE_WEBHOOKS\""
] | [] | [
"ENABLE_WEBHOOKS"
] | [] | ["ENABLE_WEBHOOKS"] | go | 1 | 0 | |
server.py | import logging
import os
from flask import Flask, jsonify, request
from flask.logging import default_handler
import workers
import prometheus_metrics
from collect_json_schema import CollectJSONSchema
def create_application():
"""Create Flask application instance with AWS client enabled."""
app = Flask(__name__)
app.config['NEXT_MICROSERVICE_HOST'] = \
os.environ.get('NEXT_MICROSERVICE_HOST')
return app
APP = create_application()
ROOT_LOGGER = logging.getLogger()
ROOT_LOGGER.setLevel(APP.logger.level)
ROOT_LOGGER.addHandler(default_handler)
VERSION = "0.0.1"
ROUTE_PREFIX = "/r/insights/platform/aiops-data-collector"
# Schema for the Collect API
SCHEMA = CollectJSONSchema()
@APP.route(ROUTE_PREFIX, methods=['GET'])
def get_root():
"""Root Endpoint for 3scale."""
return jsonify(
status='OK',
version=VERSION,
message='Up and Running'
)
@APP.route(f'{ROUTE_PREFIX}/api/v0/version', methods=['GET'])
def get_version():
"""Endpoint for getting the current version."""
return jsonify(
status='OK',
version=VERSION,
message='AIOPS Data Collector Version 0.0.1'
)
@APP.route(f'{ROUTE_PREFIX}/api/v0/collect', methods=['POST'])
def post_collect():
"""Endpoint servicing data collection."""
input_data = request.get_json(force=True)
validation = SCHEMA.load(input_data)
prometheus_metrics.METRICS['jobs_total'].inc()
if validation.errors:
prometheus_metrics.METRICS['jobs_denied'].inc()
return jsonify(
status='Error',
errors=validation.errors,
message='Input payload validation failed'
), 400
next_service = APP.config['NEXT_MICROSERVICE_HOST']
source_id = input_data.get('payload_id')
b64_identity = request.headers.get('x-rh-identity')
workers.download_job(
input_data.get('url'),
source_id,
next_service,
b64_identity
)
APP.logger.info('Job started.')
prometheus_metrics.METRICS['jobs_initiated'].inc()
return jsonify(status="OK", message="Job initiated")
@APP.route("/metrics", methods=['GET'])
def get_metrics():
"""Metrics Endpoint."""
return prometheus_metrics.generate_aggregated_metrics()
if __name__ == "__main__":
# pylama:ignore=C0103
port = int(os.environ.get("PORT", 8004))
APP.run(port=port)
| [] | [] | [
"PORT",
"NEXT_MICROSERVICE_HOST"
] | [] | ["PORT", "NEXT_MICROSERVICE_HOST"] | python | 2 | 0 | |
example/Deliverable1/test_02_vehicle_following.py | #!/usr/bin/env python3
"""Filename: test_02_vehicle_following.py
Description: Testing the EGO vehicle following another vehicle on a single-lane road.
Author: Minli He
Date: 2021-03-28
Class: SJSU Spring 2021 CMPE 187 Sec 1
"""
import os
import lgsvl
import time
import sys
import logging
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.utils import *
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%m-%d %H:%M:%S')
class TestEncroachingVehicles(unittest.TestCase):
# sim_config = {'EGO_MODEL': "2e9095fa-c9b9-4f3f-8d7d-65fa2bb03921",
sim_config = {'EGO_MODEL': "Apollo Modular",
'TIME_LIMIT': 20,
'TIME_DELAY': 2,
'MAX_EGO_SPEED': mph_to_mps(25),
'MAX_POV_SPEED': mph_to_mps(15),
'MAX_FOLLOWING_DISTANCE': 20}
sim = None
@classmethod
def setUpClass(cls):
# Initializing Simulator...
cls.sim = lgsvl.Simulator(os.environ.get("SIMULATOR_HOST", "127.0.0.1"), 8181)
# Initializing Map...
if cls.sim.current_scene == "SingleLaneRoad":
cls.sim.reset()
else:
cls.sim.load("SingleLaneRoad")
cls.sim.set_time_of_day(12)
def test_encroaching_oncoming_vehicles(self):
# Initializing EGO Car...
egoState = lgsvl.AgentState()
egoState.transform = self.sim.get_spawn()[0]
ego = self.sim.add_agent(self.sim_config['EGO_MODEL'], lgsvl.AgentType.EGO, egoState)
ego.connect_bridge(os.environ.get("BRIDGE_HOST", "127.0.0.1"), 9090)
# Initializing NPC Car...
POVState = lgsvl.AgentState()
forward = lgsvl.utils.transform_to_forward(egoState.transform)
POVState.transform = self.sim.map_point_on_lane(egoState.transform.position + 50 * forward)
POV = self.sim.add_agent("Sedan", lgsvl.AgentType.NPC, POVState)
ego.on_collision(on_collision)
POV.on_collision(on_collision)
t0 = time.time()
self.sim.run(self.sim_config['TIME_DELAY']) # Delay POV start by TIME_DELAY seconds
POV.follow_closest_lane(True, self.sim_config['MAX_POV_SPEED'], False)
while True:
self.sim.run(0.5)
egoCurrentState = ego.state
POVCurrentState = POV.state
logging.info('Distance Between Cars: ' + str(separation(egoCurrentState.position, POVCurrentState.position)))
logging.info('EGO Car speed: ' + str(mps_to_mph(egoCurrentState.speed)) + 'MPH')
logging.info('POV Car speed: ' + str(mps_to_mph(POVCurrentState.speed)) + 'MPH')
            self.assertGreater(separation(egoCurrentState.position, POVCurrentState.position),
                               self.sim_config['MAX_FOLLOWING_DISTANCE'],
                               msg='EGO car is getting too close to the POV car')
if time.time() - t0 > self.sim_config['TIME_LIMIT']:
break | [] | [] | [
"BRIDGE_HOST",
"SIMULATOR_HOST"
] | [] | ["BRIDGE_HOST", "SIMULATOR_HOST"] | python | 2 | 0 | |
tests/utils/test_requirements_utils.py | import os
import sys
import importlib
from unittest import mock
import importlib_metadata
import pytest
import mlflow
from mlflow.utils.requirements_utils import (
_is_comment,
_is_empty,
_is_requirements_file,
_strip_inline_comment,
_join_continued_lines,
_parse_requirements,
_prune_packages,
_strip_local_version_label,
_get_installed_version,
_get_pinned_requirement,
_module_to_packages,
_infer_requirements,
)
def test_is_comment():
assert _is_comment("# comment")
assert _is_comment("#")
assert _is_comment("### comment ###")
assert not _is_comment("comment")
assert not _is_comment("")
def test_is_empty():
assert _is_empty("")
assert not _is_empty(" ")
assert not _is_empty("a")
def test_is_requirements_file():
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("--requirement req.txt")
assert _is_requirements_file("--requirement req.txt")
assert not _is_requirements_file("req")
def test_strip_inline_comment():
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # com1 # com2") == "aaa"
# Ensure a URI fragment is not stripped
assert (
_strip_inline_comment("git+https://git/repo.git#subdirectory=subdir")
== "git+https://git/repo.git#subdirectory=subdir"
)
def test_join_continued_lines():
assert list(_join_continued_lines(["a"])) == ["a"]
assert list(_join_continued_lines(["a\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b\\", "c"])) == ["abc"]
assert list(_join_continued_lines(["a\\", " b"])) == ["a b"]
assert list(_join_continued_lines(["a\\", " b\\", " c"])) == ["a b c"]
assert list(_join_continued_lines(["a\\", "\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b", "c\\", "d"])) == ["ab", "cd"]
assert list(_join_continued_lines(["a\\", "", "b"])) == ["a", "b"]
assert list(_join_continued_lines(["a\\"])) == ["a"]
assert list(_join_continued_lines(["\\", "a"])) == ["a"]
def test_parse_requirements(request, tmpdir):
"""
Ensures `_parse_requirements` returns the same result as `pip._internal.req.parse_requirements`
"""
from pip._internal.req import parse_requirements as pip_parse_requirements
from pip._internal.network.session import PipSession
root_req_src = """
# No version specifier
noverspec
no-ver-spec
# Version specifiers
verspec<1.0
ver-spec == 2.0
# Environment marker
env-marker; python_version < "3.8"
inline-comm # Inline comment
inlinecomm # Inline comment
# Git URIs
git+https://github.com/git/uri
git+https://github.com/sub/dir#subdirectory=subdir
# Requirements files
-r {relative_req}
--requirement {absolute_req}
# Constraints files
-c {relative_con}
--constraint {absolute_con}
# Line continuation
line-cont\
==\
1.0
# Line continuation with spaces
line-cont-space \
== \
1.0
# Line continuation with a blank line
line-cont-blank\
# Line continuation at EOF
line-cont-eof\
""".strip()
try:
os.chdir(tmpdir)
root_req = tmpdir.join("requirements.txt")
# Requirements files
rel_req = tmpdir.join("relative_req.txt")
abs_req = tmpdir.join("absolute_req.txt")
# Constraints files
rel_con = tmpdir.join("relative_con.txt")
abs_con = tmpdir.join("absolute_con.txt")
# pip's requirements parser collapses an absolute requirements file path:
# https://github.com/pypa/pip/issues/10121
# As a workaround, use a relative path on Windows.
absolute_req = abs_req.basename if os.name == "nt" else abs_req.strpath
absolute_con = abs_con.basename if os.name == "nt" else abs_con.strpath
root_req.write(
root_req_src.format(
relative_req=rel_req.basename,
absolute_req=absolute_req,
relative_con=rel_con.basename,
absolute_con=absolute_con,
)
)
rel_req.write("rel-req-xxx\nrel-req-yyy")
abs_req.write("abs-req-zzz")
rel_con.write("rel-con-xxx\nrel-con-yyy")
abs_con.write("abs-con-zzz")
expected_cons = [
"rel-con-xxx",
"rel-con-yyy",
"abs-con-zzz",
]
expected_reqs = [
"noverspec",
"no-ver-spec",
"verspec<1.0",
"ver-spec == 2.0",
'env-marker; python_version < "3.8"',
"inline-comm",
"inlinecomm",
"git+https://github.com/git/uri",
"git+https://github.com/sub/dir#subdirectory=subdir",
"rel-req-xxx",
"rel-req-yyy",
"abs-req-zzz",
"line-cont==1.0",
"line-cont-space == 1.0",
"line-cont-blank",
"line-cont-eof",
]
parsed_reqs = list(_parse_requirements(root_req.basename, is_constraint=False))
pip_reqs = list(pip_parse_requirements(root_req.basename, session=PipSession()))
# Requirements
assert [r.req_str for r in parsed_reqs if not r.is_constraint] == expected_reqs
assert [r.requirement for r in pip_reqs if not r.constraint] == expected_reqs
# Constraints
assert [r.req_str for r in parsed_reqs if r.is_constraint] == expected_cons
assert [r.requirement for r in pip_reqs if r.constraint] == expected_cons
finally:
os.chdir(request.config.invocation_dir)
def test_prune_packages():
assert _prune_packages(["mlflow"]) == {"mlflow"}
assert _prune_packages(["mlflow", "packaging"]) == {"mlflow"}
assert _prune_packages(["mlflow", "scikit-learn"]) == {"mlflow", "scikit-learn"}
def test_capture_imported_modules():
from mlflow.utils._capture_modules import _CaptureImportedModules
with _CaptureImportedModules() as cap:
# pylint: disable=unused-import,unused-variable
import math
__import__("pandas")
importlib.import_module("numpy")
assert "math" in cap.imported_modules
assert "pandas" in cap.imported_modules
assert "numpy" in cap.imported_modules
def test_strip_local_version_label():
assert _strip_local_version_label("1.2.3") == "1.2.3"
assert _strip_local_version_label("1.2.3+ab") == "1.2.3"
assert _strip_local_version_label("1.2.3rc0+ab") == "1.2.3rc0"
assert _strip_local_version_label("1.2.3.dev0+ab") == "1.2.3.dev0"
assert _strip_local_version_label("1.2.3.post0+ab") == "1.2.3.post0"
assert _strip_local_version_label("invalid") == "invalid"
def test_get_installed_version(tmpdir):
import numpy as np
import pandas as pd
import sklearn
assert _get_installed_version("mlflow") == mlflow.__version__
assert _get_installed_version("numpy") == np.__version__
assert _get_installed_version("pandas") == pd.__version__
assert _get_installed_version("scikit-learn", module="sklearn") == sklearn.__version__
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_installed_version("not_found") == "1.2.3"
def test_get_pinned_requirement(tmpdir):
assert _get_pinned_requirement("mlflow") == f"mlflow=={mlflow.__version__}"
assert _get_pinned_requirement("mlflow", version="1.2.3") == "mlflow==1.2.3"
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_pinned_requirement("not_found") == "not_found==1.2.3"
def test_get_pinned_requirement_local_version_label(tmpdir):
package = tmpdir.join("my_package.py")
lvl = "abc.def.ghi" # Local version label
package.write(f"__version__ = '1.2.3+{lvl}'")
sys.path.insert(0, tmpdir.strpath)
with mock.patch("mlflow.utils.requirements_utils._logger.warning") as mock_warning:
req = _get_pinned_requirement("my_package")
mock_warning.assert_called_once()
(first_pos_arg,) = mock_warning.call_args[0]
assert first_pos_arg.startswith(
f"Found my_package version (1.2.3+{lvl}) contains a local version label (+{lvl})."
)
assert req == "my_package==1.2.3"
def test_infer_requirements_excludes_mlflow():
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=["mlflow", "pytest"],
):
mlflow_package = "mlflow-skinny" if "MLFLOW_SKINNY" in os.environ else "mlflow"
assert mlflow_package in _module_to_packages("mlflow")
assert _infer_requirements("path/to/model", "sklearn") == [f"pytest=={pytest.__version__}"]
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cmd/accounts-oauth/main.go | package main
import (
"errors"
"github.com/gin-gonic/gin"
"github.com/mitchellh/mapstructure"
hydra "github.com/ory/hydra-client-go"
kratos "github.com/ory/kratos-client-go"
"net/http"
"net/url"
"os"
)
var (
hydraClient = NewHydraClient()
kratosClient = NewKratosClient()
)
type Traits struct {
NetID string `mapstructure:"netid"`
Name string `mapstructure:"name"`
Email string `mapstructure:"email"`
}
func NewHydraClient() *hydra.APIClient {
conf := hydra.NewConfiguration()
conf.Servers = hydra.ServerConfigurations{
{
URL: os.Getenv("HYDRA_ADMIN_URL"),
},
}
return hydra.NewAPIClient(conf)
}
func NewKratosClient() *kratos.APIClient {
conf := kratos.NewConfiguration()
conf.Servers = kratos.ServerConfigurations{
{
URL: os.Getenv("KRATOS_ADMIN_URL"),
},
}
return kratos.NewAPIClient(conf)
}
func acceptLoginChallenge(c *gin.Context, challenge, subject string) {
acceptBody, _, err := hydraClient.AdminApi.AcceptLoginRequest(c).
LoginChallenge(challenge).
AcceptLoginRequest(*hydra.NewAcceptLoginRequest(subject)).
Execute()
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
c.Redirect(http.StatusTemporaryRedirect, acceptBody.RedirectTo)
}
func main() {
r := gin.Default()
r.GET("/kratos-hydra/login", func(c *gin.Context) {
challenge := c.Query("login_challenge")
body, _, err := hydraClient.AdminApi.GetLoginRequest(c).LoginChallenge(challenge).Execute()
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
// If hydra was already able to authenticate the user, skip will be true and we don't need to re-authenticate
// the user.
if body.Skip {
// Now it's time to grant the login request. You could also deny the request if something went terribly wrong
// (for example your arch-enemy logging in!)
acceptLoginChallenge(c, challenge, body.Subject)
return
}
// Now we're going to kratos login screen
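		// The flow (inferred from the handlers in this file): Hydra sends the
		// browser here with a login_challenge, we forward it to the Kratos
		// self-service login flow, and once the user has a session Kratos
		// returns the browser to /kratos-hydra/callback with the same
		// login_challenge so the challenge can be accepted there.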
callbackQuery := url.Values{}
callbackQuery.Add("login_challenge", challenge)
callbackURL := url.URL{
Scheme: c.Request.URL.Scheme,
Host: c.Request.URL.Host,
Path: "/kratos-hydra/callback",
RawQuery: callbackQuery.Encode(),
}
redirectQuery := url.Values{}
redirectQuery.Add("return_to", callbackURL.String())
redirectTo := url.URL{
Scheme: c.Request.URL.Scheme,
Host: c.Request.URL.Host,
Path: "/kratos/self-service/login/browser",
RawQuery: redirectQuery.Encode(),
}
c.Redirect(http.StatusTemporaryRedirect, redirectTo.String())
})
r.GET("/kratos-hydra/callback", func(c *gin.Context) {
sess, _, err := kratosClient.V0alpha2Api.ToSession(c).Cookie(c.GetHeader("cookie")).Execute()
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
traits, ok := sess.Identity.Traits.(map[string]interface{})
if !ok {
c.AbortWithError(http.StatusInternalServerError, errors.New("traits not deserializable"))
return
}
netID, ok := traits["netid"].(string)
if !ok {
c.AbortWithError(http.StatusInternalServerError, errors.New("traits field not deserializable"))
return
}
acceptLoginChallenge(c, c.Query("login_challenge"), netID)
})
r.GET("/kratos-hydra/consent", func(c *gin.Context) {
challenge := c.Query("consent_challenge")
reqBody, _, err := hydraClient.AdminApi.GetConsentRequest(c).ConsentChallenge(challenge).Execute()
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
sess, _, err := kratosClient.V0alpha2Api.ToSession(c).Cookie(c.GetHeader("cookie")).Execute()
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
var traits Traits
err = mapstructure.Decode(sess.Identity.Traits, &traits)
if err != nil {
c.AbortWithError(http.StatusInternalServerError, errors.New("traits not deserializable"))
return
}
idTokenData := make(map[string]interface{}, 0)
idTokenData["netid"] = traits.NetID
idTokenData["name"] = traits.Name
idTokenData["given_name"] = traits.Name
idTokenData["email"] = traits.Email
remember := false
acceptReq := hydra.AcceptConsentRequest{
GrantAccessTokenAudience: reqBody.RequestedAccessTokenAudience,
GrantScope: reqBody.RequestedScope,
Remember: &remember,
RememberFor: nil,
Session: &hydra.ConsentRequestSession{
IdToken: idTokenData,
},
}
acceptBody, _, err := hydraClient.AdminApi.AcceptConsentRequest(c).
ConsentChallenge(challenge).
AcceptConsentRequest(acceptReq).
Execute()
		if err != nil {
			c.AbortWithError(http.StatusInternalServerError, err)
			return
		}
c.Redirect(http.StatusTemporaryRedirect, acceptBody.RedirectTo)
})
r.Run() // listen and serve on 0.0.0.0:8080 (for windows "localhost:8080")
}
| [
"\"HYDRA_ADMIN_URL\"",
"\"KRATOS_ADMIN_URL\""
] | [] | [
"HYDRA_ADMIN_URL",
"KRATOS_ADMIN_URL"
] | [] | ["HYDRA_ADMIN_URL", "KRATOS_ADMIN_URL"] | go | 2 | 0 | |
openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/x86_64-oesdk-linux/usr/lib/python3.7/site-packages/mesonbuild/compilers/c.py | # Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import glob
import os.path
import subprocess
import functools
import itertools
from pathlib import Path
from typing import List
from .. import mlog
from .. import coredata
from . import compilers
from ..mesonlib import (
EnvironmentException, MachineChoice, MesonException, Popen_safe, listify,
version_compare, for_windows, for_darwin, for_cygwin, for_haiku,
for_openbsd, darwin_get_object_archs
)
from .c_function_attributes import C_FUNC_ATTRIBUTES
from .compilers import (
get_largefile_args,
gnu_winlibs,
msvc_winlibs,
unixy_compiler_internal_libs,
vs32_instruction_set_args,
vs64_instruction_set_args,
ArmCompiler,
ArmclangCompiler,
ClangCompiler,
Compiler,
CompilerArgs,
CompilerType,
CrossNoRunException,
GnuCompiler,
ElbrusCompiler,
IntelCompiler,
PGICompiler,
RunResult,
CcrxCompiler,
)
class CCompiler(Compiler):
# TODO: Replace this manual cache with functools.lru_cache
library_dirs_cache = {}
program_dirs_cache = {}
find_library_cache = {}
find_framework_cache = {}
internal_libs = unixy_compiler_internal_libs
@staticmethod
def attribute_check_func(name):
try:
return C_FUNC_ATTRIBUTES[name]
except KeyError:
raise MesonException('Unknown function attribute "{}"'.format(name))
def __init__(self, exelist, version, is_cross, exe_wrapper=None, **kwargs):
# If a child ObjC or CPP class has already set it, don't set it ourselves
if not hasattr(self, 'language'):
self.language = 'c'
super().__init__(exelist, version, **kwargs)
self.id = 'unknown'
self.is_cross = is_cross
self.can_compile_suffixes.add('h')
# If the exe wrapper was not found, pretend it wasn't set so that the
# sanity check is skipped and compiler checks use fallbacks.
if not exe_wrapper or not exe_wrapper.found():
self.exe_wrapper = None
else:
self.exe_wrapper = exe_wrapper.get_command()
# Set to None until we actually need to check this
self.has_fatal_warnings_link_arg = None
def needs_static_linker(self):
return True # When compiling static libraries, so yes.
def get_always_args(self):
'''
Args that are always-on for all C compilers other than MSVC
'''
return ['-pipe'] + get_largefile_args(self)
def get_linker_debug_crt_args(self):
"""
Arguments needed to select a debug crt for the linker
This is only needed for MSVC
"""
return []
def get_no_stdinc_args(self):
return ['-nostdinc']
def get_no_stdlib_link_args(self):
return ['-nostdlib']
def get_warn_args(self, level):
return self.warn_args[level]
def get_no_warn_args(self):
# Almost every compiler uses this for disabling warnings
return ['-w']
def get_soname_args(self, *args):
return []
def split_shlib_to_parts(self, fname):
return None, fname
# The default behavior is this, override in MSVC
@functools.lru_cache(maxsize=None)
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
if self.compiler_type.is_windows_compiler:
return []
return self.build_unix_rpath_args(build_dir, from_dir, rpath_paths, build_rpath, install_rpath)
def get_dependency_gen_args(self, outtarget, outfile):
return ['-MD', '-MQ', outtarget, '-MF', outfile]
def depfile_for_object(self, objfile):
return objfile + '.' + self.get_depfile_suffix()
def get_depfile_suffix(self):
return 'd'
def get_exelist(self):
return self.exelist[:]
def get_linker_exelist(self):
return self.exelist[:]
def get_preprocess_only_args(self):
return ['-E', '-P']
def get_compile_only_args(self):
return ['-c']
def get_no_optimization_args(self):
return ['-O0']
def get_compiler_check_args(self):
'''
Get arguments useful for compiler checks such as being permissive in
the code quality and not doing any optimization.
'''
return self.get_no_optimization_args()
def get_output_args(self, target):
return ['-o', target]
def get_linker_output_args(self, outputname):
return ['-o', outputname]
def get_coverage_args(self):
return ['--coverage']
def get_coverage_link_args(self):
return ['--coverage']
def get_werror_args(self):
return ['-Werror']
def get_std_exe_link_args(self):
return []
def get_include_args(self, path, is_system):
if path == '':
path = '.'
if is_system:
return ['-isystem', path]
return ['-I' + path]
def get_std_shared_lib_link_args(self):
return ['-shared']
@functools.lru_cache()
def _get_search_dirs(self, env):
extra_args = ['--print-search-dirs']
stdo = None
with self._build_wrapper('', env, extra_args=extra_args,
dependencies=None, mode='compile',
want_output=True) as p:
stdo = p.stdo
return stdo
@staticmethod
def _split_fetch_real_dirs(pathstr, sep=':'):
paths = []
for p in pathstr.split(sep):
# GCC returns paths like this:
# /usr/lib/gcc/x86_64-linux-gnu/8/../../../../x86_64-linux-gnu/lib
# It would make sense to normalize them to get rid of the .. parts
# Sadly when you are on a merged /usr fs it also kills these:
# /lib/x86_64-linux-gnu
# since /lib is a symlink to /usr/lib. This would mean
# paths under /lib would be considered not a "system path",
# which is wrong and breaks things. Store everything, just to be sure.
pobj = Path(p)
unresolved = pobj.as_posix()
if pobj.exists():
if unresolved not in paths:
paths.append(unresolved)
try:
resolved = Path(p).resolve().as_posix()
if resolved not in paths:
paths.append(resolved)
except FileNotFoundError:
pass
return tuple(paths)
def get_compiler_dirs(self, env, name):
'''
Get dirs from the compiler, either `libraries:` or `programs:`
'''
stdo = self._get_search_dirs(env)
for line in stdo.split('\n'):
if line.startswith(name + ':'):
return CCompiler._split_fetch_real_dirs(line.split('=', 1)[1])
return ()
@functools.lru_cache()
def get_library_dirs(self, env, elf_class = None):
dirs = self.get_compiler_dirs(env, 'libraries')
if elf_class is None or elf_class == 0:
return dirs
# if we do have an elf class for 32-bit or 64-bit, we want to check that
# the directory in question contains libraries of the appropriate class. Since
# system directories aren't mixed, we only need to check one file for each
# directory and go by that. If we can't check the file for some reason, assume
# the compiler knows what it's doing, and accept the directory anyway.
retval = []
for d in dirs:
files = [f for f in os.listdir(d) if f.endswith('.so') and os.path.isfile(os.path.join(d, f))]
# if no files, accept directory and move on
if len(files) == 0:
retval.append(d)
continue
file_to_check = os.path.join(d, files[0])
with open(file_to_check, 'rb') as fd:
header = fd.read(5)
# if file is not an ELF file, it's weird, but accept dir
# if it is elf, and the class matches, accept dir
if header[1:4] != b'ELF' or int(header[4]) == elf_class:
retval.append(d)
# at this point, it's an ELF file which doesn't match the
# appropriate elf_class, so skip this one
pass
return tuple(retval)
@functools.lru_cache()
def get_program_dirs(self, env):
'''
Programs used by the compiler. Also where toolchain DLLs such as
libstdc++-6.dll are found with MinGW.
'''
return self.get_compiler_dirs(env, 'programs')
def get_pic_args(self):
return ['-fPIC']
def name_string(self):
return ' '.join(self.exelist)
def get_pch_use_args(self, pch_dir, header):
return ['-include', os.path.basename(header)]
def get_pch_name(self, header_name):
return os.path.basename(header_name) + '.' + self.get_pch_suffix()
def get_linker_search_args(self, dirname):
return ['-L' + dirname]
def get_default_include_dirs(self):
return []
def gen_export_dynamic_link_args(self, env):
if for_windows(env.is_cross_build(), env) or for_cygwin(env.is_cross_build(), env):
return ['-Wl,--export-all-symbols']
elif for_darwin(env.is_cross_build(), env):
return []
else:
return ['-Wl,-export-dynamic']
def gen_import_library_args(self, implibname):
"""
The name of the outputted import library
This implementation is used only on Windows by compilers that use GNU ld
"""
return ['-Wl,--out-implib=' + implibname]
def sanity_check_impl(self, work_dir, environment, sname, code):
mlog.debug('Sanity testing ' + self.get_display_language() + ' compiler:', ' '.join(self.exelist))
mlog.debug('Is cross compiler: %s.' % str(self.is_cross))
extra_flags = []
source_name = os.path.join(work_dir, sname)
binname = sname.rsplit('.', 1)[0]
if self.is_cross:
binname += '_cross'
extra_flags += environment.coredata.get_external_args(MachineChoice.HOST, self.language)
if self.exe_wrapper is None:
# Linking cross built apps is painful. You can't really
# tell if you should use -nostdlib or not and for example
# on OSX the compiler binary is the same but you need
# a ton of compiler flags to differentiate between
# arm and x86_64. So just compile.
extra_flags += self.get_compile_only_args()
else:
extra_flags += environment.coredata.get_external_link_args(MachineChoice.HOST, self.language)
# Is a valid executable output for all toolchains and platforms
binname += '.exe'
# Write binary check source
binary_name = os.path.join(work_dir, binname)
with open(source_name, 'w') as ofile:
ofile.write(code)
# Compile sanity check
cmdlist = self.exelist + extra_flags + [source_name] + self.get_output_args(binary_name)
pc, stdo, stde = Popen_safe(cmdlist, cwd=work_dir)
mlog.debug('Sanity check compiler command line:', ' '.join(cmdlist))
mlog.debug('Sanity check compile stdout:')
mlog.debug(stdo)
mlog.debug('-----\nSanity check compile stderr:')
mlog.debug(stde)
mlog.debug('-----')
if pc.returncode != 0:
raise EnvironmentException('Compiler {0} can not compile programs.'.format(self.name_string()))
# Run sanity check
if self.is_cross:
if self.exe_wrapper is None:
# Can't check if the binaries run so we have to assume they do
return
cmdlist = self.exe_wrapper + [binary_name]
else:
cmdlist = [binary_name]
mlog.debug('Running test binary command: ' + ' '.join(cmdlist))
try:
pe = subprocess.Popen(cmdlist)
except Exception as e:
raise EnvironmentException('Could not invoke sanity test executable: %s.' % str(e))
pe.wait()
if pe.returncode != 0:
raise EnvironmentException('Executables created by {0} compiler {1} are not runnable.'.format(self.language, self.name_string()))
def sanity_check(self, work_dir, environment):
code = 'int main(int argc, char **argv) { int class=0; return class; }\n'
return self.sanity_check_impl(work_dir, environment, 'sanitycheckc.c', code)
def check_header(self, hname, prefix, env, *, extra_args=None, dependencies=None):
fargs = {'prefix': prefix, 'header': hname}
code = '''{prefix}
#include <{header}>'''
return self.compiles(code.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def has_header(self, hname, prefix, env, *, extra_args=None, dependencies=None):
fargs = {'prefix': prefix, 'header': hname}
code = '''{prefix}
#ifdef __has_include
#if !__has_include("{header}")
#error "Header '{header}' could not be found"
#endif
#else
#include <{header}>
#endif'''
return self.compiles(code.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies, mode='preprocess')
def has_header_symbol(self, hname, symbol, prefix, env, *, extra_args=None, dependencies=None):
fargs = {'prefix': prefix, 'header': hname, 'symbol': symbol}
t = '''{prefix}
#include <{header}>
int main () {{
/* If it's not defined as a macro, try to use as a symbol */
#ifndef {symbol}
{symbol};
#endif
}}'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def _get_compiler_check_args(self, env, extra_args, dependencies, mode='compile'):
if extra_args is None:
extra_args = []
else:
extra_args = listify(extra_args)
extra_args = listify([e(mode) if callable(e) else e for e in extra_args])
if dependencies is None:
dependencies = []
elif not isinstance(dependencies, list):
dependencies = [dependencies]
# Collect compiler arguments
args = CompilerArgs(self)
for d in dependencies:
# Add compile flags needed by dependencies
args += d.get_compile_args()
if d.need_threads():
args += self.thread_flags(env)
elif d.need_openmp():
args += self.openmp_flags()
if mode == 'link':
# Add link flags needed to find dependencies
args += d.get_link_args()
if d.need_threads():
args += self.thread_link_flags(env)
# Select a CRT if needed since we're linking
if mode == 'link':
args += self.get_linker_debug_crt_args()
if env.is_cross_build() and not self.is_cross:
for_machine = MachineChoice.BUILD
else:
for_machine = MachineChoice.HOST
if mode == 'preprocess':
# Add CPPFLAGS from the env.
args += env.coredata.get_external_preprocess_args(for_machine, self.language)
elif mode == 'compile':
# Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env
sys_args = env.coredata.get_external_args(for_machine, self.language)
# Apparently it is a thing to inject linker flags both
# via CFLAGS _and_ LDFLAGS, even though the former are
# also used during linking. These flags can break
# argument checks. Thanks, Autotools.
cleaned_sys_args = self.remove_linkerlike_args(sys_args)
args += cleaned_sys_args
elif mode == 'link':
# Add LDFLAGS from the env
args += env.coredata.get_external_link_args(for_machine, self.language)
args += self.get_compiler_check_args()
# extra_args must override all other arguments, so we add them last
args += extra_args
return args
def compiles(self, code, env, *, extra_args=None, dependencies=None, mode='compile'):
with self._build_wrapper(code, env, extra_args, dependencies, mode) as p:
return p.returncode == 0
def _build_wrapper(self, code, env, extra_args, dependencies=None, mode='compile', want_output=False):
args = self._get_compiler_check_args(env, extra_args, dependencies, mode)
return self.compile(code, args, mode, want_output=want_output)
def links(self, code, env, *, extra_args=None, dependencies=None):
return self.compiles(code, env, extra_args=extra_args,
dependencies=dependencies, mode='link')
def run(self, code: str, env, *, extra_args=None, dependencies=None):
if self.is_cross and self.exe_wrapper is None:
raise CrossNoRunException('Can not run test applications in this cross environment.')
with self._build_wrapper(code, env, extra_args, dependencies, mode='link', want_output=True) as p:
if p.returncode != 0:
mlog.debug('Could not compile test file %s: %d\n' % (
p.input_name,
p.returncode))
return RunResult(False)
if self.is_cross:
cmdlist = self.exe_wrapper + [p.output_name]
else:
cmdlist = p.output_name
try:
pe, so, se = Popen_safe(cmdlist)
except Exception as e:
mlog.debug('Could not run: %s (error: %s)\n' % (cmdlist, e))
return RunResult(False)
mlog.debug('Program stdout:\n')
mlog.debug(so)
mlog.debug('Program stderr:\n')
mlog.debug(se)
return RunResult(True, pe.returncode, so, se)
def _compile_int(self, expression, prefix, env, extra_args, dependencies):
fargs = {'prefix': prefix, 'expression': expression}
t = '''#include <stdio.h>
{prefix}
int main() {{ static int a[1-2*!({expression})]; a[0]=0; return 0; }}'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def cross_compute_int(self, expression, low, high, guess, prefix, env, extra_args, dependencies):
# Try user's guess first
if isinstance(guess, int):
if self._compile_int('%s == %d' % (expression, guess), prefix, env, extra_args, dependencies):
return guess
# If no bounds are given, compute them in the limit of int32
maxint = 0x7fffffff
minint = -0x80000000
if not isinstance(low, int) or not isinstance(high, int):
if self._compile_int('%s >= 0' % (expression), prefix, env, extra_args, dependencies):
low = cur = 0
while self._compile_int('%s > %d' % (expression, cur), prefix, env, extra_args, dependencies):
low = cur + 1
if low > maxint:
raise EnvironmentException('Cross-compile check overflowed')
cur = cur * 2 + 1
if cur > maxint:
cur = maxint
high = cur
else:
low = cur = -1
while self._compile_int('%s < %d' % (expression, cur), prefix, env, extra_args, dependencies):
high = cur - 1
if high < minint:
raise EnvironmentException('Cross-compile check overflowed')
cur = cur * 2
if cur < minint:
cur = minint
low = cur
else:
# Sanity check limits given by user
if high < low:
raise EnvironmentException('high limit smaller than low limit')
condition = '%s <= %d && %s >= %d' % (expression, high, expression, low)
if not self._compile_int(condition, prefix, env, extra_args, dependencies):
raise EnvironmentException('Value out of given range')
# Binary search
while low != high:
cur = low + int((high - low) / 2)
if self._compile_int('%s <= %d' % (expression, cur), prefix, env, extra_args, dependencies):
high = cur
else:
low = cur + 1
return low
def compute_int(self, expression, low, high, guess, prefix, env, *, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
if self.is_cross:
return self.cross_compute_int(expression, low, high, guess, prefix, env, extra_args, dependencies)
fargs = {'prefix': prefix, 'expression': expression}
t = '''#include<stdio.h>
{prefix}
int main(int argc, char **argv) {{
printf("%ld\\n", (long)({expression}));
return 0;
}};'''
res = self.run(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
if not res.compiled:
return -1
if res.returncode != 0:
raise EnvironmentException('Could not run compute_int test binary.')
return int(res.stdout)
def cross_sizeof(self, typename, prefix, env, *, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename}
t = '''#include <stdio.h>
{prefix}
int main(int argc, char **argv) {{
{type} something;
}}'''
if not self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies):
return -1
return self.cross_compute_int('sizeof(%s)' % typename, None, None, None, prefix, env, extra_args, dependencies)
def sizeof(self, typename, prefix, env, *, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename}
if self.is_cross:
return self.cross_sizeof(typename, prefix, env, extra_args=extra_args,
dependencies=dependencies)
t = '''#include<stdio.h>
{prefix}
int main(int argc, char **argv) {{
printf("%ld\\n", (long)(sizeof({type})));
return 0;
}};'''
res = self.run(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
if not res.compiled:
return -1
if res.returncode != 0:
raise EnvironmentException('Could not run sizeof test binary.')
return int(res.stdout)
def cross_alignment(self, typename, prefix, env, *, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename}
t = '''#include <stdio.h>
{prefix}
int main(int argc, char **argv) {{
{type} something;
}}'''
if not self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies):
return -1
t = '''#include <stddef.h>
{prefix}
struct tmp {{
char c;
{type} target;
}};'''
return self.cross_compute_int('offsetof(struct tmp, target)', None, None, None, t.format(**fargs), env, extra_args, dependencies)
def alignment(self, typename, prefix, env, *, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
if self.is_cross:
return self.cross_alignment(typename, prefix, env, extra_args=extra_args,
dependencies=dependencies)
fargs = {'prefix': prefix, 'type': typename}
t = '''#include <stdio.h>
#include <stddef.h>
{prefix}
struct tmp {{
char c;
{type} target;
}};
int main(int argc, char **argv) {{
printf("%d", (int)offsetof(struct tmp, target));
return 0;
}}'''
res = self.run(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
if not res.compiled:
raise EnvironmentException('Could not compile alignment test.')
if res.returncode != 0:
raise EnvironmentException('Could not run alignment test binary.')
align = int(res.stdout)
if align == 0:
raise EnvironmentException('Could not determine alignment of %s. Sorry. You might want to file a bug.' % typename)
return align
def get_define(self, dname, prefix, env, extra_args, dependencies):
delim = '"MESON_GET_DEFINE_DELIMITER"'
fargs = {'prefix': prefix, 'define': dname, 'delim': delim}
code = '''
{prefix}
#ifndef {define}
# define {define}
#endif
{delim}\n{define}'''
args = self._get_compiler_check_args(env, extra_args, dependencies,
mode='preprocess').to_native()
with self.compile(code.format(**fargs), args, 'preprocess') as p:
if p.returncode != 0:
raise EnvironmentException('Could not get define {!r}'.format(dname))
# Get the preprocessed value after the delimiter,
# minus the extra newline at the end and
# merge string literals.
return CCompiler.concatenate_string_literals(p.stdo.split(delim + '\n')[-1][:-1])
def get_return_value(self, fname, rtype, prefix, env, extra_args, dependencies):
if rtype == 'string':
fmt = '%s'
cast = '(char*)'
elif rtype == 'int':
fmt = '%lli'
cast = '(long long int)'
else:
raise AssertionError('BUG: Unknown return type {!r}'.format(rtype))
fargs = {'prefix': prefix, 'f': fname, 'cast': cast, 'fmt': fmt}
code = '''{prefix}
#include <stdio.h>
int main(int argc, char *argv[]) {{
printf ("{fmt}", {cast} {f}());
}}'''.format(**fargs)
res = self.run(code, env, extra_args=extra_args, dependencies=dependencies)
if not res.compiled:
m = 'Could not get return value of {}()'
raise EnvironmentException(m.format(fname))
if rtype == 'string':
return res.stdout
elif rtype == 'int':
try:
return int(res.stdout.strip())
except ValueError:
m = 'Return value of {}() is not an int'
raise EnvironmentException(m.format(fname))
@staticmethod
def _no_prototype_templ():
"""
Try to find the function without a prototype from a header by defining
our own dummy prototype and trying to link with the C library (and
whatever else the compiler links in by default). This is very similar
to the check performed by Autoconf for AC_CHECK_FUNCS.
"""
# Define the symbol to something else since it is defined by the
# includes or defines listed by the user or by the compiler. This may
# include, for instance _GNU_SOURCE which must be defined before
# limits.h, which includes features.h
# Then, undef the symbol to get rid of it completely.
head = '''
#define {func} meson_disable_define_of_{func}
{prefix}
#include <limits.h>
#undef {func}
'''
# Override any GCC internal prototype and declare our own definition for
# the symbol. Use char because that's unlikely to be an actual return
# value for a function which ensures that we override the definition.
head += '''
#ifdef __cplusplus
extern "C"
#endif
char {func} ();
'''
# The actual function call
main = '''
int main () {{
return {func} ();
}}'''
return head, main
@staticmethod
def _have_prototype_templ():
"""
        Returns a header and main() call that uses the headers listed by the
user for the function prototype while checking if a function exists.
"""
# Add the 'prefix', aka defines, includes, etc that the user provides
# This may include, for instance _GNU_SOURCE which must be defined
# before limits.h, which includes features.h
head = '{prefix}\n#include <limits.h>\n'
# We don't know what the function takes or returns, so return it as an int.
# Just taking the address or comparing it to void is not enough because
# compilers are smart enough to optimize it away. The resulting binary
# is not run so we don't care what the return value is.
main = '''\nint main() {{
void *a = (void*) &{func};
long b = (long) a;
return (int) b;
}}'''
return head, main
def has_function(self, funcname, prefix, env, *, extra_args=None, dependencies=None):
"""
First, this function looks for the symbol in the default libraries
provided by the compiler (stdlib + a few others usually). If that
fails, it checks if any of the headers specified in the prefix provide
an implementation of the function, and if that fails, it checks if it's
implemented as a compiler-builtin.
"""
if extra_args is None:
extra_args = []
# Short-circuit if the check is already provided by the cross-info file
varname = 'has function ' + funcname
varname = varname.replace(' ', '_')
if self.is_cross:
val = env.properties.host.get(varname, None)
if val is not None:
if isinstance(val, bool):
return val
raise EnvironmentException('Cross variable {0} is not a boolean.'.format(varname))
fargs = {'prefix': prefix, 'func': funcname}
# glibc defines functions that are not available on Linux as stubs that
# fail with ENOSYS (such as e.g. lchmod). In this case we want to fail
# instead of detecting the stub as a valid symbol.
# We already included limits.h earlier to ensure that these are defined
# for stub functions.
stubs_fail = '''
#if defined __stub_{func} || defined __stub___{func}
fail fail fail this function is not going to work
#endif
'''
# If we have any includes in the prefix supplied by the user, assume
# that the user wants us to use the symbol prototype defined in those
# includes. If not, then try to do the Autoconf-style check with
# a dummy prototype definition of our own.
# This is needed when the linker determines symbol availability from an
# SDK based on the prototype in the header provided by the SDK.
# Ignoring this prototype would result in the symbol always being
# marked as available.
if '#include' in prefix:
head, main = self._have_prototype_templ()
else:
head, main = self._no_prototype_templ()
templ = head + stubs_fail + main
if self.links(templ.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies):
return True
# MSVC does not have compiler __builtin_-s.
if self.get_id() == 'msvc':
return False
# Detect function as a built-in
#
# Some functions like alloca() are defined as compiler built-ins which
# are inlined by the compiler and you can't take their address, so we
# need to look for them differently. On nice compilers like clang, we
# can just directly use the __has_builtin() macro.
fargs['no_includes'] = '#include' not in prefix
t = '''{prefix}
int main() {{
#ifdef __has_builtin
#if !__has_builtin(__builtin_{func})
#error "__builtin_{func} not found"
#endif
#elif ! defined({func})
/* Check for __builtin_{func} only if no includes were added to the
* prefix above, which means no definition of {func} can be found.
* We would always check for this, but we get false positives on
* MSYS2 if we do. Their toolchain is broken, but we can at least
* give them a workaround. */
#if {no_includes:d}
__builtin_{func};
#else
#error "No definition for __builtin_{func} found in the prefix"
#endif
#endif
}}'''
return self.links(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def has_members(self, typename, membernames, prefix, env, *, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename, 'name': 'foo'}
# Create code that accesses all members
members = ''
for member in membernames:
members += '{}.{};\n'.format(fargs['name'], member)
fargs['members'] = members
t = '''{prefix}
void bar() {{
{type} {name};
{members}
}};'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def has_type(self, typename, prefix, env, extra_args, dependencies=None):
fargs = {'prefix': prefix, 'type': typename}
t = '''{prefix}
void bar() {{
sizeof({type});
}};'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def symbols_have_underscore_prefix(self, env):
'''
Check if the compiler prefixes an underscore to global C symbols
'''
symbol_name = b'meson_uscore_prefix'
code = '''#ifdef __cplusplus
extern "C" {
#endif
void ''' + symbol_name.decode() + ''' () {}
#ifdef __cplusplus
}
#endif
'''
args = self.get_compiler_check_args()
n = 'symbols_have_underscore_prefix'
with self.compile(code, args, 'compile', want_output=True) as p:
if p.returncode != 0:
m = 'BUG: Unable to compile {!r} check: {}'
raise RuntimeError(m.format(n, p.stdo))
if not os.path.isfile(p.output_name):
m = 'BUG: Can\'t find compiled test code for {!r} check'
raise RuntimeError(m.format(n))
with open(p.output_name, 'rb') as o:
for line in o:
# Check if the underscore form of the symbol is somewhere
# in the output file.
if b'_' + symbol_name in line:
return True
# Else, check if the non-underscored form is present
elif symbol_name in line:
return False
raise RuntimeError('BUG: {!r} check failed unexpectedly'.format(n))
def _get_patterns(self, env, prefixes, suffixes, shared=False):
patterns = []
for p in prefixes:
for s in suffixes:
patterns.append(p + '{}.' + s)
if shared and for_openbsd(self.is_cross, env):
# Shared libraries on OpenBSD can be named libfoo.so.X.Y:
# https://www.openbsd.org/faq/ports/specialtopics.html#SharedLibs
#
# This globbing is probably the best matching we can do since regex
# is expensive. It's wrong in many edge cases, but it will match
# correctly-named libraries and hopefully no one on OpenBSD names
# their files libfoo.so.9a.7b.1.0
patterns.append('lib{}.so.[0-9]*.[0-9]*')
return patterns
def get_library_naming(self, env, libtype, strict=False):
'''
Get library prefixes and suffixes for the target platform ordered by
priority
'''
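        # For example (derived from the branches below, shown for a Linux host):
        #   libtype='shared'  -> ('lib{}.so', '{}.so')
        #   libtype='static'  -> ('lib{}.a', '{}.a')
        #   libtype='default' -> the shared patterns first, then the static ones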
stlibext = ['a']
# We've always allowed libname to be both `foo` and `libfoo`, and now
# people depend on it. Also, some people use prebuilt `foo.so` instead
# of `libfoo.so` for unknown reasons, and may also want to create
# `foo.so` by setting name_prefix to ''
if strict and not isinstance(self, VisualStudioCCompiler): # lib prefix is not usually used with msvc
prefixes = ['lib']
else:
prefixes = ['lib', '']
# Library suffixes and prefixes
if for_darwin(env.is_cross_build(), env):
shlibext = ['dylib', 'so']
elif for_windows(env.is_cross_build(), env):
# FIXME: .lib files can be import or static so we should read the
# file, figure out which one it is, and reject the wrong kind.
if isinstance(self, VisualStudioCCompiler):
shlibext = ['lib']
else:
shlibext = ['dll.a', 'lib', 'dll']
# Yep, static libraries can also be foo.lib
stlibext += ['lib']
elif for_cygwin(env.is_cross_build(), env):
shlibext = ['dll', 'dll.a']
prefixes = ['cyg'] + prefixes
else:
# Linux/BSDs
shlibext = ['so']
patterns = []
# Search priority
if libtype in ('default', 'shared-static'):
patterns += self._get_patterns(env, prefixes, shlibext, True)
patterns += self._get_patterns(env, prefixes, stlibext, False)
elif libtype == 'static-shared':
patterns += self._get_patterns(env, prefixes, stlibext, False)
patterns += self._get_patterns(env, prefixes, shlibext, True)
elif libtype == 'shared':
patterns += self._get_patterns(env, prefixes, shlibext, True)
elif libtype == 'static':
patterns += self._get_patterns(env, prefixes, stlibext, False)
else:
raise AssertionError('BUG: unknown libtype {!r}'.format(libtype))
return tuple(patterns)
@staticmethod
def _sort_shlibs_openbsd(libs):
filtered = []
for lib in libs:
# Validate file as a shared library of type libfoo.so.X.Y
ret = lib.rsplit('.so.', maxsplit=1)
if len(ret) != 2:
continue
try:
float(ret[1])
except ValueError:
continue
filtered.append(lib)
float_cmp = lambda x: float(x.rsplit('.so.', maxsplit=1)[1])
return sorted(filtered, key=float_cmp, reverse=True)
@classmethod
def _get_trials_from_pattern(cls, pattern, directory, libname):
f = Path(directory) / pattern.format(libname)
# Globbing for OpenBSD
if '*' in pattern:
# NOTE: globbing matches directories and broken symlinks
# so we have to do an isfile test on it later
return cls._sort_shlibs_openbsd(glob.glob(str(f)))
return [f.as_posix()]
@staticmethod
def _get_file_from_list(env, files: List[str]) -> str:
'''
We just check whether the library exists. We can't do a link check
because the library might have unresolved symbols that require other
libraries. On macOS we check if the library matches our target
architecture.
'''
# If not building on macOS for Darwin, do a simple file check
if not env.machines.host.is_darwin() or not env.machines.build.is_darwin():
for f in files:
if os.path.isfile(f):
return f
# Run `lipo` and check if the library supports the arch we want
for f in files:
if not os.path.isfile(f):
continue
archs = darwin_get_object_archs(f)
if archs and env.machines.host.cpu_family in archs:
return f
else:
mlog.debug('Rejected {}, supports {} but need {}'
.format(f, archs, env.machines.host.cpu_family))
return None
@functools.lru_cache()
def output_is_64bit(self, env):
'''
returns true if the output produced is 64-bit, false if 32-bit
'''
return self.sizeof('void *', '', env) == 8
def find_library_real(self, libname, env, extra_dirs, code, libtype):
# First try if we can just add the library as -l.
# Gcc + co seem to prefer builtin lib dirs to -L dirs.
# Only try to find std libs if no extra dirs specified.
if not extra_dirs or libname in self.internal_libs:
args = ['-l' + libname]
largs = self.linker_to_compiler_args(self.get_allow_undefined_link_args())
if self.links(code, env, extra_args=(args + largs)):
return args
# Don't do a manual search for internal libs
if libname in self.internal_libs:
return None
# Not found or we want to use a specific libtype? Try to find the
# library file itself.
patterns = self.get_library_naming(env, libtype)
# try to detect if we are 64-bit or 32-bit. If we can't
# detect, we will just skip path validity checks done in
# get_library_dirs() call
try:
if self.output_is_64bit(env):
elf_class = 2
else:
elf_class = 1
except:
elf_class = 0
# Search in the specified dirs, and then in the system libraries
for d in itertools.chain(extra_dirs, self.get_library_dirs(env, elf_class)):
for p in patterns:
trial = self._get_trials_from_pattern(p, d, libname)
if not trial:
continue
trial = self._get_file_from_list(env, trial)
if not trial:
continue
return [trial]
return None
def find_library_impl(self, libname, env, extra_dirs, code, libtype):
# These libraries are either built-in or invalid
if libname in self.ignore_libs:
return []
if isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
key = (tuple(self.exelist), libname, tuple(extra_dirs), code, libtype)
if key not in self.find_library_cache:
value = self.find_library_real(libname, env, extra_dirs, code, libtype)
self.find_library_cache[key] = value
else:
value = self.find_library_cache[key]
if value is None:
return None
return value[:]
def find_library(self, libname, env, extra_dirs, libtype='default'):
code = 'int main(int argc, char **argv) { return 0; }'
return self.find_library_impl(libname, env, extra_dirs, code, libtype)
def find_framework_paths(self, env):
'''
These are usually /Library/Frameworks and /System/Library/Frameworks,
unless you select a particular macOS SDK with the -isysroot flag.
You can also add to this by setting -F in CFLAGS.
'''
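        # On a stock macOS toolchain the parsed list typically contains
        # '/Library/Frameworks' and '/System/Library/Frameworks' (illustrative;
        # the exact entries depend on the selected SDK and any -F flags).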
if self.id != 'clang':
raise MesonException('Cannot find framework path with non-clang compiler')
# Construct the compiler command-line
commands = self.get_exelist() + ['-v', '-E', '-']
commands += self.get_always_args()
# Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env
if env.is_cross_build() and not self.is_cross:
for_machine = MachineChoice.BUILD
else:
for_machine = MachineChoice.HOST
commands += env.coredata.get_external_args(for_machine, self.language)
mlog.debug('Finding framework path by running: ', ' '.join(commands), '\n')
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
_, _, stde = Popen_safe(commands, env=os_env, stdin=subprocess.PIPE)
paths = []
for line in stde.split('\n'):
if '(framework directory)' not in line:
continue
# line is of the form:
# ` /path/to/framework (framework directory)`
paths.append(line[:-21].strip())
return paths
def find_framework_real(self, name, env, extra_dirs, allow_system):
code = 'int main(int argc, char **argv) { return 0; }'
link_args = []
for d in extra_dirs:
link_args += ['-F' + d]
# We can pass -Z to disable searching in the system frameworks, but
# then we must also pass -L/usr/lib to pick up libSystem.dylib
extra_args = [] if allow_system else ['-Z', '-L/usr/lib']
link_args += ['-framework', name]
if self.links(code, env, extra_args=(extra_args + link_args)):
return link_args
def find_framework_impl(self, name, env, extra_dirs, allow_system):
if isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
key = (tuple(self.exelist), name, tuple(extra_dirs), allow_system)
if key in self.find_framework_cache:
value = self.find_framework_cache[key]
else:
value = self.find_framework_real(name, env, extra_dirs, allow_system)
self.find_framework_cache[key] = value
if value is None:
return None
return value[:]
def find_framework(self, name, env, extra_dirs, allow_system=True):
'''
Finds the framework with the specified name, and returns link args for
the same or returns None when the framework is not found.
'''
if self.id != 'clang':
raise MesonException('Cannot find frameworks with non-clang compiler')
return self.find_framework_impl(name, env, extra_dirs, allow_system)
def thread_flags(self, env):
if for_haiku(self.is_cross, env) or for_darwin(self.is_cross, env):
return []
return ['-pthread']
def thread_link_flags(self, env):
if for_haiku(self.is_cross, env) or for_darwin(self.is_cross, env):
return []
return ['-pthread']
def linker_to_compiler_args(self, args):
return args
def has_arguments(self, args, env, code, mode):
return self.compiles(code, env, extra_args=args, mode=mode)
def has_multi_arguments(self, args, env):
for arg in args[:]:
# some compilers, e.g. GCC, don't warn for unsupported warning-disable
# flags, so when we are testing a flag like "-Wno-forgotten-towel", also
# check the equivalent enable flag too "-Wforgotten-towel"
if arg.startswith('-Wno-'):
args.append('-W' + arg[5:])
if arg.startswith('-Wl,'):
mlog.warning('{} looks like a linker argument, '
'but has_argument and other similar methods only '
'support checking compiler arguments. Using them '
'to check linker arguments is never supported, '
'and results are likely to be wrong regardless of '
'the compiler you are using. has_link_argument or '
'other similar method can be used instead.'
.format(arg))
code = 'int i;\n'
return self.has_arguments(args, env, code, mode='compile')
def has_multi_link_arguments(self, args, env):
# First time we check for link flags we need to first check if we have
# --fatal-warnings, otherwise some linker checks could give some
# false positives.
fatal_warnings_args = ['-Wl,--fatal-warnings']
if self.has_fatal_warnings_link_arg is None:
self.has_fatal_warnings_link_arg = False
self.has_fatal_warnings_link_arg = self.has_multi_link_arguments(fatal_warnings_args, env)
if self.has_fatal_warnings_link_arg:
args = fatal_warnings_args + args
args = self.linker_to_compiler_args(args)
code = 'int main(int argc, char **argv) { return 0; }'
return self.has_arguments(args, env, code, mode='link')
@staticmethod
def concatenate_string_literals(s):
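# Merges adjacent C string literals, e.g. '"foo" "bar"' becomes '"foobar"'.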
pattern = re.compile(r'(?P<pre>.*([^\\]")|^")(?P<str1>([^\\"]|\\.)*)"\s+"(?P<str2>([^\\"]|\\.)*)(?P<post>".*)')
ret = s
m = pattern.match(ret)
while m:
ret = ''.join(m.group('pre', 'str1', 'str2', 'post'))
m = pattern.match(ret)
return ret
def has_func_attribute(self, name, env):
# Just assume that if we're not on windows that dllimport and dllexport
# don't work
if not (for_windows(env.is_cross_build(), env) or
for_cygwin(env.is_cross_build(), env)):
if name in ['dllimport', 'dllexport']:
return False
# Clang and GCC both return warnings if the __attribute__ is undefined,
# so set -Werror
return self.compiles(self.attribute_check_func(name), env, extra_args='-Werror')
class ClangCCompiler(ClangCompiler, CCompiler):
def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
ClangCompiler.__init__(self, compiler_type)
default_warn_args = ['-Wall', '-Winvalid-pch']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self):
opts = CCompiler.get_options(self)
opts.update({'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
['none', 'c89', 'c99', 'c11',
'gnu89', 'gnu99', 'gnu11'],
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['c_std']
if std.value != 'none':
args.append('-std=' + std.value)
return args
def get_option_link_args(self, options):
return []
def get_linker_always_args(self):
basic = super().get_linker_always_args()
if self.compiler_type.is_osx_compiler:
return basic + ['-Wl,-headerpad_max_install_names']
return basic
class ArmclangCCompiler(ArmclangCompiler, CCompiler):
def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
ArmclangCompiler.__init__(self, compiler_type)
default_warn_args = ['-Wall', '-Winvalid-pch']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self):
opts = CCompiler.get_options(self)
opts.update({'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
['none', 'c90', 'c99', 'c11',
'gnu90', 'gnu99', 'gnu11'],
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['c_std']
if std.value != 'none':
args.append('-std=' + std.value)
return args
def get_option_link_args(self, options):
return []
class GnuCCompiler(GnuCompiler, CCompiler):
def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
GnuCompiler.__init__(self, compiler_type, defines)
default_warn_args = ['-Wall', '-Winvalid-pch']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self):
opts = CCompiler.get_options(self)
opts.update({'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
['none', 'c89', 'c99', 'c11',
'gnu89', 'gnu99', 'gnu11'],
'none')})
if self.compiler_type.is_windows_compiler:
opts.update({
'c_winlibs': coredata.UserArrayOption('c_winlibs', 'Standard Win libraries to link against',
gnu_winlibs), })
return opts
def get_option_compile_args(self, options):
args = []
std = options['c_std']
if std.value != 'none':
args.append('-std=' + std.value)
return args
def get_option_link_args(self, options):
if self.compiler_type.is_windows_compiler:
return options['c_winlibs'].value[:]
return []
def get_pch_use_args(self, pch_dir, header):
return ['-fpch-preprocess', '-include', os.path.basename(header)]
class PGICCompiler(PGICompiler, CCompiler):
def __init__(self, exelist, version, is_cross, exe_wrapper=None, **kwargs):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
PGICompiler.__init__(self, CompilerType.PGI_STANDARD)
class ElbrusCCompiler(GnuCCompiler, ElbrusCompiler):
def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):
GnuCCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)
ElbrusCompiler.__init__(self, compiler_type, defines)
# It supports various ISO standards and c/gnu 90, 9x, 1x in addition to those which GNU CC supports.
def get_options(self):
opts = CCompiler.get_options(self)
opts.update({'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
['none', 'c89', 'c90', 'c9x', 'c99', 'c1x', 'c11',
'gnu89', 'gnu90', 'gnu9x', 'gnu99', 'gnu1x', 'gnu11',
'iso9899:2011', 'iso9899:1990', 'iso9899:199409', 'iso9899:1999'],
'none')})
return opts
# The Elbrus C compiler does not have lchmod, but this only produces a linker warning, not a compiler error.
# So we should explicitly fail in this case.
def has_function(self, funcname, prefix, env, *, extra_args=None, dependencies=None):
if funcname == 'lchmod':
return False
else:
return super().has_function(funcname, prefix, env,
extra_args=extra_args,
dependencies=dependencies)
class IntelCCompiler(IntelCompiler, CCompiler):
def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
IntelCompiler.__init__(self, compiler_type)
self.lang_header = 'c-header'
default_warn_args = ['-Wall', '-w3', '-diag-disable:remark']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra']}
def get_options(self):
opts = CCompiler.get_options(self)
c_stds = ['c89', 'c99']
g_stds = ['gnu89', 'gnu99']
if version_compare(self.version, '>=16.0.0'):
c_stds += ['c11']
opts.update({'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
['none'] + c_stds + g_stds,
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['c_std']
if std.value != 'none':
args.append('-std=' + std.value)
return args
class VisualStudioCCompiler(CCompiler):
std_warn_args = ['/W3']
std_opt_args = ['/O2']
ignore_libs = unixy_compiler_internal_libs
internal_libs = ()
crt_args = {'none': [],
'md': ['/MD'],
'mdd': ['/MDd'],
'mt': ['/MT'],
'mtd': ['/MTd'],
}
def __init__(self, exelist, version, is_cross, exe_wrap, target):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
self.id = 'msvc'
# /showIncludes is needed for build dependency tracking in Ninja
# See: https://ninja-build.org/manual.html#_deps
self.always_args = ['/nologo', '/showIncludes']
self.warn_args = {'0': ['/W1'],
'1': ['/W2'],
'2': ['/W3'],
'3': ['/W4']}
self.base_options = ['b_pch', 'b_ndebug', 'b_vscrt'] # FIXME add lto, pgo and the like
self.target = target
self.is_64 = ('x64' in target) or ('x86_64' in target)
# Override CCompiler.get_always_args
def get_always_args(self):
return self.always_args
def get_linker_debug_crt_args(self):
"""
Arguments needed to select a debug crt for the linker
Sometimes we need to manually select the CRT (C runtime) to use with
MSVC. One example is when trying to link with static libraries since
MSVC won't auto-select a CRT for us in that case and will error out
asking us to select one.
"""
return ['/MDd']
def get_buildtype_args(self, buildtype):
args = compilers.msvc_buildtype_args[buildtype]
if self.id == 'msvc' and version_compare(self.version, '<18.0'):
args = [arg for arg in args if arg != '/Gw']
return args
def get_buildtype_linker_args(self, buildtype):
return compilers.msvc_buildtype_linker_args[buildtype]
def get_pch_suffix(self):
return 'pch'
def get_pch_name(self, header):
chopped = os.path.basename(header).split('.')[:-1]
chopped.append(self.get_pch_suffix())
pchname = '.'.join(chopped)
return pchname
def get_pch_use_args(self, pch_dir, header):
base = os.path.basename(header)
if self.id == 'clang-cl':
base = header
pchname = self.get_pch_name(header)
return ['/FI' + base, '/Yu' + base, '/Fp' + os.path.join(pch_dir, pchname)]
def get_preprocess_only_args(self):
return ['/EP']
def get_compile_only_args(self):
return ['/c']
def get_no_optimization_args(self):
return ['/Od']
def get_output_args(self, target):
if target.endswith('.exe'):
return ['/Fe' + target]
return ['/Fo' + target]
def get_optimization_args(self, optimization_level):
return compilers.msvc_optimization_args[optimization_level]
def get_debug_args(self, is_debug):
return compilers.msvc_debug_args[is_debug]
def get_dependency_gen_args(self, outtarget, outfile):
return []
def get_linker_exelist(self):
# FIXME, should have same path as compiler.
# FIXME, should be controllable via cross-file.
if self.id == 'clang-cl':
return ['lld-link']
else:
return ['link']
def get_linker_always_args(self):
return ['/nologo']
def get_linker_output_args(self, outputname):
return ['/OUT:' + outputname]
def get_linker_search_args(self, dirname):
return ['/LIBPATH:' + dirname]
def linker_to_compiler_args(self, args):
return ['/link'] + args
def get_gui_app_args(self, value):
# the default is for the linker to guess the subsystem based on presence
# of main or WinMain symbols, so always be explicit
if value:
return ['/SUBSYSTEM:WINDOWS']
else:
return ['/SUBSYSTEM:CONSOLE']
def get_pic_args(self):
return [] # PIC is handled by the loader on Windows
def gen_export_dynamic_link_args(self, env):
return [] # Not applicable with MSVC
def get_std_shared_lib_link_args(self):
return ['/DLL']
def gen_vs_module_defs_args(self, defsfile):
if not isinstance(defsfile, str):
raise RuntimeError('Module definitions file should be str')
# With MSVC, DLLs only export symbols that are explicitly exported,
# so if a module defs file is specified, we use that to export symbols
return ['/DEF:' + defsfile]
def gen_pch_args(self, header, source, pchname):
objname = os.path.splitext(pchname)[0] + '.obj'
return objname, ['/Yc' + header, '/Fp' + pchname, '/Fo' + objname]
def gen_import_library_args(self, implibname):
"The name of the outputted import library"
return ['/IMPLIB:' + implibname]
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
return []
def openmp_flags(self):
return ['/openmp']
# FIXME, no idea what these should be.
def thread_flags(self, env):
return []
def thread_link_flags(self, env):
return []
def get_options(self):
opts = CCompiler.get_options(self)
opts.update({'c_winlibs': coredata.UserArrayOption('c_winlibs',
'Windows libs to link against.',
msvc_winlibs)})
return opts
def get_option_link_args(self, options):
return options['c_winlibs'].value[:]
@classmethod
def unix_args_to_native(cls, args):
result = []
for i in args:
# -mms-bitfields is specific to MinGW-GCC
# -pthread is only valid for GCC
if i in ('-mms-bitfields', '-pthread'):
continue
if i.startswith('-L'):
i = '/LIBPATH:' + i[2:]
# Translate GNU-style -lfoo library name to the import library
elif i.startswith('-l'):
name = i[2:]
if name in cls.ignore_libs:
# With MSVC, these are provided by the C runtime which is
# linked in by default
continue
else:
i = name + '.lib'
# -pthread in link flags is only used on Linux
elif i == '-pthread':
continue
result.append(i)
return result
def get_werror_args(self):
return ['/WX']
def get_include_args(self, path, is_system):
if path == '':
path = '.'
# msvc does not have a concept of system header dirs.
return ['-I' + path]
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '/I':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
elif i[:9] == '/LIBPATH:':
parameter_list[idx] = i[:9] + os.path.normpath(os.path.join(build_dir, i[9:]))
return parameter_list
# Visual Studio is special. It ignores some arguments it does not
# understand and you can't tell it to error out on those.
# http://stackoverflow.com/questions/15259720/how-can-i-make-the-microsoft-c-compiler-treat-unknown-flags-as-errors-rather-t
def has_arguments(self, args, env, code, mode):
warning_text = '4044' if mode == 'link' else '9002'
if self.id == 'clang-cl' and mode != 'link':
args = args + ['-Werror=unknown-argument']
with self._build_wrapper(code, env, extra_args=args, mode=mode) as p:
if p.returncode != 0:
return False
return not(warning_text in p.stde or warning_text in p.stdo)
def get_compile_debugfile_args(self, rel_obj, pch=False):
pdbarr = rel_obj.split('.')[:-1]
pdbarr += ['pdb']
args = ['/Fd' + '.'.join(pdbarr)]
# When generating a PDB file with PCH, all compile commands write
# to the same PDB file. Hence, we need to serialize the PDB
# writes using /FS since we do parallel builds. This slows down the
# build obviously, which is why we only do this when PCH is on.
# This was added in Visual Studio 2013 (MSVC 18.0). Before that it was
# always on: https://msdn.microsoft.com/en-us/library/dn502518.aspx
if pch and self.id == 'msvc' and version_compare(self.version, '>=18.0'):
args = ['/FS'] + args
return args
def get_link_debugfile_args(self, targetfile):
pdbarr = targetfile.split('.')[:-1]
pdbarr += ['pdb']
return ['/DEBUG', '/PDB:' + '.'.join(pdbarr)]
def get_link_whole_for(self, args):
# Only since VS2015
args = listify(args)
return ['/WHOLEARCHIVE:' + x for x in args]
def get_instruction_set_args(self, instruction_set):
if self.is_64:
return vs64_instruction_set_args.get(instruction_set, None)
if self.id == 'msvc' and self.version.split('.')[0] == '16' and instruction_set == 'avx':
# VS documentation says that this exists and should work, but
# it does not. The headers do not contain AVX intrinsics
# and they can not be called.
return None
return vs32_instruction_set_args.get(instruction_set, None)
def get_toolset_version(self):
if self.id == 'clang-cl':
# I have no idea
return '14.1'
# See boost/config/compiler/visualc.cpp for up to date mapping
try:
version = int(''.join(self.version.split('.')[0:2]))
except ValueError:
return None
if version < 1310:
return '7.0'
elif version < 1400:
return '7.1' # (Visual Studio 2003)
elif version < 1500:
return '8.0' # (Visual Studio 2005)
elif version < 1600:
return '9.0' # (Visual Studio 2008)
elif version < 1700:
return '10.0' # (Visual Studio 2010)
elif version < 1800:
return '11.0' # (Visual Studio 2012)
elif version < 1900:
return '12.0' # (Visual Studio 2013)
elif version < 1910:
return '14.0' # (Visual Studio 2015)
elif version < 1920:
return '14.1' # (Visual Studio 2017)
return None
def get_default_include_dirs(self):
if 'INCLUDE' not in os.environ:
return []
return os.environ['INCLUDE'].split(os.pathsep)
def get_crt_compile_args(self, crt_val, buildtype):
if crt_val in self.crt_args:
return self.crt_args[crt_val]
assert(crt_val == 'from_buildtype')
# Match what build type flags used to do.
if buildtype == 'plain':
return []
elif buildtype == 'debug':
return self.crt_args['mdd']
elif buildtype == 'debugoptimized':
return self.crt_args['md']
elif buildtype == 'release':
return self.crt_args['md']
elif buildtype == 'minsize':
return self.crt_args['md']
else:
assert(buildtype == 'custom')
raise EnvironmentException('Requested C runtime based on buildtype, but buildtype is "custom".')
def has_func_attribute(self, name, env):
# MSVC doesn't have __attribute__ like Clang and GCC do, so just return
# false without compiling anything
return name in ['dllimport', 'dllexport']
def get_argument_syntax(self):
return 'msvc'
def get_allow_undefined_link_args(self):
# link.exe
return ['/FORCE:UNRESOLVED']
class ClangClCCompiler(VisualStudioCCompiler):
def __init__(self, exelist, version, is_cross, exe_wrap, target):
super().__init__(exelist, version, is_cross, exe_wrap, target)
self.id = 'clang-cl'
class ArmCCompiler(ArmCompiler, CCompiler):
def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
ArmCompiler.__init__(self, compiler_type)
def get_options(self):
opts = CCompiler.get_options(self)
opts.update({'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
['none', 'c90', 'c99'],
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['c_std']
if std.value != 'none':
args.append('--' + std.value)
return args
class CcrxCCompiler(CcrxCompiler, CCompiler):
def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):
CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
CcrxCompiler.__init__(self, compiler_type)
# Override CCompiler.get_always_args
def get_always_args(self):
return ['-nologo']
def get_options(self):
opts = CCompiler.get_options(self)
opts.update({'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
['none', 'c89', 'c99'],
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['c_std']
if std.value == 'c89':
args.append('-lang=c')
elif std.value == 'c99':
args.append('-lang=c99')
return args
def get_compile_only_args(self):
return []
def get_no_optimization_args(self):
return ['-optimize=0']
def get_output_args(self, target):
return ['-output=obj=%s' % target]
def get_linker_output_args(self, outputname):
return ['-output=%s' % outputname]
def get_werror_args(self):
return ['-change_message=error']
def get_include_args(self, path, is_system):
if path == '':
path = '.'
return ['-include=' + path]
| [] | [] | ["INCLUDE"] | [] | ["INCLUDE"] | python | 1 | 0 | 
npm/http/server/server_test.go | package server
import (
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/Azure/azure-container-networking/npm/cache"
"github.com/Azure/azure-container-networking/npm/http/api"
"github.com/Azure/azure-container-networking/npm/ipsm"
"github.com/stretchr/testify/assert"
"github.com/Azure/azure-container-networking/npm"
k8sversion "k8s.io/apimachinery/pkg/version"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
fakeexec "k8s.io/utils/exec/testing"
)
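// NPMEncoder returns a NetworkPolicyManager built from fake Kubernetes clients and a fake exec,
// used as the encoder under test.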
func NPMEncoder() npm.NetworkPolicyManagerEncoder {
noResyncPeriodFunc := func() time.Duration { return 0 }
kubeclient := k8sfake.NewSimpleClientset()
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeclient, noResyncPeriodFunc())
fakeK8sVersion := &k8sversion.Info{
GitVersion: "v1.20.2",
}
exec := &fakeexec.FakeExec{}
npmVersion := "npm-ut-test"
npmEncoder := npm.NewNetworkPolicyManager(kubeclient, kubeInformer, exec, npmVersion, fakeK8sVersion)
return npmEncoder
}
func TestGetNPMCacheHandler(t *testing.T) {
assert := assert.New(t)
npmEncoder := NPMEncoder()
n := &NPMRestServer{}
handler := n.npmCacheHandler(npmEncoder)
req, err := http.NewRequest(http.MethodGet, api.NPMMgrPath, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v",
status, http.StatusOK)
}
var actual *cache.NPMCache
actual, err = cache.Decode(rr.Body)
if err != nil {
t.Fatal(err)
}
expected := &cache.NPMCache{
Nodename: os.Getenv("HOSTNAME"),
NsMap: make(map[string]*npm.Namespace),
PodMap: make(map[string]*npm.NpmPod),
ListMap: make(map[string]*ipsm.Ipset),
SetMap: make(map[string]*ipsm.Ipset),
}
assert.Exactly(expected, actual)
}
| ["\"HOSTNAME\""] | [] | ["HOSTNAME"] | [] | ["HOSTNAME"] | go | 1 | 0 | 
mongo-driver/x/mongo/driver/topology/initial_dns_seedlist_discovery_test.go | // Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package topology
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/require"
testhelpers "github.com/dollarkillerx/mongo/mongo-driver/internal/testutil/helpers"
"github.com/dollarkillerx/mongo/mongo-driver/x/mongo/driver/connstring"
"github.com/dollarkillerx/mongo/mongo-driver/x/mongo/driver/description"
)
const seedlistTestDir string = "../../../../data/initial-dns-seedlist-discovery/"
type seedlistTestCase struct {
URI string
Seeds []string
Hosts []string
Error bool
Options map[string]interface{}
}
// Because the Go driver tests can be run either against a server with SSL enabled or without, a
// number of configurations have to be checked to ensure that the SRV tests are run properly.
//
// First, the "ssl" option in the JSON test description has to be checked. If this option is not
// present, we assume that the test will assert an error, so we proceed with the test as normal.
// If the option is false, then we skip the test if the server is running with SSL enabled.
// If the option is true, then we skip the test if the server is running without SSL enabled; if
// the server is running with SSL enabled, then we manually set the necessary SSL options in the
// connection string.
func setSSLSettings(t *testing.T, cs *connstring.ConnString, options map[string]interface{}) {
var testCaseExpectsSSL bool
if ssl, found := options["ssl"]; found && ssl.(bool) {
// The options specify "ssl: true".
testCaseExpectsSSL = true
} else if !found {
// No "ssl" option is specified.
return
}
envSSL := os.Getenv("SSL") == "ssl"
// Skip non-SSL tests if the server is running with SSL.
if !testCaseExpectsSSL && envSSL {
t.Skip()
}
// Skip SSL tests if the server is running without SSL.
if testCaseExpectsSSL && !envSSL {
t.Skip()
}
// If SSL tests are running, skip server certificate verification.
if testCaseExpectsSSL && envSSL {
cs.SSLInsecure = true
}
}
func runSeedlistTest(t *testing.T, filename string, test *seedlistTestCase) {
t.Run(filename, func(t *testing.T) {
if runtime.GOOS == "windows" && filename == "two-txt-records" {
t.Skip("Skipping to avoid windows multiple TXT record lookup bug")
}
if strings.HasPrefix(runtime.Version(), "go1.11") && (filename == "one-txt-record-multiple-strings") {
t.Skip("Skipping to avoid Go 1.11 problem with multiple strings in one TXT record")
}
cs, err := connstring.Parse(test.URI)
if test.Error {
require.Error(t, err)
return
}
// The resolved connstring may not have valid credentials
if err != nil && err.Error() == "error parsing uri: authsource without username is invalid" {
err = nil
}
require.NoError(t, err)
require.Equal(t, cs.Scheme, "mongodb+srv")
require.Equal(t, cs.Scheme, connstring.SchemeMongoDBSRV)
// DNS records may be out of order from the test files ordering
seeds := buildSet(test.Seeds)
hosts := buildSet(cs.Hosts)
require.Equal(t, hosts, seeds)
testhelpers.VerifyConnStringOptions(t, cs, test.Options)
setSSLSettings(t, &cs, test.Options)
// make a topology from the options
c, err := New(WithConnString(func(connstring.ConnString) connstring.ConnString { return cs }))
require.NoError(t, err)
err = c.Connect()
require.NoError(t, err)
for _, host := range test.Hosts {
_, err := getServerByAddress(host, c)
require.NoError(t, err)
}
})
}
// Test case for all connection string spec tests.
func TestInitialDNSSeedlistDiscoverySpec(t *testing.T) {
if os.Getenv("TOPOLOGY") != "replica_set" || os.Getenv("AUTH") != "noauth" {
t.Skip("Skipping on non-replica set topology")
}
for _, fname := range testhelpers.FindJSONFilesInDir(t, seedlistTestDir) {
filepath := path.Join(seedlistTestDir, fname)
content, err := ioutil.ReadFile(filepath)
require.NoError(t, err)
var testCase seedlistTestCase
require.NoError(t, json.Unmarshal(content, &testCase))
fname = fname[:len(fname)-5]
runSeedlistTest(t, fname, &testCase)
}
}
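// buildSet converts a slice of strings into a set so seeds and hosts can be compared
// without regard to ordering.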
func buildSet(list []string) map[string]struct{} {
set := map[string]struct{}{}
for _, s := range list {
set[s] = struct{}{}
}
return set
}
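// getServerByAddress selects the server with the given address from the topology and
// returns its description.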
func getServerByAddress(address string, c *Topology) (description.Server, error) {
selectByName := description.ServerSelectorFunc(func(_ description.Topology, servers []description.Server) ([]description.Server, error) {
for _, s := range servers {
if s.Addr.String() == address {
return []description.Server{s}, nil
}
}
return []description.Server{}, nil
})
selectedServer, err := c.SelectServerLegacy(context.Background(), selectByName)
if err != nil {
return description.Server{}, err
}
return selectedServer.Server.Description(), nil
}
| ["\"SSL\"", "\"TOPOLOGY\"", "\"AUTH\""] | [] | ["SSL", "AUTH", "TOPOLOGY"] | [] | ["SSL", "AUTH", "TOPOLOGY"] | go | 3 | 0 | 
controllers/providers/aws/aws.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"encoding/base64"
"fmt"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface"
"github.com/aws/aws-sdk-go/service/eks"
"github.com/aws/aws-sdk-go/service/eks/eksiface"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/iam/iamiface"
"github.com/keikoproj/aws-sdk-go-cache/cache"
"github.com/keikoproj/instance-manager/controllers/common"
"github.com/pkg/errors"
ctrl "sigs.k8s.io/controller-runtime"
)
var (
log = ctrl.Log.WithName("aws-provider")
)
const (
CacheDefaultTTL time.Duration = time.Second * 0
DescribeAutoScalingGroupsTTL time.Duration = 60 * time.Second
DescribeLaunchConfigurationsTTL time.Duration = 60 * time.Second
ListAttachedRolePoliciesTTL time.Duration = 60 * time.Second
GetRoleTTL time.Duration = 60 * time.Second
GetInstanceProfileTTL time.Duration = 60 * time.Second
DescribeNodegroupTTL time.Duration = 60 * time.Second
DescribeClusterTTL time.Duration = 180 * time.Second
CacheMaxItems int64 = 5000
CacheItemsToPrune uint32 = 500
)
type AwsWorker struct {
AsgClient autoscalingiface.AutoScalingAPI
EksClient eksiface.EKSAPI
IamClient iamiface.IAMAPI
Parameters map[string]interface{}
}
var (
DefaultInstanceProfilePropagationDelay = time.Second * 25
DefaultWaiterDuration = time.Second * 5
DefaultWaiterRetries = 12
DefaultAutoscalingMetrics = []string{
"GroupMinSize",
"GroupMaxSize",
"GroupDesiredCapacity",
"GroupInServiceInstances",
"GroupPendingInstances",
"GroupStandbyInstances",
"GroupTerminatingInstances",
"GroupInServiceCapacity",
"GroupPendingCapacity",
"GroupTerminatingCapacity",
"GroupStandbyCapacity",
"GroupTotalInstances",
"GroupTotalCapacity",
}
)
const (
IAMPolicyPrefix = "arn:aws:iam::aws:policy"
)
func DefaultEksUserDataFmt() string {
return `#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh %s %s`
}
func (w *AwsWorker) RoleExist(name string) (*iam.Role, bool) {
out, err := w.GetRole(name)
if err != nil {
var role *iam.Role
return role, false
}
return out, true
}
func (w *AwsWorker) InstanceProfileExist(name string) (*iam.InstanceProfile, bool) {
var (
instanceProfile *iam.InstanceProfile
input = &iam.GetInstanceProfileInput{
InstanceProfileName: aws.String(name),
}
)
out, err := w.IamClient.GetInstanceProfile(input)
if err != nil {
return instanceProfile, false
}
return out.InstanceProfile, true
}
func (w *AwsWorker) GetBasicBlockDevice(name, volType string, volSize int64) *autoscaling.BlockDeviceMapping {
return &autoscaling.BlockDeviceMapping{
DeviceName: aws.String(name),
Ebs: &autoscaling.Ebs{
VolumeSize: aws.Int64(volSize),
VolumeType: aws.String(volType),
DeleteOnTermination: aws.Bool(true),
},
}
}
func (w *AwsWorker) CreateLaunchConfig(input *autoscaling.CreateLaunchConfigurationInput) error {
_, err := w.AsgClient.CreateLaunchConfiguration(input)
if err != nil {
return err
}
return err
}
func (w *AwsWorker) DeleteLaunchConfig(name string) error {
input := &autoscaling.DeleteLaunchConfigurationInput{
LaunchConfigurationName: aws.String(name),
}
_, err := w.AsgClient.DeleteLaunchConfiguration(input)
if err != nil {
return err
}
return nil
}
func (w *AwsWorker) CreateScalingGroup(input *autoscaling.CreateAutoScalingGroupInput) error {
_, err := w.AsgClient.CreateAutoScalingGroup(input)
if err != nil {
return err
}
return nil
}
func (w *AwsWorker) UpdateScalingGroupTags(add []*autoscaling.Tag, remove []*autoscaling.Tag) error {
if len(add) > 0 {
_, err := w.AsgClient.CreateOrUpdateTags(&autoscaling.CreateOrUpdateTagsInput{
Tags: add,
})
if err != nil {
return err
}
}
if len(remove) > 0 {
_, err := w.AsgClient.DeleteTags(&autoscaling.DeleteTagsInput{
Tags: remove,
})
if err != nil {
return err
}
}
return nil
}
func (w *AwsWorker) UpdateScalingGroup(input *autoscaling.UpdateAutoScalingGroupInput) error {
_, err := w.AsgClient.UpdateAutoScalingGroup(input)
if err != nil {
return err
}
return nil
}
func (w *AwsWorker) DeleteScalingGroup(name string) error {
input := &autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String(name),
ForceDelete: aws.Bool(true),
}
_, err := w.AsgClient.DeleteAutoScalingGroup(input)
if err != nil {
return err
}
return nil
}
func (w *AwsWorker) SetSuspendProcesses(name string, processesToSuspend []string) error {
input := &autoscaling.ScalingProcessQuery{
AutoScalingGroupName: aws.String(name),
ScalingProcesses: aws.StringSlice(processesToSuspend),
}
_, err := w.AsgClient.SuspendProcesses(input)
if err != nil {
return err
}
return nil
}
func (w *AwsWorker) SetResumeProcesses(name string, processesToResume []string) error {
input := &autoscaling.ScalingProcessQuery{
AutoScalingGroupName: aws.String(name),
ScalingProcesses: aws.StringSlice(processesToResume),
}
_, err := w.AsgClient.ResumeProcesses(input)
if err != nil {
return err
}
return nil
}
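// GetBasicUserData renders the default EKS bootstrap user-data with the cluster name and
// bootstrap arguments and returns it base64-encoded.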
func (w *AwsWorker) GetBasicUserData(clusterName, bootstrapArgs string) string {
userData := fmt.Sprintf(DefaultEksUserDataFmt(), clusterName, bootstrapArgs)
return base64.StdEncoding.EncodeToString([]byte(userData))
}
func (w *AwsWorker) NewTag(key, val, resource string) *autoscaling.Tag {
return &autoscaling.Tag{
Key: aws.String(key),
Value: aws.String(val),
PropagateAtLaunch: aws.Bool(true),
ResourceId: aws.String(resource),
ResourceType: aws.String("auto-scaling-group"),
}
}
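// WithRetries calls f up to DefaultWaiterRetries times, sleeping DefaultWaiterDuration
// between attempts, and returns an error if f never succeeds.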
func (w *AwsWorker) WithRetries(f func() bool) error {
var counter int
for {
if counter >= DefaultWaiterRetries {
break
}
if f() {
return nil
}
time.Sleep(DefaultWaiterDuration)
counter++
}
return errors.New("waiter timed out")
}
func (w *AwsWorker) TerminateScalingInstances(instanceIds []string) error {
for _, instance := range instanceIds {
_, err := w.AsgClient.TerminateInstanceInAutoScalingGroup(&autoscaling.TerminateInstanceInAutoScalingGroupInput{
InstanceId: aws.String(instance),
ShouldDecrementDesiredCapacity: aws.Bool(false),
})
if err != nil {
return err
}
}
return nil
}
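// DeleteScalingGroupRole detaches the managed policies, removes the role from its instance
// profile, deletes the profile, and finally deletes the role, retrying the role deletion
// until the policy detachments have propagated.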
func (w *AwsWorker) DeleteScalingGroupRole(name string, managedPolicies []string) error {
for _, policy := range managedPolicies {
_, err := w.IamClient.DetachRolePolicy(&iam.DetachRolePolicyInput{
RoleName: aws.String(name),
PolicyArn: aws.String(policy),
})
if err != nil {
return err
}
}
_, err := w.IamClient.RemoveRoleFromInstanceProfile(&iam.RemoveRoleFromInstanceProfileInput{
InstanceProfileName: aws.String(name),
RoleName: aws.String(name),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
if aerr.Code() != iam.ErrCodeNoSuchEntityException {
return err
}
}
}
_, err = w.IamClient.DeleteInstanceProfile(&iam.DeleteInstanceProfileInput{
InstanceProfileName: aws.String(name),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
if aerr.Code() != iam.ErrCodeNoSuchEntityException {
return err
}
}
}
// must wait until all policies are detached
err = w.WithRetries(func() bool {
_, err := w.IamClient.DeleteRole(&iam.DeleteRoleInput{
RoleName: aws.String(name),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
if aerr.Code() != iam.ErrCodeNoSuchEntityException {
log.Error(err, "failed to delete role")
return false
}
}
}
return true
})
if err != nil {
return errors.Wrap(err, "role deletion failed")
}
return nil
}
func (w *AwsWorker) AttachManagedPolicies(name string, managedPolicies []string) error {
for _, policy := range managedPolicies {
_, err := w.IamClient.AttachRolePolicy(&iam.AttachRolePolicyInput{
RoleName: aws.String(name),
PolicyArn: aws.String(policy),
})
if err != nil {
return errors.Wrap(err, "failed to attach role policies")
}
}
return nil
}
func (w *AwsWorker) DetachManagedPolicies(name string, managedPolicies []string) error {
for _, policy := range managedPolicies {
_, err := w.IamClient.DetachRolePolicy(&iam.DetachRolePolicyInput{
RoleName: aws.String(name),
PolicyArn: aws.String(policy),
})
if err != nil {
return errors.Wrap(err, "failed to detach role policies")
}
}
return nil
}
func (w *AwsWorker) ListRolePolicies(name string) ([]*iam.AttachedPolicy, error) {
policies := []*iam.AttachedPolicy{}
err := w.IamClient.ListAttachedRolePoliciesPages(
&iam.ListAttachedRolePoliciesInput{
RoleName: aws.String(name),
},
func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool {
for _, p := range page.AttachedPolicies {
policies = append(policies, p)
}
return page.Marker != nil
})
if err != nil {
return policies, err
}
return policies, nil
}
func (w *AwsWorker) CreateScalingGroupRole(name string) (*iam.Role, *iam.InstanceProfile, error) {
var (
assumeRolePolicyDocument = `{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}]
}`
createdRole = &iam.Role{}
createdProfile = &iam.InstanceProfile{}
)
if role, ok := w.RoleExist(name); !ok {
out, err := w.IamClient.CreateRole(&iam.CreateRoleInput{
RoleName: aws.String(name),
AssumeRolePolicyDocument: aws.String(assumeRolePolicyDocument),
})
if err != nil {
return createdRole, createdProfile, errors.Wrap(err, "failed to create role")
}
createdRole = out.Role
} else {
createdRole = role
}
if instanceProfile, ok := w.InstanceProfileExist(name); !ok {
out, err := w.IamClient.CreateInstanceProfile(&iam.CreateInstanceProfileInput{
InstanceProfileName: aws.String(name),
})
if err != nil {
return createdRole, createdProfile, errors.Wrap(err, "failed to create instance-profile")
}
createdProfile = out.InstanceProfile
time.Sleep(DefaultInstanceProfilePropagationDelay)
_, err = w.IamClient.AddRoleToInstanceProfile(&iam.AddRoleToInstanceProfileInput{
InstanceProfileName: aws.String(name),
RoleName: aws.String(name),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
if aerr.Code() != iam.ErrCodeLimitExceededException {
return createdRole, createdProfile, errors.Wrap(err, "failed to attach instance-profile")
}
}
}
} else {
createdProfile = instanceProfile
}
return createdRole, createdProfile, nil
}
// TODO: Move logic to provisioner
func (w *AwsWorker) IsNodeGroupExist() bool {
input := &eks.DescribeNodegroupInput{
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
NodegroupName: aws.String(w.Parameters["NodegroupName"].(string)),
}
_, err := w.EksClient.DescribeNodegroup(input)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == eks.ErrCodeResourceNotFoundException {
return false
}
}
log.Error(err, "failed to describe nodegroup")
return false
}
return true
}
func (w *AwsWorker) DescribeEKSCluster(clusterName string) (*eks.Cluster, error) {
cluster := &eks.Cluster{}
input := &eks.DescribeClusterInput{
Name: aws.String(clusterName),
}
output, err := w.EksClient.DescribeCluster(input)
if err != nil {
return cluster, err
}
return output.Cluster, nil
}
// TODO: Rename - GetNodeGroup
func (w *AwsWorker) GetSelfNodeGroup() (error, *eks.Nodegroup) {
input := &eks.DescribeNodegroupInput{
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
NodegroupName: aws.String(w.Parameters["NodegroupName"].(string)),
}
output, err := w.EksClient.DescribeNodegroup(input)
if err != nil {
return err, &eks.Nodegroup{}
}
return nil, output.Nodegroup
}
func (w *AwsWorker) DeleteManagedNodeGroup() error {
input := &eks.DeleteNodegroupInput{
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
NodegroupName: aws.String(w.Parameters["NodegroupName"].(string)),
}
_, err := w.EksClient.DeleteNodegroup(input)
if err != nil {
return err
}
return nil
}
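// GetLabelsUpdatePayload diffs the existing and desired label maps into an EKS
// UpdateLabelsPayload: new or changed keys are added or updated, and keys missing from the
// desired set are removed.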
func (w *AwsWorker) GetLabelsUpdatePayload(existing, new map[string]string) *eks.UpdateLabelsPayload {
var (
removeLabels = make([]string, 0)
addUpdateLabels = make(map[string]string)
)
for k, v := range new {
// handle new labels
if _, ok := existing[k]; !ok {
addUpdateLabels[k] = v
}
// handle label value updates
if val, ok := existing[k]; ok && val != v {
addUpdateLabels[k] = v
}
}
for k := range existing {
// handle removals
if _, ok := new[k]; !ok {
removeLabels = append(removeLabels, k)
}
}
return &eks.UpdateLabelsPayload{
AddOrUpdateLabels: aws.StringMap(addUpdateLabels),
RemoveLabels: aws.StringSlice(removeLabels),
}
}
func (w *AwsWorker) UpdateManagedNodeGroup(currentDesired int64, labelsPayload *eks.UpdateLabelsPayload) error {
input := &eks.UpdateNodegroupConfigInput{
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
NodegroupName: aws.String(w.Parameters["NodegroupName"].(string)),
ScalingConfig: &eks.NodegroupScalingConfig{
MaxSize: aws.Int64(w.Parameters["MaxSize"].(int64)),
MinSize: aws.Int64(w.Parameters["MinSize"].(int64)),
DesiredSize: aws.Int64(currentDesired),
},
Labels: labelsPayload,
}
_, err := w.EksClient.UpdateNodegroupConfig(input)
if err != nil {
return err
}
return nil
}
func (w *AwsWorker) CreateManagedNodeGroup() error {
input := &eks.CreateNodegroupInput{
AmiType: aws.String(w.Parameters["AmiType"].(string)),
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
DiskSize: aws.Int64(w.Parameters["DiskSize"].(int64)),
InstanceTypes: aws.StringSlice(w.Parameters["InstanceTypes"].([]string)),
Labels: aws.StringMap(w.Parameters["Labels"].(map[string]string)),
NodeRole: aws.String(w.Parameters["NodeRole"].(string)),
NodegroupName: aws.String(w.Parameters["NodegroupName"].(string)),
ReleaseVersion: aws.String(w.Parameters["ReleaseVersion"].(string)),
RemoteAccess: &eks.RemoteAccessConfig{
Ec2SshKey: aws.String(w.Parameters["Ec2SshKey"].(string)),
SourceSecurityGroups: aws.StringSlice(w.Parameters["SourceSecurityGroups"].([]string)),
},
ScalingConfig: &eks.NodegroupScalingConfig{
MaxSize: aws.Int64(w.Parameters["MaxSize"].(int64)),
MinSize: aws.Int64(w.Parameters["MinSize"].(int64)),
DesiredSize: aws.Int64(w.Parameters["MinSize"].(int64)),
},
Subnets: aws.StringSlice(w.Parameters["Subnets"].([]string)),
Tags: aws.StringMap(w.compactTags(w.Parameters["Tags"].([]map[string]string))),
Version: aws.String(w.Parameters["Version"].(string)),
}
_, err := w.EksClient.CreateNodegroup(input)
if err != nil {
return err
}
return nil
}
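// compactTags flattens a slice of {"key": ..., "value": ...} maps into a single map of
// tag key to tag value.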
func (w *AwsWorker) compactTags(tags []map[string]string) map[string]string {
compacted := make(map[string]string)
for _, tagSet := range tags {
var (
key string
value string
)
for t, v := range tagSet {
if t == "key" {
key = v
} else if t == "value" {
value = v
}
}
compacted[key] = value
}
return compacted
}
func (w *AwsWorker) DescribeAutoscalingGroups() ([]*autoscaling.Group, error) {
scalingGroups := []*autoscaling.Group{}
err := w.AsgClient.DescribeAutoScalingGroupsPages(&autoscaling.DescribeAutoScalingGroupsInput{}, func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool {
scalingGroups = append(scalingGroups, page.AutoScalingGroups...)
return page.NextToken != nil
})
if err != nil {
return scalingGroups, err
}
return scalingGroups, nil
}
func (w *AwsWorker) DescribeAutoscalingLaunchConfigs() ([]*autoscaling.LaunchConfiguration, error) {
launchConfigurations := []*autoscaling.LaunchConfiguration{}
err := w.AsgClient.DescribeLaunchConfigurationsPages(&autoscaling.DescribeLaunchConfigurationsInput{}, func(page *autoscaling.DescribeLaunchConfigurationsOutput, lastPage bool) bool {
launchConfigurations = append(launchConfigurations, page.LaunchConfigurations...)
return page.NextToken != nil
})
if err != nil {
return launchConfigurations, err
}
return launchConfigurations, nil
}
func (w *AwsWorker) EnableMetrics(asgName string, metrics []string) error {
if common.SliceEmpty(metrics) {
return nil
}
_, err := w.AsgClient.EnableMetricsCollection(&autoscaling.EnableMetricsCollectionInput{
AutoScalingGroupName: aws.String(asgName),
Granularity: aws.String("1Minute"),
Metrics: aws.StringSlice(metrics),
})
if err != nil {
return err
}
return nil
}
func (w *AwsWorker) DisableMetrics(asgName string, metrics []string) error {
if common.SliceEmpty(metrics) {
return nil
}
_, err := w.AsgClient.DisableMetricsCollection(&autoscaling.DisableMetricsCollectionInput{
AutoScalingGroupName: aws.String(asgName),
Metrics: aws.StringSlice(metrics),
})
if err != nil {
return err
}
return nil
}
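// GetScalingGroupTagsByName returns the tag descriptions of the autoscaling group whose
// name matches the given name (case-insensitive).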
func GetScalingGroupTagsByName(name string, client autoscalingiface.AutoScalingAPI) ([]*autoscaling.TagDescription, error) {
tags := []*autoscaling.TagDescription{}
input := &autoscaling.DescribeAutoScalingGroupsInput{}
out, err := client.DescribeAutoScalingGroups(input)
if err != nil {
return tags, err
}
for _, asg := range out.AutoScalingGroups {
n := aws.StringValue(asg.AutoScalingGroupName)
if strings.EqualFold(name, n) {
tags = asg.Tags
}
}
return tags, nil
}
func GetTagValueByKey(tags []*autoscaling.TagDescription, key string) string {
for _, tag := range tags {
k := aws.StringValue(tag.Key)
v := aws.StringValue(tag.Value)
if key == k {
return v
}
}
return ""
}
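// GetRegion returns the AWS region from the AWS_REGION environment variable, falling back
// to the EC2 instance metadata service when it is unset.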
func GetRegion() (string, error) {
if os.Getenv("AWS_REGION") != "" {
return os.Getenv("AWS_REGION"), nil
}
// Otherwise, try to derive the region from EC2 instance metadata
var config aws.Config
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
Config: config,
}))
c := ec2metadata.New(sess)
region, err := c.Region()
if err != nil {
return "", err
}
return region, nil
}
// GetAwsAsgClient returns an ASG client
func GetAwsAsgClient(region string, cacheCfg *cache.Config, maxRetries int) autoscalingiface.AutoScalingAPI {
config := aws.NewConfig().WithRegion(region).WithCredentialsChainVerboseErrors(true)
config = request.WithRetryer(config, NewRetryLogger(maxRetries))
sess, err := session.NewSession(config)
if err != nil {
panic(err)
}
cache.AddCaching(sess, cacheCfg)
cacheCfg.SetCacheTTL("autoscaling", "DescribeAutoScalingGroups", DescribeAutoScalingGroupsTTL)
cacheCfg.SetCacheTTL("autoscaling", "DescribeLaunchConfigurations", DescribeLaunchConfigurationsTTL)
sess.Handlers.Complete.PushFront(func(r *request.Request) {
ctx := r.HTTPRequest.Context()
log.V(1).Info("AWS API call",
"cacheHit", cache.IsCacheHit(ctx),
"service", r.ClientInfo.ServiceName,
"operation", r.Operation.Name,
)
})
return autoscaling.New(sess)
}
// GetAwsEksClient returns an EKS client
func GetAwsEksClient(region string, cacheCfg *cache.Config, maxRetries int) eksiface.EKSAPI {
config := aws.NewConfig().WithRegion(region).WithCredentialsChainVerboseErrors(true)
config = request.WithRetryer(config, NewRetryLogger(maxRetries))
sess, err := session.NewSession(config)
if err != nil {
panic(err)
}
cache.AddCaching(sess, cacheCfg)
cacheCfg.SetCacheTTL("eks", "DescribeCluster", DescribeClusterTTL)
cacheCfg.SetCacheTTL("eks", "DescribeNodegroup", DescribeNodegroupTTL)
sess.Handlers.Complete.PushFront(func(r *request.Request) {
ctx := r.HTTPRequest.Context()
log.V(1).Info("AWS API call",
"cacheHit", cache.IsCacheHit(ctx),
"service", r.ClientInfo.ServiceName,
"operation", r.Operation.Name,
)
})
return eks.New(sess, config)
}
func (w *AwsWorker) DeriveEksVpcID(clusterName string) (string, error) {
out, err := w.EksClient.DescribeCluster(&eks.DescribeClusterInput{Name: aws.String(clusterName)})
if err != nil {
return "", err
}
return aws.StringValue(out.Cluster.ResourcesVpcConfig.VpcId), nil
}
type CloudResourceReconcileState struct {
OngoingState bool
FiniteState bool
FiniteDeleted bool
UpdateRecoverableError bool
UnrecoverableError bool
UnrecoverableDeleteError bool
}
var OngoingState = CloudResourceReconcileState{OngoingState: true}
var FiniteState = CloudResourceReconcileState{FiniteState: true}
var FiniteDeleted = CloudResourceReconcileState{FiniteDeleted: true}
var UpdateRecoverableError = CloudResourceReconcileState{UpdateRecoverableError: true}
var UnrecoverableError = CloudResourceReconcileState{UnrecoverableError: true}
var UnrecoverableDeleteError = CloudResourceReconcileState{UnrecoverableDeleteError: true}
// GetAwsIamClient returns an IAM client
func GetAwsIamClient(region string, cacheCfg *cache.Config, maxRetries int) iamiface.IAMAPI {
config := aws.NewConfig().WithRegion(region).WithCredentialsChainVerboseErrors(true)
config = request.WithRetryer(config, NewRetryLogger(maxRetries))
sess, err := session.NewSession(config)
if err != nil {
panic(err)
}
cache.AddCaching(sess, cacheCfg)
cacheCfg.SetCacheTTL("iam", "GetInstanceProfile", GetInstanceProfileTTL)
cacheCfg.SetCacheTTL("iam", "GetRole", GetRoleTTL)
cacheCfg.SetCacheTTL("iam", "ListAttachedRolePolicies", ListAttachedRolePoliciesTTL)
sess.Handlers.Complete.PushFront(func(r *request.Request) {
ctx := r.HTTPRequest.Context()
log.V(1).Info("AWS API call",
"cacheHit", cache.IsCacheHit(ctx),
"service", r.ClientInfo.ServiceName,
"operation", r.Operation.Name,
)
})
return iam.New(sess, config)
}
type ManagedNodeGroupReconcileState struct {
OngoingState bool
FiniteState bool
UnrecoverableError bool
UnrecoverableDeleteError bool
}
var ManagedNodeGroupOngoingState = ManagedNodeGroupReconcileState{OngoingState: true}
var ManagedNodeGroupFiniteState = ManagedNodeGroupReconcileState{FiniteState: true}
var ManagedNodeGroupUnrecoverableError = ManagedNodeGroupReconcileState{UnrecoverableError: true}
var ManagedNodeGroupUnrecoverableDeleteError = ManagedNodeGroupReconcileState{UnrecoverableDeleteError: true}
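// IsNodeGroupInConditionState reports whether the given EKS nodegroup status (e.g.
// "CREATING", "ACTIVE") falls under the named reconcile condition.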
func IsNodeGroupInConditionState(key string, condition string) bool {
conditionStates := map[string]ManagedNodeGroupReconcileState{
"CREATING": ManagedNodeGroupOngoingState,
"UPDATING": ManagedNodeGroupOngoingState,
"DELETING": ManagedNodeGroupOngoingState,
"ACTIVE": ManagedNodeGroupFiniteState,
"DEGRADED": ManagedNodeGroupFiniteState,
"CREATE_FAILED": ManagedNodeGroupUnrecoverableError,
"DELETE_FAILED": ManagedNodeGroupUnrecoverableDeleteError,
}
state := conditionStates[key]
switch condition {
case "OngoingState":
return state.OngoingState
case "FiniteState":
return state.FiniteState
case "UnrecoverableError":
return state.UnrecoverableError
case "UnrecoverableDeleteError":
return state.UnrecoverableDeleteError
default:
return false
}
}
func IsProfileInConditionState(key string, condition string) bool {
conditionStates := map[string]CloudResourceReconcileState{
aws.StringValue(nil): FiniteDeleted,
eks.FargateProfileStatusCreating: OngoingState,
eks.FargateProfileStatusActive: FiniteState,
eks.FargateProfileStatusDeleting: OngoingState,
eks.FargateProfileStatusCreateFailed: UpdateRecoverableError,
eks.FargateProfileStatusDeleteFailed: UnrecoverableDeleteError,
}
state := conditionStates[key]
switch condition {
case "OngoingState":
return state.OngoingState
case "FiniteState":
return state.FiniteState
case "FiniteDeleted":
return state.FiniteDeleted
case "UpdateRecoverableError":
return state.UpdateRecoverableError
case "UnrecoverableError":
return state.UnrecoverableError
case "UnrecoverableDeleteError":
return state.UnrecoverableDeleteError
default:
return false
}
}
const defaultPolicyArn = "arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"
func (w *AwsWorker) DetachDefaultPolicyFromDefaultRole() error {
var roleName = w.Parameters["DefaultRoleName"].(string)
rolePolicy := &iam.DetachRolePolicyInput{
PolicyArn: aws.String(defaultPolicyArn),
RoleName: aws.String(roleName),
}
_, err := w.IamClient.DetachRolePolicy(rolePolicy)
return err
}
func (w *AwsWorker) DeleteDefaultFargateRole() error {
var roleName = w.Parameters["DefaultRoleName"].(string)
role := &iam.DeleteRoleInput{
RoleName: aws.String(roleName),
}
_, err := w.IamClient.DeleteRole(role)
return err
}
func (w *AwsWorker) GetDefaultFargateRole() (*iam.Role, error) {
var roleName = w.Parameters["DefaultRoleName"].(string)
return w.GetRole(roleName)
}
func (w *AwsWorker) GetRole(roleName string) (*iam.Role, error) {
role := &iam.GetRoleInput{
RoleName: aws.String(roleName),
}
resp, err := w.IamClient.GetRole(role)
if err != nil {
return nil, err
}
return resp.Role, nil
}
func (w *AwsWorker) CreateDefaultFargateRole() error {
var roleName = w.Parameters["DefaultRoleName"].(string)
var template = `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"eks-fargate-pods.amazonaws.com"},"Action":"sts:AssumeRole"}]}`
role := &iam.CreateRoleInput{
AssumeRolePolicyDocument: &template,
Path: aws.String("/"),
RoleName: aws.String(roleName),
}
_, err := w.IamClient.CreateRole(role)
return err
}
func (w *AwsWorker) AttachDefaultPolicyToDefaultRole() error {
var roleName = w.Parameters["DefaultRoleName"].(string)
rolePolicy := &iam.AttachRolePolicyInput{
PolicyArn: aws.String(defaultPolicyArn),
RoleName: aws.String(roleName),
}
_, err := w.IamClient.AttachRolePolicy(rolePolicy)
return err
}
func (w *AwsWorker) CreateFargateProfile(arn string) error {
tags := w.Parameters["Tags"].(map[string]*string)
if len(tags) == 0 {
tags = nil
}
selectors := w.Parameters["Selectors"].([]*eks.FargateProfileSelector)
if len(selectors) == 0 {
selectors = nil
}
fargateInput := &eks.CreateFargateProfileInput{
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
FargateProfileName: aws.String(w.Parameters["ProfileName"].(string)),
PodExecutionRoleArn: aws.String(arn),
Selectors: selectors,
Subnets: aws.StringSlice(w.Parameters["Subnets"].([]string)),
Tags: tags,
}
_, err := w.EksClient.CreateFargateProfile(fargateInput)
return err
}
func (w *AwsWorker) DeleteFargateProfile() error {
deleteInput := &eks.DeleteFargateProfileInput{
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
FargateProfileName: aws.String(w.Parameters["ProfileName"].(string)),
}
_, err := w.EksClient.DeleteFargateProfile(deleteInput)
return err
}
func (w *AwsWorker) DescribeFargateProfile() (*eks.FargateProfile, error) {
describeInput := &eks.DescribeFargateProfileInput{
ClusterName: aws.String(w.Parameters["ClusterName"].(string)),
FargateProfileName: aws.String(w.Parameters["ProfileName"].(string)),
}
output, err := w.EksClient.DescribeFargateProfile(describeInput)
if err != nil {
return nil, err
}
return output.FargateProfile, nil
}
| ["\"AWS_REGION\"", "\"AWS_REGION\""] | [] | ["AWS_REGION"] | [] | ["AWS_REGION"] | go | 1 | 0 | 
bot.py | import os
import discord
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
@client.event
async def on_ready():
print(f'{client.user.name} has connected to Discord!')
@client.event
async def on_message(message):
if 'ping' in message.content.lower():
await message.channel.send('Ping successful')
if 'Hi' in message.content:
await message.channel.send('Hello')
client.run(TOKEN)
| [] | [] | ["DISCORD_TOKEN"] | [] | ["DISCORD_TOKEN"] | python | 1 | 0 | 
api4/oauth.go | // Copyright (c) 2017 Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package api4
import (
"net/http"
"net/url"
"path/filepath"
"strings"
l4g "github.com/alecthomas/log4go"
"github.com/demisto/mattermost-server/app"
"github.com/demisto/mattermost-server/model"
"github.com/demisto/mattermost-server/utils"
)
func (api *API) InitOAuth() {
api.BaseRoutes.OAuthApps.Handle("", api.ApiSessionRequired(createOAuthApp)).Methods("POST")
api.BaseRoutes.OAuthApp.Handle("", api.ApiSessionRequired(updateOAuthApp)).Methods("PUT")
api.BaseRoutes.OAuthApps.Handle("", api.ApiSessionRequired(getOAuthApps)).Methods("GET")
api.BaseRoutes.OAuthApp.Handle("", api.ApiSessionRequired(getOAuthApp)).Methods("GET")
api.BaseRoutes.OAuthApp.Handle("/info", api.ApiSessionRequired(getOAuthAppInfo)).Methods("GET")
api.BaseRoutes.OAuthApp.Handle("", api.ApiSessionRequired(deleteOAuthApp)).Methods("DELETE")
api.BaseRoutes.OAuthApp.Handle("/regen_secret", api.ApiSessionRequired(regenerateOAuthAppSecret)).Methods("POST")
api.BaseRoutes.User.Handle("/oauth/apps/authorized", api.ApiSessionRequired(getAuthorizedOAuthApps)).Methods("GET")
// API version independent OAuth 2.0 as a service provider endpoints
api.BaseRoutes.Root.Handle("/oauth/authorize", api.ApiHandlerTrustRequester(authorizeOAuthPage)).Methods("GET")
api.BaseRoutes.Root.Handle("/oauth/authorize", api.ApiSessionRequired(authorizeOAuthApp)).Methods("POST")
api.BaseRoutes.Root.Handle("/oauth/deauthorize", api.ApiSessionRequired(deauthorizeOAuthApp)).Methods("POST")
api.BaseRoutes.Root.Handle("/oauth/access_token", api.ApiHandlerTrustRequester(getAccessToken)).Methods("POST")
// API version independent OAuth as a client endpoints
api.BaseRoutes.Root.Handle("/oauth/{service:[A-Za-z0-9]+}/complete", api.ApiHandler(completeOAuth)).Methods("GET")
api.BaseRoutes.Root.Handle("/oauth/{service:[A-Za-z0-9]+}/login", api.ApiHandler(loginWithOAuth)).Methods("GET")
api.BaseRoutes.Root.Handle("/oauth/{service:[A-Za-z0-9]+}/mobile_login", api.ApiHandler(mobileLoginWithOAuth)).Methods("GET")
api.BaseRoutes.Root.Handle("/oauth/{service:[A-Za-z0-9]+}/signup", api.ApiHandler(signupWithOAuth)).Methods("GET")
// Old endpoints for backwards compatibility, needed to not break SSO for any old setups
api.BaseRoutes.Root.Handle("/api/v3/oauth/{service:[A-Za-z0-9]+}/complete", api.ApiHandler(completeOAuth)).Methods("GET")
api.BaseRoutes.Root.Handle("/signup/{service:[A-Za-z0-9]+}/complete", api.ApiHandler(completeOAuth)).Methods("GET")
api.BaseRoutes.Root.Handle("/login/{service:[A-Za-z0-9]+}/complete", api.ApiHandler(completeOAuth)).Methods("GET")
}
func createOAuthApp(c *Context, w http.ResponseWriter, r *http.Request) {
oauthApp := model.OAuthAppFromJson(r.Body)
if oauthApp == nil {
c.SetInvalidParam("oauth_app")
return
}
if !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_OAUTH)
return
}
if !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_SYSTEM) {
oauthApp.IsTrusted = false
}
oauthApp.CreatorId = c.Session.UserId
rapp, err := c.App.CreateOAuthApp(oauthApp)
if err != nil {
c.Err = err
return
}
c.LogAudit("client_id=" + rapp.Id)
w.WriteHeader(http.StatusCreated)
w.Write([]byte(rapp.ToJson()))
}
func updateOAuthApp(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireAppId()
if c.Err != nil {
return
}
if !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_OAUTH)
return
}
oauthApp := model.OAuthAppFromJson(r.Body)
if oauthApp == nil {
c.SetInvalidParam("oauth_app")
return
}
c.LogAudit("attempt")
oldOauthApp, err := c.App.GetOAuthApp(c.Params.AppId)
if err != nil {
c.Err = err
return
}
if c.Session.UserId != oldOauthApp.CreatorId && !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH)
return
}
updatedOauthApp, err := c.App.UpdateOauthApp(oldOauthApp, oauthApp)
if err != nil {
c.Err = err
return
}
c.LogAudit("success")
w.Write([]byte(updatedOauthApp.ToJson()))
}
func getOAuthApps(c *Context, w http.ResponseWriter, r *http.Request) {
if !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_OAUTH) {
c.Err = model.NewAppError("getOAuthApps", "api.command.admin_only.app_error", nil, "", http.StatusForbidden)
return
}
var apps []*model.OAuthApp
var err *model.AppError
if c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH) {
apps, err = c.App.GetOAuthApps(c.Params.Page, c.Params.PerPage)
} else if c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_OAUTH) {
apps, err = c.App.GetOAuthAppsByCreator(c.Session.UserId, c.Params.Page, c.Params.PerPage)
} else {
c.SetPermissionError(model.PERMISSION_MANAGE_OAUTH)
return
}
if err != nil {
c.Err = err
return
}
w.Write([]byte(model.OAuthAppListToJson(apps)))
}
func getOAuthApp(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireAppId()
if c.Err != nil {
return
}
if !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_OAUTH)
return
}
oauthApp, err := c.App.GetOAuthApp(c.Params.AppId)
if err != nil {
c.Err = err
return
}
if oauthApp.CreatorId != c.Session.UserId && !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH)
return
}
w.Write([]byte(oauthApp.ToJson()))
}
func getOAuthAppInfo(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireAppId()
if c.Err != nil {
return
}
oauthApp, err := c.App.GetOAuthApp(c.Params.AppId)
if err != nil {
c.Err = err
return
}
oauthApp.Sanitize()
w.Write([]byte(oauthApp.ToJson()))
}
func deleteOAuthApp(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireAppId()
if c.Err != nil {
return
}
c.LogAudit("attempt")
if !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_OAUTH)
return
}
oauthApp, err := c.App.GetOAuthApp(c.Params.AppId)
if err != nil {
c.Err = err
return
}
if c.Session.UserId != oauthApp.CreatorId && !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH)
return
}
err = c.App.DeleteOAuthApp(oauthApp.Id)
if err != nil {
c.Err = err
return
}
c.LogAudit("success")
ReturnStatusOK(w)
}
func regenerateOAuthAppSecret(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireAppId()
if c.Err != nil {
return
}
if !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_OAUTH)
return
}
oauthApp, err := c.App.GetOAuthApp(c.Params.AppId)
if err != nil {
c.Err = err
return
}
if oauthApp.CreatorId != c.Session.UserId && !c.App.SessionHasPermissionTo(c.Session, model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH) {
c.SetPermissionError(model.PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH)
return
}
oauthApp, err = c.App.RegenerateOAuthAppSecret(oauthApp)
if err != nil {
c.Err = err
return
}
c.LogAudit("success")
w.Write([]byte(oauthApp.ToJson()))
}
func getAuthorizedOAuthApps(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireUserId()
if c.Err != nil {
return
}
if !c.App.SessionHasPermissionToUser(c.Session, c.Params.UserId) {
c.SetPermissionError(model.PERMISSION_EDIT_OTHER_USERS)
return
}
apps, err := c.App.GetAuthorizedAppsForUser(c.Params.UserId, c.Params.Page, c.Params.PerPage)
if err != nil {
c.Err = err
return
}
w.Write([]byte(model.OAuthAppListToJson(apps)))
}
func authorizeOAuthApp(c *Context, w http.ResponseWriter, r *http.Request) {
authRequest := model.AuthorizeRequestFromJson(r.Body)
if authRequest == nil {
c.SetInvalidParam("authorize_request")
return
}
if err := authRequest.IsValid(); err != nil {
c.Err = err
return
}
c.LogAudit("attempt")
redirectUrl, err := c.App.AllowOAuthAppAccessToUser(c.Session.UserId, authRequest)
if err != nil {
c.Err = err
return
}
c.LogAudit("")
w.Write([]byte(model.MapToJson(map[string]string{"redirect": redirectUrl})))
}
func deauthorizeOAuthApp(c *Context, w http.ResponseWriter, r *http.Request) {
requestData := model.MapFromJson(r.Body)
clientId := requestData["client_id"]
if len(clientId) != 26 {
c.SetInvalidParam("client_id")
return
}
err := c.App.DeauthorizeOAuthAppForUser(c.Session.UserId, clientId)
if err != nil {
c.Err = err
return
}
c.LogAudit("success")
ReturnStatusOK(w)
}
func authorizeOAuthPage(c *Context, w http.ResponseWriter, r *http.Request) {
if !c.App.Config().ServiceSettings.EnableOAuthServiceProvider {
err := model.NewAppError("authorizeOAuth", "api.oauth.authorize_oauth.disabled.app_error", nil, "", http.StatusNotImplemented)
utils.RenderWebAppError(w, r, err, c.App.AsymmetricSigningKey())
return
}
authRequest := &model.AuthorizeRequest{
ResponseType: r.URL.Query().Get("response_type"),
ClientId: r.URL.Query().Get("client_id"),
RedirectUri: r.URL.Query().Get("redirect_uri"),
Scope: r.URL.Query().Get("scope"),
State: r.URL.Query().Get("state"),
}
if err := authRequest.IsValid(); err != nil {
utils.RenderWebAppError(w, r, err, c.App.AsymmetricSigningKey())
return
}
oauthApp, err := c.App.GetOAuthApp(authRequest.ClientId)
if err != nil {
utils.RenderWebAppError(w, r, err, c.App.AsymmetricSigningKey())
return
}
// here we should check if the user is logged in
if len(c.Session.UserId) == 0 {
http.Redirect(w, r, c.GetSiteURLHeader()+"/login?redirect_to="+url.QueryEscape(r.RequestURI), http.StatusFound)
return
}
if !oauthApp.IsValidRedirectURL(authRequest.RedirectUri) {
err := model.NewAppError("authorizeOAuthPage", "api.oauth.allow_oauth.redirect_callback.app_error", nil, "", http.StatusBadRequest)
utils.RenderWebAppError(w, r, err, c.App.AsymmetricSigningKey())
return
}
isAuthorized := false
if _, err := c.App.GetPreferenceByCategoryAndNameForUser(c.Session.UserId, model.PREFERENCE_CATEGORY_AUTHORIZED_OAUTH_APP, authRequest.ClientId); err == nil {
// when we support scopes we should check if the scopes match
isAuthorized = true
}
// Automatically allow if the app is trusted
if oauthApp.IsTrusted || isAuthorized {
authRequest.ResponseType = model.AUTHCODE_RESPONSE_TYPE
redirectUrl, err := c.App.AllowOAuthAppAccessToUser(c.Session.UserId, authRequest)
if err != nil {
utils.RenderWebAppError(w, r, err, c.App.AsymmetricSigningKey())
return
}
http.Redirect(w, r, redirectUrl, http.StatusFound)
return
}
w.Header().Set("X-Frame-Options", "SAMEORIGIN")
w.Header().Set("Content-Security-Policy", "frame-ancestors 'self'")
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Header().Set("Cache-Control", "no-cache, max-age=31556926, public")
staticDir, _ := utils.FindDir(model.CLIENT_DIR)
http.ServeFile(w, r, filepath.Join(staticDir, "root.html"))
}
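// For orientation, the token endpoint below accepts form-encoded POST bodies
// along these lines (field names come from the FormValue calls; the grant type
// strings are assumed to be the standard OAuth2 values behind the model constants):
//
//	grant_type=authorization_code&code=<code>&client_id=<26-char id>&client_secret=<secret>&redirect_uri=<uri>
//	grant_type=refresh_token&refresh_token=<token>&client_id=<26-char id>&client_secret=<secret>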
func getAccessToken(c *Context, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
code := r.FormValue("code")
refreshToken := r.FormValue("refresh_token")
grantType := r.FormValue("grant_type")
switch grantType {
case model.ACCESS_TOKEN_GRANT_TYPE:
if len(code) == 0 {
c.Err = model.NewAppError("getAccessToken", "api.oauth.get_access_token.missing_code.app_error", nil, "", http.StatusBadRequest)
return
}
case model.REFRESH_TOKEN_GRANT_TYPE:
if len(refreshToken) == 0 {
c.Err = model.NewAppError("getAccessToken", "api.oauth.get_access_token.missing_refresh_token.app_error", nil, "", http.StatusBadRequest)
return
}
default:
c.Err = model.NewAppError("getAccessToken", "api.oauth.get_access_token.bad_grant.app_error", nil, "", http.StatusBadRequest)
return
}
clientId := r.FormValue("client_id")
if len(clientId) != 26 {
c.Err = model.NewAppError("getAccessToken", "api.oauth.get_access_token.bad_client_id.app_error", nil, "", http.StatusBadRequest)
return
}
secret := r.FormValue("client_secret")
if len(secret) == 0 {
c.Err = model.NewAppError("getAccessToken", "api.oauth.get_access_token.bad_client_secret.app_error", nil, "", http.StatusBadRequest)
return
}
redirectUri := r.FormValue("redirect_uri")
c.LogAudit("attempt")
accessRsp, err := c.App.GetOAuthAccessToken(clientId, grantType, redirectUri, code, secret, refreshToken)
if err != nil {
c.Err = err
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-store")
w.Header().Set("Pragma", "no-cache")
c.LogAudit("success")
w.Write([]byte(accessRsp.ToJson()))
}
func completeOAuth(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireService()
if c.Err != nil {
return
}
service := c.Params.Service
code := r.URL.Query().Get("code")
if len(code) == 0 {
utils.RenderWebError(w, r, http.StatusTemporaryRedirect, url.Values{
"type": []string{"oauth_missing_code"},
"service": []string{strings.Title(service)},
}, c.App.AsymmetricSigningKey())
return
}
state := r.URL.Query().Get("state")
uri := c.GetSiteURLHeader() + "/signup/" + service + "/complete"
body, teamId, props, err := c.App.AuthorizeOAuthUser(w, r, service, code, state, uri)
action := ""
if props != nil {
action = props["action"]
}
if err != nil {
err.Translate(c.T)
l4g.Error(err.Error())
if action == model.OAUTH_ACTION_MOBILE {
w.Write([]byte(err.ToJson()))
} else {
utils.RenderWebAppError(w, r, err, c.App.AsymmetricSigningKey())
}
return
}
user, err := c.App.CompleteOAuth(service, body, teamId, props)
if err != nil {
err.Translate(c.T)
l4g.Error(err.Error())
if action == model.OAUTH_ACTION_MOBILE {
w.Write([]byte(err.ToJson()))
} else {
utils.RenderWebAppError(w, r, err, c.App.AsymmetricSigningKey())
}
return
}
var redirectUrl string
if action == model.OAUTH_ACTION_EMAIL_TO_SSO {
redirectUrl = c.GetSiteURLHeader() + "/login?extra=signin_change"
} else if action == model.OAUTH_ACTION_SSO_TO_EMAIL {
redirectUrl = app.GetProtocol(r) + "://" + r.Host + "/claim?email=" + url.QueryEscape(props["email"])
} else {
session, err := c.App.DoLogin(w, r, user, "")
if err != nil {
err.Translate(c.T)
c.Err = err
if action == model.OAUTH_ACTION_MOBILE {
w.Write([]byte(err.ToJson()))
}
return
}
c.Session = *session
redirectUrl = c.GetSiteURLHeader()
}
if action == model.OAUTH_ACTION_MOBILE {
ReturnStatusOK(w)
return
} else {
http.Redirect(w, r, redirectUrl, http.StatusTemporaryRedirect)
}
}
func loginWithOAuth(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireService()
if c.Err != nil {
return
}
loginHint := r.URL.Query().Get("login_hint")
redirectTo := r.URL.Query().Get("redirect_to")
teamId, err := c.App.GetTeamIdFromQuery(r.URL.Query())
if err != nil {
c.Err = err
return
}
if authUrl, err := c.App.GetOAuthLoginEndpoint(w, r, c.Params.Service, teamId, model.OAUTH_ACTION_LOGIN, redirectTo, loginHint); err != nil {
c.Err = err
return
} else {
http.Redirect(w, r, authUrl, http.StatusFound)
}
}
func mobileLoginWithOAuth(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireService()
if c.Err != nil {
return
}
teamId, err := c.App.GetTeamIdFromQuery(r.URL.Query())
if err != nil {
c.Err = err
return
}
if authUrl, err := c.App.GetOAuthLoginEndpoint(w, r, c.Params.Service, teamId, model.OAUTH_ACTION_MOBILE, "", ""); err != nil {
c.Err = err
return
} else {
http.Redirect(w, r, authUrl, http.StatusFound)
}
}
func signupWithOAuth(c *Context, w http.ResponseWriter, r *http.Request) {
c.RequireService()
if c.Err != nil {
return
}
if !c.App.Config().TeamSettings.EnableUserCreation {
utils.RenderWebError(w, r, http.StatusBadRequest, url.Values{
"message": []string{utils.T("api.oauth.singup_with_oauth.disabled.app_error")},
}, c.App.AsymmetricSigningKey())
return
}
teamId, err := c.App.GetTeamIdFromQuery(r.URL.Query())
if err != nil {
c.Err = err
return
}
if authUrl, err := c.App.GetOAuthSignupEndpoint(w, r, c.Params.Service, teamId); err != nil {
c.Err = err
return
} else {
http.Redirect(w, r, authUrl, http.StatusFound)
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
scripts/generateInputFileRHF.py | import os
import sys; sys.path.append( os.environ['AFQMCLAB_DIR']+"/scripts/pyscf" )
from molecule import *
from rhf import *
from model import *
atoms = [['O',(0.000,0.000,0.000)]]
chrg = 1
spn = 3
basis = 'bfd-vdz'
psp = 'bfd-pp'
sym = 'c1'
mol = setupMolecule(atoms, chrg, spn, basis, psp, sym)
rhf = doROHFCalculation(mol, 1e-12, 1e-8, 0.4, 16000, 5000)
canonic = CanonicalBais(mol, rhf, 1e-8)
writeInputForModel(mol, rhf, canonic, 1e-12)
writeROHFSD2is(mol, rhf, canonic, "phi.dat")
writeROHFSD2s(mol, rhf, canonic, "phiT.dat")
# from fci import *
# doFCICalculation(mol, rhf.mo_coeff) | [] | [] | ["AFQMCLAB_DIR"] | [] | ["AFQMCLAB_DIR"] | python | 1 | 0 |
Fw/Python/test/fprime/common/models/serialize/test_types.py | """
Tests the serialization and deserialization of all the types in Fw/Python/src/fprime/common/models/serialize/
Created on Jun 25, 2020
@author: hpaulson, mstarch
"""
import json
from collections.abc import Iterable
import pytest
from fprime.common.models.serialize.array_type import ArrayType
from fprime.common.models.serialize.bool_type import BoolType
from fprime.common.models.serialize.enum_type import EnumType
from fprime.common.models.serialize.numerical_types import (
F32Type,
F64Type,
I8Type,
I16Type,
I32Type,
I64Type,
U8Type,
U16Type,
U32Type,
U64Type,
)
from fprime.common.models.serialize.serializable_type import SerializableType
from fprime.common.models.serialize.string_type import StringType
from fprime.common.models.serialize.time_type import TimeBase, TimeType
from fprime.common.models.serialize.type_exceptions import (
DeserializeException,
NotInitializedException,
TypeMismatchException,
TypeRangeException,
)
PYTHON_TESTABLE_TYPES = [
True,
False,
-1,
0,
300,
"abc",
"True",
"False",
3.1412,
(0, 1),
(True, False),
[],
[0],
{},
{"abc": 123},
{2, 4, 3},
]
def valid_values_test(type_input, valid_values, sizes, extras=None):
""" Tests to be run on all types """
if not isinstance(sizes, Iterable):
sizes = [sizes] * len(valid_values)
# Should be able to instantiate a blank type, but not serialize it until a value has been supplied
if not extras:
extras = [[]] * len(valid_values)
instantiation = type_input(*extras[0])
with pytest.raises(NotInitializedException):
instantiation.serialize()
# Should be able to get a JSONable object that is dumpable to a JSON string
jsonable = instantiation.to_jsonable()
json.loads(json.dumps(jsonable))
# Run on valid values
for value, size, extra in zip(valid_values, sizes, extras):
instantiation = type_input(*extra, val=value)
assert instantiation.val == value
assert instantiation.getSize() == size
# Check assignment by value
by_value = type_input(*extra)
by_value.val = value
assert by_value.val == instantiation.val, "Assignment by value has failed"
assert by_value.getSize() == size
# Check serialization and deserialization
serialized = instantiation.serialize()
for offset in [0, 10, 50]:
deserializer = type_input(*extra)
deserializer.deserialize((b" " * offset) + serialized, offset)
assert instantiation.val == deserializer.val, "Deserialization has failed"
assert deserializer.getSize() == size
def invalid_values_test(
type_input, invalid_values, exception_class=TypeMismatchException
):
""" Check invalid values for all types """
for item in invalid_values:
# Constructor initialization
with pytest.raises(exception_class):
instantiation = type_input(item)
# Value initialization
with pytest.raises(exception_class):
instantiation = type_input()
instantiation.val = item
# Deserialization problems required space
for offset in [0, 10, 50]:
with pytest.raises(DeserializeException):
instantiation = type_input()
instantiation.deserialize(b" " * offset, offset)
def ser_deser_time_test(t_base, t_context, secs, usecs):
"""
Test serialization/deserialization of TimeType objects.
This test function creates a time type object with the given parameters and
then serializes it and deserializes it. Also prints it for visual inspection
of the formatted output.
Args:
t_base (int): Time base for the new time type object
t_context (int): Time context for the new time type object
secs (int): Seconds value for the new time type object
usecs (int): Microseconds value for the new time type object
Returns:
None. The assertions below raise AssertionError if the round trip fails.
"""
val = TimeType(t_base, t_context, secs, usecs)
buff = val.serialize()
val2 = TimeType()
val2.deserialize(buff, 0)
assert val2.timeBase.value == t_base
assert val2.timeContext == t_context
assert val2.seconds == secs
assert val2.useconds == usecs
def test_boolean_nominal():
""" Tests the nominal cases of a BoolType """
valid_values_test(BoolType, [True, False], 1)
def test_boolean_off_nominal():
""" Tests the nominal cases of a BoolType """
invalid_values_test(
BoolType, filter(lambda item: not isinstance(item, bool), PYTHON_TESTABLE_TYPES)
)
def test_int_types_nominal():
""" Tests the integer types """
for type_input, size in [(I8Type, 1), (I16Type, 2), (I32Type, 4), (I64Type, 8)]:
total = pow(2, (size * 8) - 1)
valid_values_test(type_input, [0, -1, 1, -total, total - 1], size)
def test_int_types_off_nominal():
""" Tests the integer off nominal types """
for type_input, size in [(I8Type, 1), (I16Type, 2), (I32Type, 4), (I64Type, 8)]:
total = pow(2, (size * 8) - 1)
invalid_values_test(
type_input,
filter(lambda item: not isinstance(item, int), PYTHON_TESTABLE_TYPES),
)
invalid_values_test(
type_input, [-total - 1, total, -total * 35, total * 35], TypeRangeException
)
def test_uint_types_nominal():
""" Tests the integer types """
for type_input, size in [(U8Type, 1), (U16Type, 2), (U32Type, 4), (U64Type, 8)]:
max_int = pow(2, (size * 8)) - 1
valid_values_test(type_input, [0, 1, max_int - 1, max_int], size)
def test_uint_types_off_nominal():
""" Tests the integer off nominal types """
for type_input, size in [(U8Type, 1), (U16Type, 2), (U32Type, 4), (U64Type, 8)]:
max_int = pow(2, (size * 8)) - 1
invalid_values_test(
type_input,
filter(lambda item: not isinstance(item, int), PYTHON_TESTABLE_TYPES),
)
invalid_values_test(
type_input,
[-1, -2, max_int + 1, max_int * 35, -max_int],
TypeRangeException,
)
def test_float_types_nominal():
""" Tests the integer types """
valid_values_test(F32Type, [0.31415000557899475, 0.0, -3.141590118408203], 4)
valid_values_test(F64Type, [0.31415000557899475, 0.0, -3.141590118408203], 8)
def test_float_types_off_nominal():
""" Tests the integer off nominal types """
invalid_values_test(
F32Type, filter(lambda item: not isinstance(item, float), PYTHON_TESTABLE_TYPES)
)
invalid_values_test(
F64Type, filter(lambda item: not isinstance(item, float), PYTHON_TESTABLE_TYPES)
)
def test_enum_type():
"""
Tests the EnumType serialization and deserialization
"""
members = {"MEMB1": 0, "MEMB2": 6, "MEMB3": 9}
val1 = EnumType("SomeEnum", members, "MEMB3")
buff = val1.serialize()
val2 = EnumType("SomeEnum", members)
val2.deserialize(buff, 0)
assert val1.val == val2.val
def check_cloned_member_list(members1, members2):
""" Check member list knowing direct compares don't work"""
for tuple1, tuple2 in zip(members1, members2):
assert tuple1[0] == tuple2[0], "Names do not match"
assert tuple1[2] == tuple2[2], "Format strings do not match"
assert tuple1[3] == tuple2[3], "Descriptions do not match"
assert tuple1[1].val == tuple2[1].val, "Values don't match"
def test_serializable_type():
"""
Tests the SerializableType serialization and deserialization
"""
u32Mem = U32Type(1000000)
stringMem = StringType("something to say")
members = {"MEMB1": 0, "MEMB2": 6, "MEMB3": 9}
enumMem = EnumType("SomeEnum", members, "MEMB3")
memList = [
("mem1", u32Mem, ">i"),
("mem2", stringMem, ">H"),
("mem3", enumMem, ">i"),
]
serType1 = SerializableType("ASerType", memList)
buff = serType1.serialize()
serType2 = SerializableType("ASerType", memList)
serType2.deserialize(buff, 0)
check_cloned_member_list(serType1.mem_list, serType2.mem_list)
assert serType1.val == serType2.val
i32Mem = I32Type(-1000000)
stringMem = StringType("something else to say")
members = {"MEMB1": 4, "MEMB2": 2, "MEMB3": 0}
enumMem = EnumType("SomeEnum", members, "MEMB3")
memList = [
("mem1", i32Mem, ">i"),
("mem2", stringMem, ">H"),
("mem3", enumMem, ">i"),
]
serType1 = SerializableType("ASerType", memList)
buff = serType1.serialize()
serType2 = SerializableType("ASerType", memList)
serType2.deserialize(buff, 0)
check_cloned_member_list(serType1.mem_list, serType2.mem_list)
value_dict = {"mem1": 3, "mem2": "abc 123", "mem3": "MEMB1"}
serType1.val = value_dict
assert serType1.val == value_dict
mem_list = serType1.mem_list
memList = [(a, b, c, None) for a, b, c in memList]
check_cloned_member_list(mem_list, memList)
serTypeEmpty = SerializableType("ASerType", [])
assert serTypeEmpty.val == {}
assert serTypeEmpty.mem_list == []
# def test_array_type():
# """
# Tests the ArrayType serialization and deserialization
# """
# extra_ctor_args = [("TestArray", (I32Type, 2, "I DON'T KNOW")), ("TestArray2", (U8Type, 4, "I DON'T KNOW")),
# ("TestArray3", (StringType, 1, "I DON'T KNOW"))]
# values = [[32, 1], [0, 1, 2, 3], ["one"]]
# sizes = [8, 4, 3]
#
# valid_values_test(ArrayType, values, sizes, extra_ctor_args)
def test_time_type():
"""
Tests the TimeType serialization and deserialization
"""
TIME_SIZE = 11
in_no_err_list = [
(TimeBase["TB_NONE"].value, 1, 100, 999999),
(TimeBase["TB_PROC_TIME"].value, 0xFF, 1234567, 2952),
(TimeBase["TB_WORKSTATION_TIME"].value, 8, 1529430215, 12),
(TimeBase["TB_SC_TIME"].value, 231, 1344230277, 123456),
(TimeBase["TB_FPGA_TIME"].value, 78, 10395, 24556),
(TimeBase["TB_DONT_CARE"].value, 0xB3, 12390819, 12356),
]
in_err_list = [
(10, 58, 15345, 0),
(TimeBase["TB_NONE"].value, 1, 3, -1),
(TimeBase["TB_WORKSTATION_TIME"].value, 1, 700000, 1234567),
]
val = TimeType()
size = val.getSize()
assert size == TIME_SIZE
for (t_base, t_context, secs, usecs) in in_no_err_list:
ser_deser_time_test(t_base, t_context, secs, usecs)
for (t_base, t_context, secs, usecs) in in_err_list:
with pytest.raises(TypeRangeException):
ser_deser_time_test(t_base, t_context, secs, usecs)
| [] | [] | [] | [] | [] | python | null | null | null |
go/vt/mysqlctl/mysqld.go | // Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Commands for controlling an external mysql process.
Some commands are issued as exec'd tools, some are handled by connecting via
the mysql protocol.
*/
package mysqlctl
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"code.google.com/p/vitess/go/mysql"
"code.google.com/p/vitess/go/netutil"
"code.google.com/p/vitess/go/relog"
vtenv "code.google.com/p/vitess/go/vt/env"
"code.google.com/p/vitess/go/vt/hook"
)
const (
MysqlWaitTime = 120 * time.Second // default number of seconds to wait
)
type CreateConnection func() (*mysql.Connection, error)
var DefaultDbaParams = mysql.ConnectionParams{
Uname: "vt_dba",
Charset: "utf8",
}
var DefaultReplParams = mysql.ConnectionParams{
Uname: "vt_repl",
Charset: "utf8",
}
type Mysqld struct {
config *Mycnf
dbaParams mysql.ConnectionParams
replParams mysql.ConnectionParams
createConnection CreateConnection
TabletDir string
SnapshotDir string
}
func NewMysqld(config *Mycnf, dba, repl mysql.ConnectionParams) *Mysqld {
if dba == DefaultDbaParams {
dba.UnixSocket = config.SocketFile
}
// the super connection is not linked to a specific database
// (allows us to create them)
superParams := dba
superParams.Dbname = ""
createSuperConnection := func() (*mysql.Connection, error) {
return mysql.Connect(superParams)
}
return &Mysqld{config,
dba,
repl,
createSuperConnection,
TabletDir(config.ServerId),
SnapshotDir(config.ServerId),
}
}
func Start(mt *Mysqld, mysqlWaitTime time.Duration) error {
var name string
// try the mysqld start hook, if any
h := hook.NewSimpleHook("mysqld_start")
hr := h.Execute()
switch hr.ExitStatus {
case hook.HOOK_SUCCESS:
// hook exists and worked, we can keep going
name = "mysqld_start hook"
case hook.HOOK_DOES_NOT_EXIST:
// hook doesn't exist, run mysqld_safe ourselves
relog.Info("No mysqld_start hook, running mysqld_safe directly")
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name = path.Join(dir, "bin/mysqld_safe")
arg := []string{
"--defaults-file=" + mt.config.path}
env := []string{os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql")}
cmd := exec.Command(name, arg...)
cmd.Dir = dir
cmd.Env = env
relog.Info("mysqlctl.Start mysqlWaitTime:%v %#v", mysqlWaitTime, cmd)
_, err = cmd.StderrPipe()
if err != nil {
return err
}
err = cmd.Start()
if err != nil {
return err
}
// wait so we don't get a bunch of defunct processes
go cmd.Wait()
default:
// hook failed, we report error
return fmt.Errorf("mysqld_start hook failed: %v", hr.String())
}
// give it some time to succeed - usually by the time the socket emerges
// we are in good shape
for i := mysqlWaitTime; i >= 0; i -= time.Second {
_, statErr := os.Stat(mt.config.SocketFile)
if statErr == nil {
// Make sure the socket file isn't stale.
conn, connErr := mt.createConnection()
if connErr == nil {
conn.Close()
return nil
}
} else if !os.IsNotExist(statErr) {
return statErr
}
time.Sleep(time.Second)
}
return errors.New(name + ": deadline exceeded waiting for " + mt.config.SocketFile)
}
/* waitForMysqld: should the function block until mysqld has stopped?
This can actually take a *long* time if the buffer cache needs to be fully
flushed - on the order of 20-30 minutes.
*/
func Shutdown(mt *Mysqld, waitForMysqld bool, mysqlWaitTime time.Duration) error {
relog.Info("mysqlctl.Shutdown")
// possibly mysql is already shutdown, check for a few files first
_, socketPathErr := os.Stat(mt.config.SocketFile)
_, pidPathErr := os.Stat(mt.config.PidFile)
if socketPathErr != nil && pidPathErr != nil {
relog.Warning("assuming shutdown - no socket, no pid file")
return nil
}
// try the mysqld shutdown hook, if any
h := hook.NewSimpleHook("mysqld_shutdown")
hr := h.Execute()
switch hr.ExitStatus {
case hook.HOOK_SUCCESS:
// hook exists and worked, we can keep going
case hook.HOOK_DOES_NOT_EXIST:
// hook doesn't exist, try mysqladmin
relog.Info("No mysqld_shutdown hook, running mysqladmin directly")
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name := path.Join(dir, "bin/mysqladmin")
arg := []string{
"-u", "vt_dba", "-S", mt.config.SocketFile,
"shutdown"}
env := []string{
os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql"),
}
_, err = execCmd(name, arg, env, dir)
if err != nil {
return err
}
default:
// hook failed, we report error
return fmt.Errorf("mysqld_shutdown hook failed: %v", hr.String())
}
// wait for mysqld to really stop. use the sock file as a proxy for that since
// we can't call wait() in a process we didn't start.
if waitForMysqld {
for i := mysqlWaitTime; i >= 0; i -= time.Second {
_, statErr := os.Stat(mt.config.SocketFile)
if statErr != nil && os.IsNotExist(statErr) {
return nil
}
time.Sleep(time.Second)
}
return errors.New("gave up waiting for mysqld to stop")
}
return nil
}
/* exec and wait for a return code. look for name in $PATH. */
func execCmd(name string, args, env []string, dir string) (cmd *exec.Cmd, err error) {
cmdPath, _ := exec.LookPath(name)
relog.Info("execCmd: %v %v %v", name, cmdPath, args)
cmd = exec.Command(cmdPath, args...)
cmd.Env = env
cmd.Dir = dir
output, err := cmd.CombinedOutput()
if err != nil {
err = errors.New(name + ": " + string(output))
}
return cmd, err
}
func Init(mt *Mysqld, mysqlWaitTime time.Duration) error {
relog.Info("mysqlctl.Init")
err := mt.createDirs()
if err != nil {
relog.Error("%s", err.Error())
return err
}
root, err := vtenv.VtRoot()
if err != nil {
relog.Error("%s", err.Error())
return err
}
hr := hook.NewSimpleHook("make_mycnf").Execute()
configData := ""
if hr.ExitStatus == hook.HOOK_DOES_NOT_EXIST {
relog.Info("make_mycnf hook doesn't exist")
cnfTemplatePaths := []string{
path.Join(root, "config/mycnf/default.cnf"),
path.Join(root, "config/mycnf/master.cnf"),
path.Join(root, "config/mycnf/replica.cnf"),
}
if extraCnf := os.Getenv("EXTRA_MY_CNF"); extraCnf != "" {
parts := strings.Split(extraCnf, ":")
cnfTemplatePaths = append(cnfTemplatePaths, parts...)
}
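// Illustration (hypothetical paths): EXTRA_MY_CNF="/vt/extra/a.cnf:/vt/extra/b.cnf"
// appends both files, in that order, to the list of mycnf templates passed to MakeMycnf.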
configData, err = MakeMycnf(mt.config, cnfTemplatePaths)
} else if hr.ExitStatus == hook.HOOK_SUCCESS {
configData, err = fillMycnfTemplate(mt.config, hr.Stdout)
} else {
err = fmt.Errorf("make_mycnf hook failed(%v): %v", hr.ExitStatus, hr.Stderr)
}
if err == nil {
err = ioutil.WriteFile(mt.config.path, []byte(configData), 0664)
}
if err != nil {
relog.Error("failed creating %v: %v", mt.config.path, err)
return err
}
dbTbzPath := path.Join(root, "data/bootstrap/mysql-db-dir.tbz")
relog.Info("decompress bootstrap db %v", dbTbzPath)
args := []string{"-xj", "-C", mt.config.DataDir, "-f", dbTbzPath}
_, tarErr := execCmd("tar", args, []string{}, "")
if tarErr != nil {
relog.Error("failed unpacking %v: %v", dbTbzPath, tarErr)
return tarErr
}
if err = Start(mt, mysqlWaitTime); err != nil {
relog.Error("failed starting, check %v", mt.config.ErrorLogPath)
return err
}
schemaPath := path.Join(root, "data/bootstrap/_vt_schema.sql")
schema, err := ioutil.ReadFile(schemaPath)
if err != nil {
return err
}
sqlCmds := make([]string, 0, 10)
relog.Info("initial schema: %v", string(schema))
for _, cmd := range strings.Split(string(schema), ";") {
cmd = strings.TrimSpace(cmd)
if cmd == "" {
continue
}
sqlCmds = append(sqlCmds, cmd)
}
return mt.executeSuperQueryList(sqlCmds)
}
func (mt *Mysqld) createDirs() error {
relog.Info("creating directory %s", mt.TabletDir)
if err := os.MkdirAll(mt.TabletDir, 0775); err != nil {
return err
}
for _, dir := range TopLevelDirs() {
if err := mt.createTopDir(dir); err != nil {
return err
}
}
for _, dir := range DirectoryList(mt.config) {
relog.Info("creating directory %s", dir)
if err := os.MkdirAll(dir, 0775); err != nil {
return err
}
// FIXME(msolomon) validate permissions?
}
return nil
}
// createTopDir creates a top level directory under TabletDir.
// However, if a directory of the same name already exists under
// vtenv.VtDataRoot(), it creates a directory named after the tablet
// id under that directory, and then creates a symlink under TabletDir
// that points to the newly created directory. For example, if
// /vt/data is present, it will create the following structure:
// /vt/data/vt_xxxx /vt/vt_xxxx/data -> /vt/data/vt_xxxx
func (mt *Mysqld) createTopDir(dir string) error {
vtname := path.Base(mt.TabletDir)
target := path.Join(vtenv.VtDataRoot(), dir)
_, err := os.Lstat(target)
if err != nil {
if os.IsNotExist(err) {
topdir := path.Join(mt.TabletDir, dir)
relog.Info("creating directory %s", topdir)
return os.MkdirAll(topdir, 0775)
}
return err
}
linkto := path.Join(target, vtname)
source := path.Join(mt.TabletDir, dir)
relog.Info("creating directory %s", linkto)
err = os.MkdirAll(linkto, 0775)
if err != nil {
return err
}
relog.Info("creating symlink %s -> %s", source, linkto)
return os.Symlink(linkto, source)
}
func Teardown(mt *Mysqld, force bool) error {
relog.Info("mysqlctl.Teardown")
if err := Shutdown(mt, true, MysqlWaitTime); err != nil {
relog.Warning("failed mysqld shutdown: %v", err.Error())
if !force {
return err
}
}
var removalErr error
for _, dir := range TopLevelDirs() {
qdir := path.Join(mt.TabletDir, dir)
if err := deleteTopDir(qdir); err != nil {
removalErr = err
}
}
return removalErr
}
func deleteTopDir(dir string) (removalErr error) {
fi, err := os.Lstat(dir)
if err != nil {
relog.Error("error deleting dir %v: %v", dir, err.Error())
removalErr = err
} else if fi.Mode()&os.ModeSymlink != 0 {
target, err := filepath.EvalSymlinks(dir)
if err != nil {
relog.Error("could not resolve symlink %v: %v", dir, err.Error())
removalErr = err
}
relog.Info("remove data dir (symlinked) %v", target)
if err = os.RemoveAll(target); err != nil {
relog.Error("failed removing %v: %v", target, err.Error())
removalErr = err
}
}
relog.Info("remove data dir %v", dir)
if err = os.RemoveAll(dir); err != nil {
relog.Error("failed removing %v: %v", dir, err.Error())
removalErr = err
}
return
}
func (mysqld *Mysqld) Addr() string {
hostname := netutil.FullyQualifiedHostnameOrPanic()
return fmt.Sprintf("%v:%v", hostname, mysqld.config.MysqlPort)
}
func (mysqld *Mysqld) IpAddr() string {
addr, err := netutil.ResolveIpAddr(mysqld.Addr())
if err != nil {
panic(err) // should never happen
}
return addr
}
// executes some SQL commands using a mysql command line interface process
func (mysqld *Mysqld) ExecuteMysqlCommand(sql string) error {
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name := path.Join(dir, "bin/mysql")
arg := []string{
"-u", "vt_dba", "-S", mysqld.config.SocketFile,
"-e", sql}
env := []string{
"LD_LIBRARY_PATH=" + path.Join(dir, "lib/mysql"),
}
_, err = execCmd(name, arg, env, dir)
if err != nil {
return err
}
return nil
}
| ["\"EXTRA_MY_CNF\""] | [] | ["EXTRA_MY_CNF"] | [] | ["EXTRA_MY_CNF"] | go | 1 | 0 |
example/tutorials/code/rtt-logistic_regression.py | #!/usr/bin/env python3
import latticex.rosetta as rtt # difference from tensorflow
import math
import os
import csv
import tensorflow as tf
import numpy as np
from util import read_dataset
np.set_printoptions(suppress=True)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
np.random.seed(0)
EPOCHES = 10
BATCH_SIZE = 16
learning_rate = 0.0002
rtt.activate("SecureNN")
mpc_player_id = rtt.py_protocol_handler.get_party_id()
# real data
# ######################################## difference from tensorflow
file_x = '../dsets/P' + str(mpc_player_id) + "/cls_train_x.csv"
file_y = '../dsets/P' + str(mpc_player_id) + "/cls_train_y.csv"
real_X, real_Y = rtt.PrivateDataset(data_owner=(
0, 1), label_owner=0).load_data(file_x, file_y, header=None)
# ######################################## difference from tensorflow
DIM_NUM = real_X.shape[1]
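# Shape sketch (an assumption inferred from the calls above, not from the
# Rosetta docs): real_X is an (n_samples, DIM_NUM) feature matrix whose columns
# are contributed by parties 0 and 1 (data_owner=(0, 1)), and real_Y is an
# (n_samples, 1) label vector supplied by party 0 (label_owner=0).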
X = tf.placeholder(tf.float64, [None, DIM_NUM])
Y = tf.placeholder(tf.float64, [None, 1])
print(X)
print(Y)
# initialize W & b
W = tf.Variable(tf.zeros([DIM_NUM, 1], dtype=tf.float64))
b = tf.Variable(tf.zeros([1], dtype=tf.float64))
print(W)
print(b)
# predict
pred_Y = tf.sigmoid(tf.matmul(X, W) + b)
print(pred_Y)
# loss
logits = tf.matmul(X, W) + b
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=logits)
loss = tf.reduce_mean(loss)
print(loss)
# optimizer
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
print(train)
init = tf.global_variables_initializer()
print(init)
with tf.Session() as sess:
sess.run(init)
xW, xb = sess.run([W, b])
print("init weight:{} \nbias:{}".format(xW, xb))
# train
BATCHES = math.ceil(len(real_X) / BATCH_SIZE)
for e in range(EPOCHES):
for i in range(BATCHES):
bX = real_X[(i * BATCH_SIZE): (i + 1) * BATCH_SIZE]
bY = real_Y[(i * BATCH_SIZE): (i + 1) * BATCH_SIZE]
sess.run(train, feed_dict={X: bX, Y: bY})
j = e * BATCHES + i
if j % 50 == 0 or (j == EPOCHES * BATCHES - 1 and j % 50 != 0):
xW, xb = sess.run([W, b])
print("I,E,B:{:0>4d},{:0>4d},{:0>4d} weight:{} \nbias:{}".format(
j, e, i, xW, xb))
# predict
Y_pred = sess.run(pred_Y, feed_dict={X: real_X, Y: real_Y})
print("Y_pred:", Y_pred)
print(rtt.get_perf_stats(True))
rtt.deactivate()
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 |
config.py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') \
or 'sqlite:///' + os.path.join(basedir, 'app.db')
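# Example override (hypothetical DSN): setting DATABASE_URL to something like
#   postgresql://user:pass@localhost/appdb
# takes precedence over the sqlite fallback above.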
SQLALCHEMY_TRACK_MODIFICATIONS = False | [] | [] | ["DATABASE_URL"] | [] | ["DATABASE_URL"] | python | 1 | 0 |
morse-stf/unittest/runMain.py | # encoding: utf-8
"""
@author: guanshun
@contact: [email protected]
@time: 2019-07-02 15:35
@file: runMain.py.py
@desc:
"""
# -*- coding: UTF-8 -*-
import os
import unittest
from xmlrunner import xmlrunner
from stensorflow.engine.start_server import start_local_server
if __name__ == '__main__':
stf_home = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
print("stf_home=", stf_home)
os.environ["stf_home"] = stf_home
start_local_server(os.path.join(os.environ.get("stf_home", ".."), "conf", "config.json"))
suite = unittest.TestSuite()
# Find all the .py files in the directory whose names end with _test
# all_cases = unittest.defaultTestLoader.discover('.', '*_test.py')
all_cases = unittest.defaultTestLoader.discover('unittest', 'test_*.py')
#all_cases = unittest.defaultTestLoader.discover('.', 'test_*.py')
for case in all_cases:
# Add all the test cases
suite.addTests(case)
runner = xmlrunner.XMLTestRunner(output='report')
runner.run(suite)
| [] | [] | ["stf_home"] | [] | ["stf_home"] | python | 1 | 0 |
.github/scripts/parse_ref.py | #!/usr/bin/env python3
import os
import re
def main() -> None:
ref = os.environ['GITHUB_REF']
m = re.match(r'^refs/(\w+)/(.*)$', ref)
if m:
category, stripped = m.groups()
if category == 'heads':
print(f'::set-output name=branch::{stripped}')
elif category == 'pull':
print(f'::set-output name=branch::pull/{stripped.split("/")[0]}')
elif category == 'tags':
print(f'::set-output name=tag::{stripped}')
if __name__ == '__main__':
main()
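# Worked examples of the mapping above (illustrative values):
#   GITHUB_REF=refs/heads/main     -> ::set-output name=branch::main
#   GITHUB_REF=refs/pull/123/merge -> ::set-output name=branch::pull/123
#   GITHUB_REF=refs/tags/v1.0.0    -> ::set-output name=tag::v1.0.0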
| [] | [] | ["GITHUB_REF"] | [] | ["GITHUB_REF"] | python | 1 | 0 |
tests/settings.py | """
Django settings for running tests for rolca-core package.
"""
import os
from django.utils.translation import gettext_lazy as _
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'secret'
DEBUG = True
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
INSTALLED_APPS = (
"channels",
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django_filters',
'rolca.backup',
'rolca.core',
'rolca.payment',
'rolca.rating',
)
ROOT_URLCONF = 'tests.urls'
LOGIN_REDIRECT_URL = '/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LANGUAGES = (
('en', _('English')),
('sl', _('Slovene')),
)
LANGUAGE_CODE = 'en'
LOCALE_PATHS = (os.path.join(PROJECT_ROOT, 'locale'),)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('ROLCA_POSTGRESQL_NAME', 'rolca'),
'USER': os.environ.get('ROLCA_POSTGRESQL_USER', 'rolca'),
'HOST': os.environ.get('ROLCA_POSTGRESQL_HOST', 'localhost'),
'PORT': int(os.environ.get('ROLCA_POSTGRESQL_PORT', 5432)),
}
}
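# The defaults above target a local PostgreSQL instance; to run against another
# server, export the variables first (hypothetical values):
#   export ROLCA_POSTGRESQL_HOST=db.example.com ROLCA_POSTGRESQL_PORT=5433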
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.backends.DjangoFilterBackend',
),
}
CHANNEL_LAYERS = {'default': {'BACKEND': 'channels.layers.InMemoryChannelLayer'}}
ASGI_APPLICATION = 'rolca.tests.routing.application'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
ROLCA_MAX_SIZE = 1048576
ROLCA_MAX_LONG_EDGE = 2400
ROLCA_ACCEPTED_FORMATS = ['JPEG']
| [] | [] | ["ROLCA_POSTGRESQL_HOST", "ROLCA_POSTGRESQL_USER", "ROLCA_POSTGRESQL_NAME", "ROLCA_POSTGRESQL_PORT"] | [] | ["ROLCA_POSTGRESQL_HOST", "ROLCA_POSTGRESQL_USER", "ROLCA_POSTGRESQL_NAME", "ROLCA_POSTGRESQL_PORT"] | python | 4 | 0 |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wwttms.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pyserini/hsearch/__main__.py | #
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
import sys
from tqdm import tqdm
from pyserini.dsearch import SimpleDenseSearcher
from pyserini.query_iterator import get_query_iterator, TopicsFormat
from pyserini.output_writer import get_output_writer, OutputFormat
from pyserini.search import ImpactSearcher, SimpleSearcher
from pyserini.hsearch import HybridSearcher
from pyserini.dsearch.__main__ import define_dsearch_args, init_query_encoder
from pyserini.search.__main__ import define_search_args, set_bm25_parameters
# Fixes this error: "OMP: Error #15: Initializing libomp.a, but found libomp.dylib already initialized."
# https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def define_fusion_args(parser):
parser.add_argument('--alpha', type=float, metavar='num', required=False, default=0.1,
help="alpha for hybrid search")
parser.add_argument('--hits', type=int, required=False, default=10, help='number of hits from dense and sparse')
parser.add_argument('--normalization', action='store_true', required=False, help='hybrid score with normalization')
parser.add_argument('--weight-on-dense', action='store_true', required=False, help='weight on dense part')
def parse_args(parser, commands):
# Divide argv by commands
split_argv = [[]]
for c in sys.argv[1:]:
if c in commands.choices:
split_argv.append([c])
else:
split_argv[-1].append(c)
# Initialize namespace
args = argparse.Namespace()
for c in commands.choices:
setattr(args, c, None)
# Parse each command
parser.parse_args(split_argv[0], namespace=args) # Without command
for argv in split_argv[1:]: # Commands
n = argparse.Namespace()
setattr(args, argv[0], n)
parser.parse_args(argv, namespace=n)
return args
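# Illustration of the splitting above. An invocation such as (the fusion/run
# flags are defined in this file; the dense/sparse --index flags are assumed
# from their usage further down):
#   ... dense --index <dense-index> sparse --index <sparse-index> \
#       fusion --alpha 0.2 run --topics <topics> --output run.trec
# produces one namespace per sub-command, reachable as args.dense, args.sparse,
# args.fusion and args.run.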
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Conduct a hybrid search on dense+sparse indexes.')
commands = parser.add_subparsers(title='sub-commands')
dense_parser = commands.add_parser('dense')
define_dsearch_args(dense_parser)
sparse_parser = commands.add_parser('sparse')
define_search_args(sparse_parser)
fusion_parser = commands.add_parser('fusion')
define_fusion_args(fusion_parser)
run_parser = commands.add_parser('run')
run_parser.add_argument('--topics', type=str, metavar='topic_name', required=False,
help="Name of topics. Available: msmarco-passage-dev-subset.")
run_parser.add_argument('--hits', type=int, metavar='num', required=False, default=1000, help="Number of hits.")
run_parser.add_argument('--topics-format', type=str, metavar='format', default=TopicsFormat.DEFAULT.value,
help=f"Format of topics. Available: {[x.value for x in list(TopicsFormat)]}")
run_parser.add_argument('--output-format', type=str, metavar='format', default=OutputFormat.TREC.value,
help=f"Format of output. Available: {[x.value for x in list(OutputFormat)]}")
run_parser.add_argument('--output', type=str, metavar='path', required=False, help="Path to output file.")
run_parser.add_argument('--max-passage', action='store_true',
default=False, help="Select only max passage from document.")
run_parser.add_argument('--max-passage-hits', type=int, metavar='num', required=False, default=100,
help="Final number of hits when selecting only max passage.")
run_parser.add_argument('--max-passage-delimiter', type=str, metavar='str', required=False, default='#',
help="Delimiter between docid and passage id.")
run_parser.add_argument('--batch-size', type=int, metavar='num', required=False,
default=1, help="Specify batch size to search the collection concurrently.")
run_parser.add_argument('--threads', type=int, metavar='num', required=False,
default=1, help="Maximum number of threads to use.")
args = parse_args(parser, commands)
query_iterator = get_query_iterator(args.run.topics, TopicsFormat(args.run.topics_format))
topics = query_iterator.topics
query_encoder = init_query_encoder(args.dense.encoder,
args.dense.tokenizer,
args.run.topics,
args.dense.encoded_queries,
args.dense.device,
args.dense.query_prefix)
if os.path.exists(args.dense.index):
# create searcher from index directory
dsearcher = SimpleDenseSearcher(args.dense.index, query_encoder)
else:
# create searcher from prebuilt index name
dsearcher = SimpleDenseSearcher.from_prebuilt_index(args.dense.index, query_encoder)
if not dsearcher:
exit()
if os.path.exists(args.sparse.index):
# create searcher from index directory
if args.sparse.impact:
ssearcher = ImpactSearcher(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher(args.sparse.index)
else:
# create searcher from prebuilt index name
if args.sparse.impact:
ssearcher = ImpactSearcher.from_prebuilt_index(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher.from_prebuilt_index(args.sparse.index)
if not ssearcher:
exit()
set_bm25_parameters(ssearcher, args.sparse.index, args.sparse.k1, args.sparse.b)
if args.sparse.language != 'en':
ssearcher.set_language(args.sparse.language)
hsearcher = HybridSearcher(dsearcher, ssearcher)
if not hsearcher:
exit()
# build output path
output_path = args.run.output
print(f'Running {args.run.topics} topics, saving to {output_path}...')
tag = 'hybrid'
output_writer = get_output_writer(output_path, OutputFormat(args.run.output_format), 'w',
max_hits=args.run.hits, tag=tag, topics=topics,
use_max_passage=args.run.max_passage,
max_passage_delimiter=args.run.max_passage_delimiter,
max_passage_hits=args.run.max_passage_hits)
with output_writer:
batch_topics = list()
batch_topic_ids = list()
for index, (topic_id, text) in enumerate(tqdm(query_iterator, total=len(topics.keys()))):
if args.run.batch_size <= 1 and args.run.threads <= 1:
hits = hsearcher.search(text, args.fusion.hits, args.run.hits, args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(topic_id, hits)]
else:
batch_topic_ids.append(str(topic_id))
batch_topics.append(text)
if (index + 1) % args.run.batch_size == 0 or \
index == len(topics.keys()) - 1:
results = hsearcher.batch_search(
batch_topics, batch_topic_ids, args.fusion.hits, args.run.hits, args.run.threads,
args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(id_, results[id_]) for id_ in batch_topic_ids]
batch_topic_ids.clear()
batch_topics.clear()
else:
continue
for topic, hits in results:
output_writer.write(topic, hits)
results.clear()
| [] | [] | ["KMP_DUPLICATE_LIB_OK"] | [] | ["KMP_DUPLICATE_LIB_OK"] | python | 1 | 0 |
apiserver/cmd/apiserver/server/watch.go | // Copyright (c) 2019 Tigera, Inc. All rights reserved.
package server
import (
"fmt"
"os"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
const (
authConfigMap = "extension-apiserver-authentication"
authConfigMapNamespace = "kube-system"
)
// WatchExtensionAuth watches the ConfigMap extension-apiserver-authentication
// and returns true if its resource version changes or a watch event indicates
// it changed. The cfg is used to get a k8s client for getting and watching the
// ConfigMap. If stopChan is closed then the function will return no change.
func WatchExtensionAuth(stopChan chan struct{}) (bool, error) {
//TODO: Use SharedInformerFactory rather than creating new client.
// set up k8s client
// attempt 1: KUBECONFIG env var
cfgFile := os.Getenv("KUBECONFIG")
cfg, err := clientcmd.BuildConfigFromFlags("", cfgFile)
if err != nil {
// attempt 2: in cluster config
if cfg, err = rest.InClusterConfig(); err != nil {
return false, err
}
}
client, err := kubernetes.NewForConfig(cfg)
if err != nil {
return false, fmt.Errorf("Failed to get client to watch extension auth ConfigMap: %v", err)
}
changed := false
synced := false
watcher := cache.NewListWatchFromClient(
client.CoreV1().RESTClient(),
"configmaps",
authConfigMapNamespace,
fields.OneTermEqualSelector("metadata.name", authConfigMap))
_, controller := cache.NewInformer(
watcher,
&corev1.ConfigMap{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(_ interface{}) {
if synced {
changed = true
close(stopChan)
}
},
DeleteFunc: func(_ interface{}) {
if synced {
changed = true
close(stopChan)
}
},
UpdateFunc: func(old, new interface{}) {
if synced {
o := old.(*corev1.ConfigMap)
n := new.(*corev1.ConfigMap)
// Only detect as changed if the version has changed
if o.ResourceVersion != n.ResourceVersion {
changed = true
close(stopChan)
}
}
},
})
go func() {
for !controller.HasSynced() {
time.Sleep(50 * time.Millisecond)
}
synced = true
}()
controller.Run(stopChan)
return changed, nil
}
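// Minimal calling sketch (an assumption about intended usage, not code taken
// from this package's callers):
//
//	stopChan := make(chan struct{})
//	changed, err := WatchExtensionAuth(stopChan)
//	// err != nil: the watch could not be established;
//	// changed: the extension-apiserver-authentication ConfigMap was modified.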
| ["\"KUBECONFIG\""] | [] | ["KUBECONFIG"] | [] | ["KUBECONFIG"] | go | 1 | 0 |
azuredevops/internal/acceptancetests/resource_group_membership_test.go | // +build all core resource_group_membership
// +build !exclude_resource_group_membership
package acceptancetests
import (
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/microsoft/azure-devops-go-api/azuredevops/graph"
"github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/acceptancetests/testutils"
"github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/client"
"github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/utils/converter"
)
// Verifies that the following sequence of events occurs without error:
// (1) TF apply creates resource
// (2) TF state values are set
// (3) Group membership exists and can be queried for
// (4) TF destroy removes group memberships
//
// Note: This will be uncommented in https://github.com/microsoft/terraform-provider-azuredevops/issues/174
//
func TestAccGroupMembership_CreateAndRemove(t *testing.T) {
t.Skip("Skipping test TestAccGroupMembership_CreateAndRemove due to service inconsistent")
projectName := testutils.GenerateResourceName()
userPrincipalName := os.Getenv("AZDO_TEST_AAD_USER_EMAIL")
groupName := "Build Administrators"
tfNode := "azuredevops_group_membership.membership"
tfStanzaWithMembership := testutils.HclGroupMembershipResource(projectName, groupName, userPrincipalName)
tfStanzaWithoutMembership := testutils.HclGroupMembershipDependencies(projectName, groupName, userPrincipalName)
// This test differs from most other acceptance tests in the following ways:
// - The second step is the same as the first except it omits the group membership.
// This lets us test that the membership is removed in isolation of the project being deleted
// - There is no CheckDestroy function because that is covered based on the above point
resource.Test(t, resource.TestCase{
PreCheck: func() { testutils.PreCheck(t, nil) },
Providers: testutils.GetProviders(),
Steps: []resource.TestStep{
{
Config: tfStanzaWithMembership,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet(tfNode, "id"),
resource.TestCheckResourceAttrSet(tfNode, "group"),
resource.TestCheckResourceAttr(tfNode, "members.#", "1"),
checkGroupMembershipMatchesState(),
),
}, {
// remove the group membership
Config: tfStanzaWithoutMembership,
Check: checkGroupMembershipMatchesState(),
},
},
})
}
// Verifies that the group membership in AzDO matches the group membership specified by the state
func checkGroupMembershipMatchesState() resource.TestCheckFunc {
return func(s *terraform.State) error {
memberDescriptor := s.RootModule().Outputs["user_descriptor"].Value.(string)
groupDescriptor := s.RootModule().Outputs["group_descriptor"].Value.(string)
_, expectingMembership := s.RootModule().Resources["azuredevops_group_membership.membership"]
// The sleep here is to take into account some propagation delay that can happen with Group Membership APIs.
// If we want to go inspect the behavior of the service after a Terraform Apply, we'll need to wait a little bit
// before making the API call.
//
// Note: some thought was put behind keeping the time.sleep here vs in the provider implementation. After consideration,
// I decided to keep it here. Moving to the provider would (1) provide no functional benefit to the end user, (2) increase
// complexity and (3) be inconsistent with the UI and CLI behavior for the same operation.
time.Sleep(5 * time.Second)
memberships, err := getMembersOfGroup(groupDescriptor)
if err != nil {
return err
}
if !expectingMembership && len(*memberships) == 0 {
return nil
}
if !expectingMembership && len(*memberships) > 0 {
return fmt.Errorf("unexpectedly found group members: %+v", memberships)
}
if expectingMembership && len(*memberships) == 0 {
return fmt.Errorf("unexpectedly did not find memberships")
}
actualMemberDescriptor := *(*memberships)[0].MemberDescriptor
if !strings.EqualFold(strings.ToLower(actualMemberDescriptor), strings.ToLower(memberDescriptor)) {
return fmt.Errorf("expected member with descriptor %s but member had descriptor %s", memberDescriptor, actualMemberDescriptor)
}
return nil
}
}
// call AzDO API to query for group members
func getMembersOfGroup(groupDescriptor string) (*[]graph.GraphMembership, error) {
clients := testutils.GetProvider().Meta().(*client.AggregatedClient)
return clients.GraphClient.ListMemberships(clients.Ctx, graph.ListMembershipsArgs{
SubjectDescriptor: &groupDescriptor,
Direction: &graph.GraphTraversalDirectionValues.Down,
Depth: converter.Int(1),
})
}
| ["\"AZDO_TEST_AAD_USER_EMAIL\""] | [] | ["AZDO_TEST_AAD_USER_EMAIL"] | [] | ["AZDO_TEST_AAD_USER_EMAIL"] | go | 1 | 0 |
litmus-portal/backend/auth/pkg/models/user.go | package models
import (
"log"
"os"
"time"
"github.com/litmuschaos/litmus/litmus-portal/backend/auth/pkg/types"
)
func init() {
if os.Getenv("ADMIN_USERNAME") == "" || os.Getenv("ADMIN_PASSWORD") == "" {
log.Fatal("Environment variables ADMIN_USERNAME or ADMIN_PASSWORD are not set")
}
}
//UserCredentials contains the user information
type UserCredentials struct {
ID string `bson:"_id"`
UserName string `bson:"username"`
Password string `bson:"password"`
Email string `bson:"email"`
Name string `bson:"name"`
// UserID string `bson:"user_id"`
LoggedIn bool `bson:"logged_in"`
SocialAuths []SocialAuth `bson:"social_auths"`
CreatedAt *time.Time `bson:"created_at"`
UpdatedAt *time.Time `bson:"updated_at"`
RemovedAt *time.Time `bson:"removed_at"`
State State `bson:"state"`
}
//DefaultUser is the admin user created by default
var DefaultUser *UserCredentials = &UserCredentials{
UserName: types.DefaultUserName,
Password: types.DefaultUserPassword,
}
//PublicUserInfo displays the information of the user that is publicly available
type PublicUserInfo struct {
ID string `json:"_id"`
UserName string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
LoggedIn bool `json:"logged_in"`
CreatedAt *time.Time `json:"created_at"`
UpdatedAt *time.Time `json:"updated_at"`
RemovedAt *time.Time `json:"removed_at"`
State State `json:"state"`
}
//SocialAuth contains the oauth types and related information opted by the user
type SocialAuth struct {
Type string `bson:"type"`
AccessToken string `bson:"access_token"`
RefreshToken string `bson:"refresh_token"`
Expiry *time.Time `bson:"expiry"`
TokenType string `bson:"token_type"`
}
//State is the current state of the database entry of the user
type State string
const (
//StateCreating means this entry is being created yet
StateCreating State = "creating"
//StateActive means this entry is active
StateActive State = "active"
//StateRemoving means this entry is being removed
StateRemoving State = "removing"
//StateRemoved means this entry has been removed
StateRemoved State = "removed"
)
// GetID user id
func (u *UserCredentials) GetID() string {
return u.ID
}
// GetUserName user username
func (u *UserCredentials) GetUserName() string {
return u.UserName
}
// GetPassword user password
func (u *UserCredentials) GetPassword() string {
return u.Password
}
// GetEmail user email
func (u *UserCredentials) GetEmail() string {
return u.Email
}
// GetName returns user name
func (u *UserCredentials) GetName() string {
return u.Name
}
// GetSocialAuths returns all the social authentications of the user
func (u *UserCredentials) GetSocialAuths() []SocialAuth {
return u.SocialAuths
}
// GetCreatedAt defines the time at which this user was created
func (u *UserCredentials) GetCreatedAt() *time.Time {
return u.CreatedAt
}
// GetUpdatedAt defines the time at which user was last updated
func (u *UserCredentials) GetUpdatedAt() *time.Time {
return u.UpdatedAt
}
// GetRemovedAt defines the time at which this user was removed
func (u *UserCredentials) GetRemovedAt() *time.Time {
return u.RemovedAt
}
// GetState user state
func (u *UserCredentials) GetState() State {
return u.State
}
// GetLoggedIn user loggedIn
func (u *UserCredentials) GetLoggedIn() bool {
return u.LoggedIn
}
// GetPublicInfo fetches the PublicUserInfo from User
func (u *UserCredentials) GetPublicInfo() *PublicUserInfo {
return &PublicUserInfo{
Name: u.GetName(),
UserName: u.GetUserName(),
Email: u.GetEmail(),
ID: u.GetID(),
LoggedIn: u.GetLoggedIn(),
CreatedAt: u.GetCreatedAt(),
UpdatedAt: u.GetUpdatedAt(),
RemovedAt: u.GetRemovedAt(),
State: u.GetState(),
}
}
// GetUserName user username
func (uinfo *PublicUserInfo) GetUserName() string {
return uinfo.UserName
}
// GetName user name
func (uinfo *PublicUserInfo) GetName() string {
return uinfo.Name
}
// GetEmail user email
func (uinfo *PublicUserInfo) GetEmail() string {
return uinfo.Email
}
// GetCreatedAt user createdAt
func (uinfo *PublicUserInfo) GetCreatedAt() *time.Time {
return uinfo.CreatedAt
}
// GetID user ID
func (uinfo *PublicUserInfo) GetID() string {
return uinfo.ID
}
// GetLoggedIn user loggedIn
func (uinfo *PublicUserInfo) GetLoggedIn() bool {
return uinfo.LoggedIn
}
// GetUpdatedAt user updatedAt
func (uinfo *PublicUserInfo) GetUpdatedAt() *time.Time {
return uinfo.UpdatedAt
}
// GetRemovedAt user removedAt
func (uinfo *PublicUserInfo) GetRemovedAt() *time.Time {
return uinfo.RemovedAt
}
// GetState user state
func (uinfo *PublicUserInfo) GetState() State {
return uinfo.State
}
// GetType returns auth type
func (s *SocialAuth) GetType() string {
return s.Type
}
// GetAccessToken returns auth token
func (s *SocialAuth) GetAccessToken() string {
return s.AccessToken
}
// GetRefreshToken returns refresh token
func (s *SocialAuth) GetRefreshToken() string {
return s.RefreshToken
}
// GetTokenType returns token type
func (s *SocialAuth) GetTokenType() string {
return s.TokenType
}
// GetExpiry returns auth type
func (s *SocialAuth) GetExpiry() *time.Time {
return s.Expiry
}
| ["\"ADMIN_USERNAME\"", "\"ADMIN_PASSWORD\""] | [] | ["ADMIN_USERNAME", "ADMIN_PASSWORD"] | [] | ["ADMIN_USERNAME", "ADMIN_PASSWORD"] | go | 2 | 0 | |
var/spack/repos/builtin/packages/darshan-runtime/package.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class DarshanRuntime(AutotoolsPackage):
"""Darshan (runtime) is a scalable HPC I/O characterization tool
designed to capture an accurate picture of application I/O behavior,
including properties such as patterns of access within files, with
minimum overhead. DarshanRuntime package should be installed on
systems where you intend to instrument MPI applications."""
homepage = "https://www.mcs.anl.gov/research/projects/darshan/"
url = "https://ftp.mcs.anl.gov/pub/darshan/releases/darshan-3.1.0.tar.gz"
git = "https://github.com/darshan-hpc/darshan.git"
maintainers = ['shanedsnyder', 'carns']
tags = ['e4s']
test_requires_compiler = True
version('main', branch='main', submodules=True)
version('3.4.0-pre1', sha256='57d0fd40329b9f8a51bdc9d7635b646692b341d80339115ab203357321706c09')
version('3.3.1', sha256='281d871335977d0592a49d053df93d68ce1840f6fdec27fea7a59586a84395f7', preferred=True)
version('3.3.0', sha256='2e8bccf28acfa9f9394f2084ec18122c66e45d966087fa2e533928e824fcb57a')
version('3.3.0-pre2', sha256='0fc09f86f935132b7b05df981b05cdb3796a1ea02c7acd1905323691df65e761')
version('3.3.0-pre1', sha256='1c655359455b5122921091bab9961491be58a5f0158f073d09fe8cc772bd0812')
version('3.2.1', sha256='d63048b7a3d1c4de939875943e3e7a2468a9034fcb68585edbc87f57f622e7f7')
version('3.2.0', sha256='4035435bdc0fa2a678247fbf8d5a31dfeb3a133baf06577786b1fe8d00a31b7e')
version('3.1.8', sha256='3ed51c8d5d93b4a8cbb7d53d13052140a9dffe0bc1a3e1ebfc44a36a184b5c82')
version('3.1.7', sha256='9ba535df292727ac1e8025bdf2dc42942715205cad8319d925723fd88709e8d6')
version('3.1.6', sha256='21cb24e2a971c45e04476e00441b7fbea63d2afa727a5cf8b7a4a9d9004dd856')
version('3.1.0', sha256='b847047c76759054577823fbe21075cfabb478cdafad341d480274fb1cef861c')
version('3.0.0', sha256='95232710f5631bbf665964c0650df729c48104494e887442596128d189da43e0')
depends_on('mpi', when='+mpi')
depends_on('zlib')
depends_on('hdf5', when='+hdf5')
depends_on('papi', when='+apxc')
depends_on('autoconf', type='build', when='@main')
depends_on('automake', type='build', when='@main')
depends_on('libtool', type='build', when='@main')
depends_on('m4', type='build', when='@main')
depends_on('autoconf', type='build', when='@3.4.0:')
depends_on('automake', type='build', when='@3.4.0:')
depends_on('libtool', type='build', when='@3.4.0:')
depends_on('m4', type='build', when='@3.4.0:')
variant('mpi', default=True, description='Compile with MPI support')
variant('hdf5', default=False, description='Compile with HDF5 module')
variant('apmpi', default=False, description='Compile with AutoPerf MPI module')
variant('apmpi_sync', default=False, description='Compile with AutoPerf MPI module (with collective synchronization timing)')
variant('apxc', default=False, description='Compile with AutoPerf XC module')
variant(
'scheduler',
default='NONE',
description='queue system scheduler JOB ID',
values=('NONE', 'cobalt', 'pbs', 'sge', 'slurm'),
multi=False
)
conflicts('+hdf5', when='@:3.1.8',
msg='+hdf5 variant only available starting from version 3.2.0')
conflicts('+apmpi', when='@:3.2.1',
msg='+apmpi variant only available starting from version 3.3.0')
conflicts('+apmpi_sync', when='@:3.2.1',
msg='+apmpi variant only available starting from version 3.3.0')
conflicts('+apxc', when='@:3.2.1',
msg='+apxc variant only available starting from version 3.3.0')
@property
def configure_directory(self):
return 'darshan-runtime'
def configure_args(self):
spec = self.spec
extra_args = []
job_id = 'NONE'
if '+slurm' in spec:
job_id = 'SLURM_JOBID'
if '+cobalt' in spec:
job_id = 'COBALT_JOBID'
if '+pbs' in spec:
job_id = 'PBS_JOBID'
if '+sge' in spec:
job_id = 'JOB_ID'
if '+hdf5' in spec:
if self.version < Version('3.3.2'):
extra_args.append('--enable-hdf5-mod=%s' % spec['hdf5'].prefix)
else:
extra_args.append('--enable-hdf5-mod')
if '+apmpi' in spec:
extra_args.append('--enable-apmpi-mod')
if '+apmpi_sync' in spec:
            extra_args.extend(['--enable-apmpi-mod',
                               '--enable-apmpi-coll-sync'])
if '+apxc' in spec:
            extra_args.extend(['--enable-apxc-mod'])
extra_args.append('--with-mem-align=8')
extra_args.append('--with-log-path-by-env=DARSHAN_LOG_DIR_PATH')
extra_args.append('--with-jobid-env=%s' % job_id)
extra_args.append('--with-zlib=%s' % spec['zlib'].prefix)
if '+mpi' in spec:
extra_args.append('CC=%s' % self.spec['mpi'].mpicc)
else:
extra_args.append('CC=%s' % self.compiler.cc)
extra_args.append('--without-mpi')
return extra_args
def setup_run_environment(self, env):
# default path for log file, could be user or site specific setting
darshan_log_dir = os.environ['HOME']
env.set('DARSHAN_LOG_DIR_PATH', darshan_log_dir)
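        # e.g. with HOME=/home/alice (hypothetical), instrumented applications write
        # their darshan logs under /home/alice unless DARSHAN_LOG_DIR_PATH is overridden.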
@property
def basepath(self):
return join_path('darshan-test',
join_path('regression',
join_path('test-cases', 'src')))
@run_after('install')
def _copy_test_inputs(self):
test_inputs = [
join_path(self.basepath, 'mpi-io-test.c')]
self.cache_extra_test_sources(test_inputs)
def _test_intercept(self):
testdir = "intercept-test"
with working_dir(testdir, create=True):
if '+mpi' in self.spec:
# compile a test program
logname = join_path(os.getcwd(), "test.darshan")
fname = join_path(self.test_suite.current_test_cache_dir,
join_path(self.basepath, 'mpi-io-test.c'))
cc = Executable(self.spec['mpi'].mpicc)
compile_opt = ['-c', fname]
link_opt = ['-o', "mpi-io-test", 'mpi-io-test.o']
cc(*(compile_opt))
cc(*(link_opt))
# run test program and intercept
purpose = "Test running code built against darshan"
exe = "./mpi-io-test"
options = ['-f', 'tmp.dat']
status = [0]
installed = False
expected_output = [r"Write bandwidth = \d+.\d+ Mbytes/sec",
r"Read bandwidth = \d+.\d+ Mbytes/sec"]
env['LD_PRELOAD'] = 'libdarshan.so'
env['DARSHAN_LOGFILE'] = logname
self.run_test(exe,
options,
expected_output,
status,
installed,
purpose,
skip_missing=False,
work_dir=None)
env.pop('LD_PRELOAD')
import llnl.util.tty as tty
# verify existence of log and size is > 0
tty.msg("Test for existince of log:")
if os.path.exists(logname):
sr = os.stat(logname)
print("PASSED")
tty.msg("Test for size of log:")
if not sr.st_size > 0:
exc = BaseException('log size is 0')
m = None
if spack.config.get('config:fail_fast', False):
raise TestFailure([(exc, m)])
else:
self.test_failures.append((exc, m))
else:
print("PASSED")
else:
exc = BaseException('log does not exist')
m = None
if spack.config.get('config:fail_fast', False):
raise TestFailure([(exc, m)])
else:
self.test_failures.append((exc, m))
def test(self):
self._test_intercept()
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
soracom/generated/cmd/soralets_list_versions.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"net/url"
"os"
"github.com/spf13/cobra"
)
// SoraletsListVersionsCmdLastEvaluatedKey holds value of 'last_evaluated_key' option
var SoraletsListVersionsCmdLastEvaluatedKey string
// SoraletsListVersionsCmdSoraletId holds value of 'soralet_id' option
var SoraletsListVersionsCmdSoraletId string
// SoraletsListVersionsCmdSort holds value of 'sort' option
var SoraletsListVersionsCmdSort string
// SoraletsListVersionsCmdLimit holds value of 'limit' option
var SoraletsListVersionsCmdLimit int64
// SoraletsListVersionsCmdPaginate indicates to do pagination or not
var SoraletsListVersionsCmdPaginate bool
func init() {
SoraletsListVersionsCmd.Flags().StringVar(&SoraletsListVersionsCmdLastEvaluatedKey, "last-evaluated-key", "", TRAPI("The identifier of the last version retrieved on the current page. By specifying this parameter, you can continue to retrieve the list from the next version onward."))
SoraletsListVersionsCmd.Flags().StringVar(&SoraletsListVersionsCmdSoraletId, "soralet-id", "", TRAPI("The identifier of Soralet."))
SoraletsListVersionsCmd.Flags().StringVar(&SoraletsListVersionsCmdSort, "sort", "desc", TRAPI("Sort order"))
SoraletsListVersionsCmd.Flags().Int64Var(&SoraletsListVersionsCmdLimit, "limit", 0, TRAPI("The maximum number of items in a response."))
SoraletsListVersionsCmd.Flags().BoolVar(&SoraletsListVersionsCmdPaginate, "fetch-all", false, TRCLI("cli.common_params.paginate.short_help"))
SoraletsCmd.AddCommand(SoraletsListVersionsCmd)
}
// SoraletsListVersionsCmd defines 'list-versions' subcommand
var SoraletsListVersionsCmd = &cobra.Command{
Use: "list-versions",
Short: TRAPI("/soralets/{soralet_id}/versions:get:summary"),
Long: TRAPI(`/soralets/{soralet_id}/versions:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectSoraletsListVersionsCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectSoraletsListVersionsCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("soralet_id", "soralet-id", "path", parsedBody, SoraletsListVersionsCmdSoraletId)
if err != nil {
return nil, err
}
return &apiParams{
method: "GET",
path: buildPathForSoraletsListVersionsCmd("/soralets/{soralet_id}/versions"),
query: buildQueryForSoraletsListVersionsCmd(),
doPagination: SoraletsListVersionsCmdPaginate,
paginationKeyHeaderInResponse: "x-soracom-next-key",
paginationRequestParameterInQuery: "last_evaluated_key",
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForSoraletsListVersionsCmd(path string) string {
escapedSoraletId := url.PathEscape(SoraletsListVersionsCmdSoraletId)
path = strReplace(path, "{"+"soralet_id"+"}", escapedSoraletId, -1)
return path
}
func buildQueryForSoraletsListVersionsCmd() url.Values {
result := url.Values{}
if SoraletsListVersionsCmdLastEvaluatedKey != "" {
result.Add("last_evaluated_key", SoraletsListVersionsCmdLastEvaluatedKey)
}
if SoraletsListVersionsCmdSort != "desc" {
result.Add("sort", SoraletsListVersionsCmdSort)
}
if SoraletsListVersionsCmdLimit != 0 {
result.Add("limit", sprintf("%d", SoraletsListVersionsCmdLimit))
}
return result
}
| ["\"SORACOM_VERBOSE\""] | [] | ["SORACOM_VERBOSE"] | [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
gimme_aws_creds/ui.py | """
Copyright 2018-present Krzysztof Nazarewski.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import builtins
import os
import sys
class UserInterface:
def __init__(self, environ=os.environ, argv=sys.argv):
self.environ = environ.copy()
self.environ_bkp = None
self.argv = argv[:]
self.argv_bkp = None
self.args = self.argv[1:]
with self:
self.HOME = os.path.expanduser('~')
def result(self, result):
"""handles output lines
:type result: str
"""
raise NotImplementedError()
def prompt(self, message):
"""handles input's prompt message, but does not ask for input
:type message: str
"""
raise NotImplementedError()
def message(self, message):
"""handles messages meant for user interactions
:type message: str
"""
raise NotImplementedError()
def read_input(self):
"""returns user input
:rtype: str
"""
raise NotImplementedError()
def notify(self, message):
"""handles messages meant for user notifications
:type message: str
"""
raise NotImplementedError()
def input(self, message=None):
"""handles asking for user input, calls prompt() then read_input()
:type message: str
:rtype: str
"""
self.prompt(message)
return self.read_input()
def info(self, message):
"""handles messages meant for info
:type message: str
"""
self.notify(message)
def warning(self, message):
"""handles messages meant for warnings
:type message: str
"""
self.notify(message)
def error(self, message):
"""handles messages meant for errors
:type message: str
"""
self.notify(message)
def __enter__(self):
self.environ_bkp = os.environ
self.argv_bkp = sys.argv
os.environ = self.environ
sys.argv = sys.argv[:1] + self.args
return self
def __exit__(self, exc_type, exc_val, exc_tb):
os.environ = self.environ_bkp
sys.argv = self.argv_bkp
self.environ_bkp = None
self.argv_bkp = None
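# Minimal usage sketch (hypothetical values, not part of the original docs): entering
# a UserInterface swaps in its private copies of os.environ / sys.argv, and __exit__
# restores the originals:
#
#   ui = CLIUserInterface(environ={"HOME": "/tmp/home"}, argv=["prog", "--region", "us-east-1"])
#   with ui:
#       ...  # code here sees the substituted environment and arguments
#   # original os.environ and sys.argv are restored on exit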
class CLIUserInterface(UserInterface):
def result(self, result):
builtins.print(result, file=sys.stdout)
def prompt(self, message=None):
if message is not None:
builtins.print(message, file=sys.stderr, end='')
def message(self, message):
builtins.print(message, file=sys.stderr)
def read_input(self):
return builtins.input()
def notify(self, message):
builtins.print(message, file=sys.stderr)
cli = CLIUserInterface()
default = cli
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/scripts/install.py | #!/usr/bin/env python
"""
Botan install script
(C) 2014,2015,2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import errno
import json
import logging
import optparse # pylint: disable=deprecated-module
import os
import shutil
import sys
import subprocess
def parse_command_line(args):
parser = optparse.OptionParser()
parser.add_option('--verbose', action='store_true', default=False,
help='Show debug messages')
parser.add_option('--quiet', action='store_true', default=False,
help='Show only warnings and errors')
build_group = optparse.OptionGroup(parser, 'Source options')
build_group.add_option('--build-dir', metavar='DIR', default='build',
help='Location of build output (default \'%default\')')
parser.add_option_group(build_group)
install_group = optparse.OptionGroup(parser, 'Installation options')
install_group.add_option('--prefix', default='/usr/local',
help='Set output directory (default %default)')
install_group.add_option('--bindir', default='bin', metavar='DIR',
help='Set binary subdir (default %default)')
install_group.add_option('--libdir', default='lib', metavar='DIR',
help='Set library subdir (default %default)')
install_group.add_option('--includedir', default='include', metavar='DIR',
help='Set include subdir (default %default)')
install_group.add_option('--docdir', default='share/doc', metavar='DIR',
help='Set documentation subdir (default %default)')
install_group.add_option('--pkgconfigdir', default='pkgconfig', metavar='DIR',
help='Set pkgconfig subdir (default %default)')
install_group.add_option('--umask', metavar='MASK', default='022',
help='Umask to set (default %default)')
parser.add_option_group(install_group)
(options, args) = parser.parse_args(args)
def log_level():
if options.verbose:
return logging.DEBUG
if options.quiet:
return logging.WARNING
return logging.INFO
logging.getLogger().setLevel(log_level())
return (options, args)
class PrependDestdirError(Exception):
pass
def is_subdir(path, subpath):
return os.path.relpath(path, start=subpath).startswith("..")
def prepend_destdir(path):
"""
Needed because os.path.join() discards the first path if the
second one is absolute, which is usually the case here. Still, we
want relative paths to work and leverage the os awareness of
os.path.join().
"""
destdir = os.environ.get('DESTDIR', "")
if destdir:
# DESTDIR is non-empty, but we only join absolute paths on UNIX-like file systems
if os.path.sep != "/":
raise PrependDestdirError("Only UNIX-like file systems using forward slash " \
"separator supported when DESTDIR is set.")
if not os.path.isabs(path):
raise PrependDestdirError("--prefix must be an absolute path when DESTDIR is set.")
path = os.path.normpath(path)
    # Remove / or \ prefixes if present to accommodate os.path.join()
path = path.lstrip(os.path.sep)
path = os.path.join(destdir, path)
if not is_subdir(destdir, path):
raise PrependDestdirError("path escapes DESTDIR (path='%s', destdir='%s')" % (path, destdir))
return path
def makedirs(dirname, exist_ok=True):
try:
logging.debug('Creating directory %s' % (dirname))
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST or not exist_ok:
raise e
# Clear link and create new one
def force_symlink(target, linkname):
try:
os.unlink(linkname)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
os.symlink(target, linkname)
def calculate_exec_mode(options):
out = 0o777
if 'umask' in os.__dict__:
umask = int(options.umask, 8)
logging.debug('Setting umask to %s' % oct(umask))
os.umask(int(options.umask, 8))
out &= (umask ^ 0o777)
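        # Worked example (assuming the default --umask of 022):
        #   0o022 ^ 0o777 == 0o755 and 0o777 & 0o755 == 0o755,
        # so installed executables end up with mode 0755.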
return out
def main(args):
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
logging.basicConfig(stream=sys.stdout,
format='%(levelname) 7s: %(message)s')
(options, args) = parse_command_line(args)
exe_mode = calculate_exec_mode(options)
def copy_file(src, dst):
logging.debug('Copying %s to %s' % (src, dst))
shutil.copyfile(src, dst)
def copy_executable(src, dst):
copy_file(src, dst)
logging.debug('Make %s executable' % dst)
os.chmod(dst, exe_mode)
with open(os.path.join(options.build_dir, 'build_config.json')) as f:
cfg = json.load(f)
ver_major = int(cfg['version_major'])
ver_minor = int(cfg['version_minor'])
ver_patch = int(cfg['version_patch'])
target_os = cfg['os']
build_shared_lib = bool(cfg['build_shared_lib'])
build_static_lib = bool(cfg['build_static_lib'])
out_dir = cfg['out_dir']
bin_dir = os.path.join(options.prefix, options.bindir)
lib_dir = os.path.join(options.prefix, options.libdir)
target_include_dir = os.path.join(options.prefix,
options.includedir,
'botan-%d' % (ver_major),
'botan')
for d in [options.prefix, lib_dir, bin_dir, target_include_dir]:
makedirs(prepend_destdir(d))
build_include_dir = os.path.join(options.build_dir, 'include', 'botan')
for include in sorted(os.listdir(build_include_dir)):
if include == 'internal':
continue
copy_file(os.path.join(build_include_dir, include),
prepend_destdir(os.path.join(target_include_dir, include)))
build_external_include_dir = os.path.join(options.build_dir, 'include', 'external')
for include in sorted(os.listdir(build_external_include_dir)):
copy_file(os.path.join(build_external_include_dir, include),
prepend_destdir(os.path.join(target_include_dir, include)))
if build_static_lib or target_os == 'windows':
static_lib = cfg['static_lib_name']
copy_file(os.path.join(out_dir, static_lib),
prepend_destdir(os.path.join(lib_dir, os.path.basename(static_lib))))
if build_shared_lib:
if target_os == "windows":
libname = cfg['libname']
soname_base = libname + '.dll'
copy_executable(os.path.join(out_dir, soname_base),
prepend_destdir(os.path.join(lib_dir, soname_base)))
else:
soname_patch = cfg['soname_patch']
soname_abi = cfg['soname_abi']
soname_base = cfg['soname_base']
copy_executable(os.path.join(out_dir, soname_patch),
prepend_destdir(os.path.join(lib_dir, soname_patch)))
if target_os != "openbsd":
prev_cwd = os.getcwd()
try:
os.chdir(prepend_destdir(lib_dir))
force_symlink(soname_patch, soname_abi)
force_symlink(soname_patch, soname_base)
finally:
os.chdir(prev_cwd)
copy_executable(cfg['cli_exe'], prepend_destdir(os.path.join(bin_dir, cfg['cli_exe_name'])))
# On Darwin, if we are using shared libraries and we install, we should fix
# up the library name, otherwise the botan command won't work; ironically
# we only need to do this because we previously changed it from a setting
# that would be correct for installation to one that lets us run it from
# the build directory
if target_os == 'darwin' and build_shared_lib:
soname_abi = cfg['soname_abi']
subprocess.check_call(['install_name_tool',
'-change',
os.path.join('@executable_path', soname_abi),
os.path.join(lib_dir, soname_abi),
os.path.join(bin_dir, cfg['cli_exe_name'])])
if 'botan_pkgconfig' in cfg:
pkgconfig_dir = os.path.join(options.prefix, options.libdir, options.pkgconfigdir)
makedirs(prepend_destdir(pkgconfig_dir))
copy_file(cfg['botan_pkgconfig'],
prepend_destdir(os.path.join(pkgconfig_dir, os.path.basename(cfg['botan_pkgconfig']))))
if 'ffi' in cfg['mod_list']:
for ver in cfg['python_version'].split(','):
py_lib_path = os.path.join(lib_dir, 'python%s' % (ver), 'site-packages')
logging.debug('Installing python module to %s' % (py_lib_path))
makedirs(prepend_destdir(py_lib_path))
py_dir = cfg['python_dir']
copy_file(os.path.join(py_dir, 'botan2.py'),
prepend_destdir(os.path.join(py_lib_path, 'botan2.py')))
if cfg['with_documentation']:
target_doc_dir = os.path.join(options.prefix, options.docdir,
'botan-%d.%d.%d' % (ver_major, ver_minor, ver_patch))
shutil.rmtree(prepend_destdir(target_doc_dir), True)
shutil.copytree(cfg['doc_output_dir'], prepend_destdir(target_doc_dir))
copy_file(os.path.join(cfg['base_dir'], 'license.txt'),
prepend_destdir(os.path.join(target_doc_dir, 'license.txt')))
copy_file(os.path.join(cfg['base_dir'], 'news.rst'),
prepend_destdir(os.path.join(target_doc_dir, 'news.txt')))
for f in [f for f in os.listdir(cfg['doc_dir']) if f.endswith('.txt')]:
copy_file(os.path.join(cfg['doc_dir'], f), prepend_destdir(os.path.join(target_doc_dir, f)))
if cfg['with_rst2man']:
man1_dir = prepend_destdir(os.path.join(options.prefix, os.path.join(cfg['mandir'], 'man1')))
makedirs(man1_dir)
copy_file(os.path.join(cfg['build_dir'], 'botan.1'),
os.path.join(man1_dir, 'botan.1'))
logging.info('Botan %s installation complete', cfg['version'])
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except Exception as e: # pylint: disable=broad-except
logging.error('Failure: %s' % (e))
import traceback
logging.info(traceback.format_exc())
sys.exit(1)
| [] | [] | ["DESTDIR"] | [] | ["DESTDIR"] | python | 1 | 0 | |
tests/tracking/_model_registry/test_utils.py | import os
import pytest
from unittest import mock
from mlflow.exceptions import MlflowException
from mlflow.store.db.db_types import DATABASE_ENGINES
from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore
from mlflow.store.model_registry.rest_store import RestStore
from mlflow.tracking._model_registry.utils import _get_store, get_registry_uri, set_registry_uri
from mlflow.tracking._tracking_service.utils import _TRACKING_URI_ENV_VAR
# Disable mocking tracking URI here, as we want to test setting the tracking URI via
# environment variable. See
# http://doc.pytest.org/en/latest/skipping.html#skip-all-test-functions-of-a-class-or-module
# and https://github.com/mlflow/mlflow/blob/master/CONTRIBUTING.rst#writing-python-tests
# for more information.
pytestmark = pytest.mark.notrackingurimock
def test_set_get_registry_uri():
with mock.patch(
"mlflow.tracking._model_registry.utils.get_tracking_uri"
) as get_tracking_uri_mock:
get_tracking_uri_mock.return_value = "databricks://tracking_sldkfj"
uri = "databricks://registry/path"
set_registry_uri(uri)
assert get_registry_uri() == uri
set_registry_uri(None)
def test_set_get_empty_registry_uri():
with mock.patch(
"mlflow.tracking._model_registry.utils.get_tracking_uri"
) as get_tracking_uri_mock:
get_tracking_uri_mock.return_value = None
set_registry_uri("")
assert get_registry_uri() is None
set_registry_uri(None)
def test_default_get_registry_uri_no_tracking_uri():
with mock.patch(
"mlflow.tracking._model_registry.utils.get_tracking_uri"
) as get_tracking_uri_mock:
get_tracking_uri_mock.return_value = None
set_registry_uri(None)
assert get_registry_uri() is None
def test_default_get_registry_uri_with_tracking_uri_set():
tracking_uri = "databricks://tracking_werohoz"
with mock.patch(
"mlflow.tracking._model_registry.utils.get_tracking_uri"
) as get_tracking_uri_mock:
get_tracking_uri_mock.return_value = tracking_uri
set_registry_uri(None)
assert get_registry_uri() == tracking_uri
def test_get_store_rest_store_from_arg():
env = {_TRACKING_URI_ENV_VAR: "https://my-tracking-server:5050"} # should be ignored
with mock.patch.dict(os.environ, env):
store = _get_store("http://some/path")
assert isinstance(store, RestStore)
assert store.get_host_creds().host == "http://some/path"
def test_fallback_to_tracking_store():
env = {_TRACKING_URI_ENV_VAR: "https://my-tracking-server:5050"}
with mock.patch.dict(os.environ, env):
store = _get_store()
assert isinstance(store, RestStore)
assert store.get_host_creds().host == "https://my-tracking-server:5050"
assert store.get_host_creds().token is None
@pytest.mark.parametrize("db_type", DATABASE_ENGINES)
def test_get_store_sqlalchemy_store(db_type):
patch_create_engine = mock.patch("sqlalchemy.create_engine")
uri = "{}://hostname/database".format(db_type)
env = {_TRACKING_URI_ENV_VAR: uri}
with mock.patch.dict(os.environ, env), patch_create_engine as mock_create_engine, mock.patch(
"mlflow.store.model_registry.sqlalchemy_store.SqlAlchemyStore."
"_verify_registry_tables_exist"
):
store = _get_store()
assert isinstance(store, SqlAlchemyStore)
assert store.db_uri == uri
mock_create_engine.assert_called_once_with(uri, pool_pre_ping=True)
@pytest.mark.parametrize("bad_uri", ["badsql://imfake", "yoursql://hi"])
def test_get_store_bad_uris(bad_uri):
env = {_TRACKING_URI_ENV_VAR: bad_uri}
with mock.patch.dict(os.environ, env), pytest.raises(MlflowException):
_get_store()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
blog/blog.go | package blog
import (
"errors"
"goblog/auth"
"log"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
"gorm.io/gorm"
"github.com/ikeikeikeike/go-sitemap-generator/v2/stm"
)
// Blog API handles non-admin functions of the blog like listing posts, tags
// comments, etc.
type Blog struct {
db *gorm.DB
auth auth.IAuth
Version string
}
//New constructs an Admin API
func New(db *gorm.DB, auth auth.IAuth, version string) Blog {
api := Blog{db, auth, version}
return api
}
//Generic Functions (not JSON or HTML)
func (b Blog) GetPosts() []Post {
var posts []Post
b.db.Preload("Tags").Order("created_at desc").Find(&posts)
return posts
}
func (b Blog) getTags() []Tag {
var tags []Tag
b.db.Preload("Posts").Order("name asc").Find(&tags)
return tags
}
func (b Blog) GetPostObject(c *gin.Context) (*Post, error) {
var post Post
year, err := strconv.Atoi(c.Param("yyyy"))
if err != nil {
return nil, errors.New("year must be an integer")
}
month, err := strconv.Atoi(c.Param("mm"))
if err != nil {
return nil, errors.New("month must be an integer")
}
day, err := strconv.Atoi(c.Param("dd"))
if err != nil {
return nil, errors.New("day must be an integer")
}
slug := c.Param("slug")
slug = url.QueryEscape(slug)
log.Println("Looking for post: ", year, "/", month, "/", day, "/", slug)
if err := b.db.Preload("Tags").Where("created_at > ? AND slug LIKE ?", time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC), slug).First(&post).Error; err != nil {
return nil, errors.New("No post at " + strconv.Itoa(year) + "/" + strconv.Itoa(month) + "/" + strconv.Itoa(day) + "/" + slug)
}
//b.db.Model(&post).Related(&post.Tags, "Tags")
log.Println("Found: ", post.Title, " TAGS: ", post.Tags)
return &post, nil
}
func (b Blog) getPostByParams(year int, month int, day int, slug string) (*Post, error) {
log.Println("trying: " + strconv.Itoa(year) + "/" + strconv.Itoa(month) + "/" + strconv.Itoa(day) + "/" + slug)
var post Post
slug = url.QueryEscape(slug)
if err := b.db.Preload("Tags").Where("created_at > ? AND slug LIKE ?", time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC), slug).First(&post).Error; err != nil {
log.Println("NOT FOUND")
return nil, errors.New("No post at " + strconv.Itoa(year) + "/" + strconv.Itoa(month) + "/" + strconv.Itoa(day) + "/" + slug)
}
log.Println("Found: ", post.Title, " TAGS: ", post.Tags)
return &post, nil
}
func (b Blog) getPostsByTag(c *gin.Context) ([]Post, error) {
var posts []Post
var tag Tag
name := c.Param("name")
if err := b.db.Where("name = ?", name).First(&tag).Error; err != nil {
return nil, errors.New("No tag named " + name)
}
b.db.Model(&tag).Order("created_at desc").Association("Posts").Find(&posts)
log.Print("POSTS: ", posts)
return posts, nil
}
//////JSON API///////
//ListPosts lists all blog posts
func (b Blog) ListPosts(c *gin.Context) {
c.JSON(http.StatusOK, b.GetPosts())
}
//GetPost returns a post with yyyy/mm/dd/slug
func (b Blog) GetPost(c *gin.Context) {
post, err := b.GetPostObject(c)
if err != nil {
log.Println("Bad request in GetPost: " + err.Error())
c.JSON(http.StatusBadRequest, err)
}
if post == nil {
c.JSON(http.StatusNotFound, "Post Not Found")
}
c.JSON(http.StatusOK, post)
}
//////HTML API///////
//NoRoute returns a custom 404 page
func (b Blog) NoRoute(c *gin.Context) {
tokens := strings.Split(c.Request.URL.String(), "/")
	// the request path starts with "/", so strings.Split leaves an empty first token
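	// e.g. a request for "/2021/05/04/my-post" (hypothetical URL) splits into
	// ["", "2021", "05", "04", "my-post"]: tokens[1..3] carry the date, tokens[4] the slug.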
if len(tokens) >= 5 {
year, _ := strconv.Atoi(tokens[1])
month, _ := strconv.Atoi(tokens[2])
day, _ := strconv.Atoi(tokens[3])
post, err := b.getPostByParams(year, month, day, tokens[4])
if err == nil && post != nil {
if b.auth.IsAdmin(c) {
c.HTML(http.StatusOK, "post-admin.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"post": post,
"version": b.Version,
})
} else {
c.HTML(http.StatusOK, "post.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"post": post,
"version": b.Version,
})
}
return
}
} else {
log.Println("TOKEN LEN: " + strconv.Itoa(len(tokens)))
for _, s := range tokens {
log.Println(s)
}
}
c.HTML(http.StatusNotFound, "error.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"error": "404: Page Not Found",
"description": "The page at '" + c.Request.URL.String() + "' was not found",
"version": b.Version,
})
}
//Home returns html of the home page using the template
//if people want to have different stuff show on the home page they probably
//need to modify this function
func (b Blog) Home(c *gin.Context) {
c.HTML(http.StatusOK, "home.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"version": b.Version,
"title": "Software Engineer",
})
}
//Posts is the index page for blog posts
func (b Blog) Posts(c *gin.Context) {
c.HTML(http.StatusOK, "posts.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"posts": b.GetPosts(),
"version": b.Version,
"title": "Posts",
})
}
//Post is the page for all individual posts
func (b Blog) Post(c *gin.Context) {
post, err := b.GetPostObject(c)
if err != nil {
c.HTML(http.StatusNotFound, "error.html", gin.H{
"error": "Post Not Found",
"description": err.Error(),
"version": b.Version,
"title": "Post Not Found",
})
} else {
c.HTML(http.StatusOK, "post.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"post": post,
"version": b.Version,
})
//if b.auth.IsAdmin(c) {
// c.HTML(http.StatusOK, "post-admin.html", gin.H{
// "logged_in": b.auth.IsLoggedIn(c),
// "is_admin": b.auth.IsAdmin(c),
// "post": post,
// "version": b.version,
// })
//} else {
// c.HTML(http.StatusOK, "post.html", gin.H{
// "logged_in": b.auth.IsLoggedIn(c),
// "is_admin": b.auth.IsAdmin(c),
// "post": post,
// "version": b.version,
// })
//}
}
}
//Tag lists all posts with a given tag
func (b Blog) Tag(c *gin.Context) {
tag := c.Param("name")
posts, err := b.getPostsByTag(c)
if err != nil {
c.HTML(http.StatusNotFound, "error.html", gin.H{
"error": "Tag '" + tag + "' Not Found",
"description": err.Error(),
"version": b.Version,
"title": "Tag '" + tag + "' Not Found",
})
} else {
c.HTML(http.StatusOK, "tag.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"posts": posts,
"tag": tag,
"version": b.Version,
"title": "Posts with Tag '" + tag + "'",
})
}
}
//Tags is the index page for all Tags
func (b Blog) Tags(c *gin.Context) {
c.HTML(http.StatusOK, "tags.html", gin.H{
"version": b.Version,
"title": "Tags",
"tags": b.getTags(),
})
}
//Speaking is the index page for presentations
func (b Blog) Speaking(c *gin.Context) {
c.HTML(http.StatusOK, "presentations.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"version": b.Version,
"title": "Speaking",
})
}
//Speaking is the index page for research publications
func (b Blog) Research(c *gin.Context) {
c.HTML(http.StatusOK, "research.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"version": b.Version,
"title": "Research Publications by Jason Ernst",
})
}
//Projects is the index page for projects / code
func (b Blog) Projects(c *gin.Context) {
c.HTML(http.StatusOK, "projects.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"version": b.Version,
"title": "Projects",
})
}
//About is the about page
func (b Blog) About(c *gin.Context) {
c.HTML(http.StatusOK, "about.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"version": b.Version,
"title": "About Jason",
})
}
func (b Blog) Sitemap(c *gin.Context) {
sm := stm.NewSitemap(1)
sm.SetDefaultHost("https://www.jasonernst.com")
sm.Create()
sm.Add(stm.URL{{"loc", "/"}, {"changefreq", "weekly"}, {"priority", 1.0}})
sm.Add(stm.URL{{"loc", "/posts"}, {"changefreq", "weekly"}, {"priority", 0.8}})
sm.Add(stm.URL{{"loc", "/tags"}, {"changefreq", "weekly"}, {"priority", 0.6}})
sm.Add(stm.URL{{"loc", "/research"}, {"changefreq", "yearly"}, {"priority", 0.2}})
sm.Add(stm.URL{{"loc", "/about"}, {"changefreq", "yearly"}, {"priority", 0.2}})
posts := b.GetPosts()
for _, post := range posts {
sm.Add(stm.URL{{"loc", post.Permalink()}, {"changefreq", "yearly"}, {"priority", 0.55}})
}
tags := b.getTags()
for _, tag := range tags {
if len(tag.Posts) > 0 {
sm.Add(stm.URL{{"loc", tag.Permalink()}, {"changefreq", "weekly"}, {"priority", 0.55}})
}
}
c.Data(http.StatusOK, "text/xml", sm.XMLContent())
}
//Login to the blog
func (b Blog) Login(c *gin.Context) {
err := godotenv.Load(".env")
if err != nil {
//fall back to local config
err = godotenv.Load("local.env")
if err != nil {
//todo: handle better - perhaps return error to browser
c.HTML(http.StatusInternalServerError, "Error loading .env file: "+err.Error(), gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"version": b.Version,
"title": "Login Configuration Error",
})
return
}
}
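	// For local development, .env / local.env would typically contain the OAuth
	// client id read below, e.g. (hypothetical value):
	//   client_id=abc123-oauth-client-id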
clientID := os.Getenv("client_id")
c.HTML(http.StatusOK, "login.html", gin.H{
"logged_in": b.auth.IsLoggedIn(c),
"is_admin": b.auth.IsAdmin(c),
"client_id": clientID,
"version": b.Version,
"title": "Login",
})
}
//Logout of the blog
func (b Blog) Logout(c *gin.Context) {
session := sessions.Default(c)
session.Delete("token")
session.Save()
c.Redirect(http.StatusTemporaryRedirect, "/")
}
| ["\"client_id\""] | [] | ["client_id"] | [] | ["client_id"] | go | 1 | 0 | |
services/searchengine/bleveengine/bleve_test.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package bleveengine
import (
"io/ioutil"
"os"
"testing"
"github.com/blevesearch/bleve"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/services/searchengine"
"github.com/mattermost/mattermost-server/v5/store/searchlayer"
"github.com/mattermost/mattermost-server/v5/store/searchtest"
"github.com/mattermost/mattermost-server/v5/store/sqlstore"
"github.com/mattermost/mattermost-server/v5/store/storetest"
"github.com/mattermost/mattermost-server/v5/testlib"
)
type BleveEngineTestSuite struct {
suite.Suite
SQLSettings *model.SqlSettings
SQLStore *sqlstore.SqlStore
SearchEngine *searchengine.Broker
Store *searchlayer.SearchStore
BleveEngine *BleveEngine
IndexDir string
}
func TestBleveEngineTestSuite(t *testing.T) {
suite.Run(t, new(BleveEngineTestSuite))
}
func (s *BleveEngineTestSuite) setupIndexes() {
indexDir, err := ioutil.TempDir("", "mmbleve")
if err != nil {
s.Require().FailNow("Cannot setup bleveengine tests: %s", err.Error())
}
s.IndexDir = indexDir
}
func (s *BleveEngineTestSuite) setupStore() {
driverName := os.Getenv("MM_SQLSETTINGS_DRIVERNAME")
if driverName == "" {
driverName = model.DatabaseDriverPostgres
}
s.SQLSettings = storetest.MakeSqlSettings(driverName, false)
s.SQLStore = sqlstore.New(*s.SQLSettings, nil)
cfg := &model.Config{}
cfg.SetDefaults()
cfg.BleveSettings.EnableIndexing = model.NewBool(true)
cfg.BleveSettings.EnableSearching = model.NewBool(true)
cfg.BleveSettings.EnableAutocomplete = model.NewBool(true)
cfg.BleveSettings.IndexDir = model.NewString(s.IndexDir)
cfg.SqlSettings.DisableDatabaseSearch = model.NewBool(true)
s.SearchEngine = searchengine.NewBroker(cfg, nil)
s.Store = searchlayer.NewSearchLayer(&testlib.TestStore{Store: s.SQLStore}, s.SearchEngine, cfg)
s.BleveEngine = NewBleveEngine(cfg, nil)
s.BleveEngine.indexSync = true
s.SearchEngine.RegisterBleveEngine(s.BleveEngine)
if err := s.BleveEngine.Start(); err != nil {
s.Require().FailNow("Cannot start bleveengine: %s", err.Error())
}
}
func (s *BleveEngineTestSuite) SetupSuite() {
s.setupIndexes()
s.setupStore()
}
func (s *BleveEngineTestSuite) TearDownSuite() {
os.RemoveAll(s.IndexDir)
s.SQLStore.Close()
storetest.CleanupSqlSettings(s.SQLSettings)
}
func (s *BleveEngineTestSuite) TestBleveSearchStoreTests() {
searchTestEngine := &searchtest.SearchTestEngine{
Driver: searchtest.EngineBleve,
}
s.Run("TestSearchChannelStore", func() {
searchtest.TestSearchChannelStore(s.T(), s.Store, searchTestEngine)
})
s.Run("TestSearchUserStore", func() {
searchtest.TestSearchUserStore(s.T(), s.Store, searchTestEngine)
})
s.Run("TestSearchPostStore", func() {
searchtest.TestSearchPostStore(s.T(), s.Store, searchTestEngine)
})
s.Run("TestSearchFileInfoStore", func() {
searchtest.TestSearchFileInfoStore(s.T(), s.Store, searchTestEngine)
})
}
func (s *BleveEngineTestSuite) TestDeleteChannelPosts() {
s.Run("Should remove all the posts that belongs to a channel", func() {
s.BleveEngine.PurgeIndexes()
teamID := model.NewId()
userID := model.NewId()
channelID := model.NewId()
channelToAvoidID := model.NewId()
posts := make([]*model.Post, 0)
for i := 0; i < 10; i++ {
post := createPost(userID, channelID)
appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID)
require.Nil(s.T(), appErr)
posts = append(posts, post)
}
postToAvoid := createPost(userID, channelToAvoidID)
appErr := s.SearchEngine.BleveEngine.IndexPost(postToAvoid, teamID)
require.Nil(s.T(), appErr)
s.SearchEngine.BleveEngine.DeleteChannelPosts(channelID)
doc, err := s.BleveEngine.PostIndex.Document(postToAvoid.Id)
require.NoError(s.T(), err)
require.Equal(s.T(), postToAvoid.Id, doc.ID)
numberDocs, err := s.BleveEngine.PostIndex.DocCount()
require.NoError(s.T(), err)
require.Equal(s.T(), 1, int(numberDocs))
})
s.Run("Shouldn't do anything if there is not posts for the selected channel", func() {
s.BleveEngine.PurgeIndexes()
teamID := model.NewId()
userID := model.NewId()
channelID := model.NewId()
channelToDeleteID := model.NewId()
post := createPost(userID, channelID)
appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID)
require.Nil(s.T(), appErr)
s.SearchEngine.BleveEngine.DeleteChannelPosts(channelToDeleteID)
_, err := s.BleveEngine.PostIndex.Document(post.Id)
require.NoError(s.T(), err)
numberDocs, err := s.BleveEngine.PostIndex.DocCount()
require.NoError(s.T(), err)
require.Equal(s.T(), 1, int(numberDocs))
})
}
func (s *BleveEngineTestSuite) TestDeleteUserPosts() {
s.Run("Should remove all the posts that belongs to a user", func() {
s.BleveEngine.PurgeIndexes()
teamID := model.NewId()
userID := model.NewId()
userToAvoidID := model.NewId()
channelID := model.NewId()
posts := make([]*model.Post, 0)
for i := 0; i < 10; i++ {
post := createPost(userID, channelID)
appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID)
require.Nil(s.T(), appErr)
posts = append(posts, post)
}
postToAvoid := createPost(userToAvoidID, channelID)
appErr := s.SearchEngine.BleveEngine.IndexPost(postToAvoid, teamID)
require.Nil(s.T(), appErr)
s.SearchEngine.BleveEngine.DeleteUserPosts(userID)
doc, err := s.BleveEngine.PostIndex.Document(postToAvoid.Id)
require.NoError(s.T(), err)
require.Equal(s.T(), postToAvoid.Id, doc.ID)
numberDocs, err := s.BleveEngine.PostIndex.DocCount()
require.NoError(s.T(), err)
require.Equal(s.T(), 1, int(numberDocs))
})
s.Run("Shouldn't do anything if there is not posts for the selected user", func() {
s.BleveEngine.PurgeIndexes()
teamID := model.NewId()
userID := model.NewId()
userToDeleteID := model.NewId()
channelID := model.NewId()
post := createPost(userID, channelID)
appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID)
require.Nil(s.T(), appErr)
s.SearchEngine.BleveEngine.DeleteUserPosts(userToDeleteID)
_, err := s.BleveEngine.PostIndex.Document(post.Id)
require.NoError(s.T(), err)
numberDocs, err := s.BleveEngine.PostIndex.DocCount()
require.NoError(s.T(), err)
require.Equal(s.T(), 1, int(numberDocs))
})
}
func (s *BleveEngineTestSuite) TestDeletePosts() {
s.BleveEngine.PurgeIndexes()
teamID := model.NewId()
userID := model.NewId()
userToAvoidID := model.NewId()
channelID := model.NewId()
posts := make([]*model.Post, 0)
for i := 0; i < 10; i++ {
post := createPost(userID, channelID)
appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID)
require.Nil(s.T(), appErr)
posts = append(posts, post)
}
postToAvoid := createPost(userToAvoidID, channelID)
appErr := s.SearchEngine.BleveEngine.IndexPost(postToAvoid, teamID)
require.Nil(s.T(), appErr)
query := bleve.NewTermQuery(userID)
query.SetField("UserId")
search := bleve.NewSearchRequest(query)
count, err := s.BleveEngine.deletePosts(search, 1)
require.NoError(s.T(), err)
require.Equal(s.T(), 10, int(count))
doc, err := s.BleveEngine.PostIndex.Document(postToAvoid.Id)
require.NoError(s.T(), err)
require.Equal(s.T(), postToAvoid.Id, doc.ID)
numberDocs, err := s.BleveEngine.PostIndex.DocCount()
require.NoError(s.T(), err)
require.Equal(s.T(), 1, int(numberDocs))
}
| ["\"MM_SQLSETTINGS_DRIVERNAME\""] | [] | ["MM_SQLSETTINGS_DRIVERNAME"] | [] | ["MM_SQLSETTINGS_DRIVERNAME"] | go | 1 | 0 | |
scripts/plotting/reprocessed_kccg_samples_pca/main.py | """Entry point for the analysis runner."""
import os
import sys
import hail as hl
import hailtop.batch as hb
from analysis_runner import dataproc
OUTPUT = os.getenv('OUTPUT')
assert OUTPUT
hl.init(default_reference='GRCh38')
POP = sys.argv[1] if len(sys.argv) > 1 else 'nfe'
service_backend = hb.ServiceBackend(
billing_project=os.getenv('HAIL_BILLING_PROJECT'), bucket=os.getenv('HAIL_BUCKET')
)
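# Minimal invocation sketch (hypothetical bucket and project names) showing the
# environment variables this script expects to be set by the analysis runner:
#   OUTPUT=gs://cpg-bucket/kccg_pca HAIL_BILLING_PROJECT=my-project \
#   HAIL_BUCKET=my-hail-bucket python3 main.py nfe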
batch = hb.Batch(name=f'{POP} kccg-reprocessed', backend=service_backend)
dataproc.hail_dataproc_job(
batch,
f'project_reprocessed_kccg_samples.py --output={OUTPUT} --pop {POP}',
max_age='5h',
packages=['click', 'selenium'],
init=['gs://cpg-reference/hail_dataproc/install_phantomjs.sh'],
job_name=f'{POP}-kccg-reprocessed',
)
batch.run()
| [] | [] | ["OUTPUT", "HAIL_BILLING_PROJECT", "HAIL_BUCKET"] | [] | ["OUTPUT", "HAIL_BILLING_PROJECT", "HAIL_BUCKET"] | python | 3 | 0 | |
lazyrunner/loading/module_initialization.py | from ..exceptions import ConfigError
from copy import copy
from itertools import chain, product
from os.path import exists, abspath, join, split, relpath
import os
import sys
from glob import glob
from treedict import TreeDict
from cStringIO import StringIO
import re
import ctypes
import shutil
import cleaning
import logging
from collections import defaultdict
from inspect import getsourcefile, getfile
"""
Module initialization helpers: compile CMake subprojects and Cython extensions, then load the project's modules.
"""
################################################################################
# Generic loading
import imp
from os.path import split, join
__loaded_modules = None
__synced_set = None
def resetAndInitModuleLoading(opttree):
global __loaded_modules
global __synced_set
## Clear out all of the modules from the project directory
#base_dir = abspath(opttree.project_directory)
#sub_modules = defaultdict(lambda: [])
#del_module_keys = []
#s = sys.modules
#for k, m in sys.modules.items():
#if m is None:
#kl = k.split('.')
#for i in xrange(len(kl)):
#sub_modules['.'.join(kl[:i])].append(k)
#else:
#if hasattr(m, "__file__") and abspath(m.__file__).startswith(base_dir):
#del_module_keys.append(k)
#for k in [k for k in del_module_keys]:
#del_module_keys += sub_modules[k]
#for k in del_module_keys:
#try:
#del sys.modules[k]
#except KeyError:
#pass
__loaded_modules = {}
__synced_set = set()
def loadModule(d, m = None):
global __loaded_modules
global __synced_set
if m is None:
d, m = split(d.replace(".py", ""))
elif m.endswith(".py"):
m = m[:-3]
elif m.startswith(d):
d, m = split(m.replace(".py", ""))
if '.' in m:
ml = m.split('.')
d, m = join(d, *ml[:-1]), ml[-1]
try:
# sync up with the loaded modules
if (m, d) in __loaded_modules:
return __loaded_modules[(m, d)]
# Is it in the sys.modules directory?
for k, module in sys.modules.iteritems():
if module is None or k in __synced_set:
continue
__synced_set.add(k)
try:
path = getfile(module)
if path is not None:
__loaded_modules[abspath(path)] = module
path = getsourcefile(module)
if path is not None:
__loaded_modules[abspath(path)] = module
except TypeError:
pass
m_data = imp.find_module(m, [d])
file, path_name, description = m_data
log = logging.getLogger("Loading")
key = abspath(path_name)
if key in __loaded_modules:
log.debug("Module '%s' already loaded." % key)
return __loaded_modules[key]
module = imp.load_module(m, *m_data)
log.debug("Module '%s' loaded." % key)
__loaded_modules[key] = module
__loaded_modules[(m,d)] = module
return module
finally:
try:
m_data[0].close()
except Exception:
pass
################################################################################
# Compiling and Init
# This list is to keep them from being unloaded and garbage collected.
loaded_ctype_dlls = []
# A setup regular expression for the
output_line_okay = re.compile(
r"(^\s*running build_ext\s*$)|(^\s*skipping.+up-to-date\)\s*$)|(^\s*$)")
# Copied from python 2.7's subprocess.py file
from subprocess import Popen, PIPE, STDOUT
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' in kwargs:
raise ValueError('stderr argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, stderr=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def readyCMakeProjects(opttree):
"""
Compiles CMake Projects.
"""
# may switch so it runs in a specified build directory.
if opttree.verbose:
if opttree.no_compile:
print "\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
print "Loading cmake project library files.\n"
else:
print "\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
print "Compiling cmake projects.\n"
for k, b in opttree.cmake.iteritems(recursive=False, branch_mode = "only"):
d = abspath(join(opttree.project_directory, b.directory))
if not exists(d):
raise ConfigError("CMake subproject '%s' directory does not exist (%s)"
% (k, d))
def run(cmd):
run_command = \
"cd '%s' && CMAKE_INCLUDE_PATH=$INCLUDE_PATH CMAKE_LIBRARY_PATH=$LD_LIBRARY_PATH %s" % (d, cmd)
try:
check_output([run_command], shell=True, stderr=STDOUT)
except CalledProcessError, ce:
raise ConfigError("Error while compiling cmake project '%s' in '%s':\n%s\n%s"
%(k, d, "Error code %d while running '%s':" % (ce.returncode, cmd), ce.output))
if opttree.verbose:
print "CMake: '%s' in '%s' " % (k,d)
retry_allowed = True
load_retry_allowed = True
while True:
if not opttree.no_compile:
while True:
try:
if not exists(join(d, "Makefile")):
run("cmake ./")
run("make --jobs -f Makefile")
except ConfigError, ce:
if retry_allowed:
print ("WARNING: Error while compiling cmake project '%s';"
" removing cache files and retrying.") % k
cleaning.clean_cmake_project(opttree, opttree, b)
retry_allowed = False
continue
else:
raise
break
if not exists(b.library_file):
if opttree.no_compile:
raise ConfigError("Expected shared library apparently not present, recompiling needed?: \n"
+ " Subproject: %s " % k
+ " Expected library file: %s" % (relpath(b.library_file)))
else:
raise ConfigError("Expected shared library apparently not produced by compiliation: \n"
+ " Subproject: %s " % k
+ " Expected library file: %s" % (relpath(b.library_file)))
# Dynamically load them here; this means they will be preloaded and the program will work
try:
loaded_dll = ctypes.cdll.LoadLibrary(b.library_file)
except OSError, ose:
if load_retry_allowed:
print "Error loading library: ", str(ose)
print "Cleaning, attempting again."
cleaning.clean_cmake_project(opttree, opttree, b)
load_retry_allowed = False
continue
else:
raise
break
        loaded_ctype_dlls.append(loaded_dll)
if opttree.verbose:
print "Done compiling and loading cmake library projects."
print "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
def runBuildExt(opttree):
"""
Sets up and runs the python build_ext setup configuration.
"""
if opttree.no_compile:
return
ct = opttree.cython
extra_include_dirs = ct.extra_include_dirs
extra_library_dirs = ct.extra_library_dirs
libraries = ct.libraries
library_map = ct.library_map
extra_source_map = ct.extra_source_map
compiler_args = ct.compiler_args
link_args = ct.link_args
quiet = not opttree.verbose
from distutils.core import setup as dist_setup
from distutils.extension import Extension
if ct.numpy_needed:
import numpy
extra_include_dirs += [numpy.get_include()]
######################################################
# First have to see if we're authorized to use cython files, or if we
# should instead compile the included files
# Get all the cython files in the sub directories and in this directory
cython_files = opttree.cython_files
# Set the compiler arguments -- Add in the environment path stuff
ld_library_path = os.getenv("LD_LIBRARY_PATH")
if ld_library_path is not None:
extra_library_dirs += [p.strip() for p in ld_library_path.split(":") if len(p.strip()) > 0]
include_path = os.getenv("INCLUDE_PATH")
if include_path is not None:
extra_include_dirs += [p.strip() for p in include_path.split(":") if len(p.strip()) > 0]
# The rest is also shared with the setup.py file, in addition to
# this one, so
def strip_empty(l):
return [e.strip() for e in l if len(e.strip()) != 0]
def get_include_dirs(m):
return strip_empty(extra_include_dirs)
def get_libnode_library(t):
if "library_name" not in t:
raise ValueError("Expected library_name in %s." % t.treeName())
if type(t.library_name) is not str:
raise ValueError("%s.library_name must be string." % t.treeName())
return t.library_name
def get_libnode_directory(t):
if "directory" not in t:
raise ValueError("Expected directory in %s." % t.treeName())
if type(t.directory) is not str:
raise ValueError("%s.directory must be a string." % t.treeName())
return t.directory
def get_library_dirs(m):
l = strip_empty(extra_library_dirs)
if m in library_map:
for lib in library_map[m]:
if type(lib) is TreeDict:
l.append(get_libnode_directory(lib))
l = [abspath(ld) for ld in l]
return l
def get_libraries(m):
def process_lib(lib):
if type(lib) is TreeDict:
return get_libnode_library(lib)
else:
return lib
liblist = libraries + (library_map[m] if m in library_map else [])
return strip_empty(process_lib(lib) for lib in liblist)
def get_extra_source_files(m):
return extra_source_map.get(m, [])
def get_extra_compile_args(m):
return strip_empty(compiler_args + (['-g'] if opttree.debug_mode else ["-DNDEBUG"]))
def get_extra_link_args(m):
return strip_empty(link_args + (['-g'] if opttree.debug_mode else ["-DNDEBUG"]))
############################################################
# Cython extension lists
ext_modules = []
for f in cython_files:
# extract the module names
rel_f = relpath(f, opttree.project_directory)
assert rel_f.endswith('.pyx')
modname = rel_f[:-4].replace('/', '.')
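# Illustrative mapping (hypothetical file name, not from this project):
# a file "utils/fast_ops.pyx" relative to the project directory becomes
# the module name "utils.fast_ops".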
ext_modules.append(Extension(
modname,
[f] + get_extra_source_files(modname),
include_dirs = get_include_dirs(modname),
library_dirs = get_library_dirs(modname),
libraries = get_libraries(modname),
extra_compile_args = get_extra_compile_args(modname),
extra_link_args = get_extra_link_args(modname),
language = "c++" if ct.use_cpp else "c",
))
############################################################
# Now get all these ready to go
from Cython.Distutils import build_ext
cmdclass = {'build_ext' : build_ext}
old_argv = copy(sys.argv)
sys.argv = (old_argv[0], "build_ext", "--inplace")
if not quiet:
print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> "
print "Compiling cython extension modules.\n"
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
if quiet:
output = sys.stderr = sys.stdout = StringIO()
dist_setup(
cmdclass = cmdclass,
ext_modules = ext_modules)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
if quiet:
output_string = output.getvalue()
# Check for output
if not all(output_line_okay.match(le) is not None for le in output_string.split('\n')):
if quiet:
print "++++++++++++++++++++"
print "Compiling cython extension modules.\n"
print output_string
if not quiet:
print "\nCython extension modules successfully compiled."
print "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
sys.argv = old_argv
def resetAndInitModules(opttree):
"The main setup function; calls the rest."
readyCMakeProjects(opttree)
runBuildExt(opttree)
for m in opttree.modules_to_import:
if opttree.verbose:
print "Loading module '%s' in directory '%s'" % (m, opttree.project_directory)
loadModule(m)
| [] | [] | [
"LD_LIBRARY_PATH",
"INCLUDE_PATH"
] | [] | ["LD_LIBRARY_PATH", "INCLUDE_PATH"] | python | 2 | 0 | |
msticpy/nbtools/nbinit.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Initialization for Jupyter Notebooks."""
import importlib
import io
import os
import sys
import traceback
import warnings
from contextlib import redirect_stdout
from functools import wraps
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import ipywidgets as widgets
import pandas as pd
import yaml
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import HTML, display
from matplotlib import MatplotlibDeprecationWarning
try:
import seaborn as sns
except ImportError:
sns = None
from .._version import VERSION
from ..common.azure_auth_core import check_cli_credentials, AzureCliStatus
from ..common.check_version import check_version
from ..common.exceptions import MsticpyException, MsticpyUserError
from ..common.pkg_config import get_config, validate_config
from ..common.utility import (
check_and_install_missing_packages,
check_kwargs,
is_ipython,
md,
search_for_file,
unit_testing,
)
from ..config import MpConfigFile
from ..datamodel.pivot import Pivot
from .azure_ml_tools import check_versions as check_versions_aml
from .azure_ml_tools import is_in_aml
from .user_config import load_user_defaults
__version__ = VERSION
__author__ = "Ian Hellen"
_IMPORT_ERR_MSSG = """
<h2><font color='red'>One or more missing packages detected</h2>
Please correct these by installing the required packages, restart
the kernel and re-run the notebook.</font>
<i>Package error: {err}</i><br>
"""
_IMPORT_MODULE_MSSG = """
<font color='red'>Error import module {module}</font>
"""
_MISSING_PKG_WARN = """
<h3><font color='orange'>Warning {package} is not installed or has an
incorrect version</h3></font>
"""
_HELP_URIS = [
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'
'A%20Getting%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Getting Started (notebook)</a></li>"
),
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'
'ConfiguringNotebookEnvironment.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Configuring your Notebook environment (notebook)</a></li>"
),
(
'<li><a href="https://msticpy.readthedocs.io/en/latest/getting_started/'
'msticpyconfig.html"'
'target="_blank" rel="noopener noreferrer">'
"Configuring MSTICPy settings (doc)</a></li>"
),
(
'<li><a href="https://msticpy.readthedocs.io/en/latest/getting_started/'
'SettingsEditor.html"'
'target="_blank" rel="noopener noreferrer">'
"MSTICPy settings editor (doc)</a></li>"
),
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/'
'master/TroubleShootingNotebooks.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Trouble-Shooting Notebooks (notebook)</a></li>"
),
]
_MISSING_MPCONFIG_ENV_ERR = f"""
<h3><font color='orange'>Warning: no <i>msticpyconfig.yaml</i> found</h3></font>
The MSTICPYCONFIG environment variable is set but does not point
to a valid file.<br>
Some functionality (such as Threat Intel lookups) will not function without
valid configuration settings.<br>
The following resources will help you set up your configuration:
<ul>{"".join(_HELP_URIS)}</ul>
<br>You can load and run the first two of these from the Microsoft Sentinel
<b>Notebooks</b> tab
"""
_PANDAS_REQ_VERSION = (0, 25, 0)
def _get_verbosity_setting() -> Callable[[Optional[int]], int]:
"""Closure for holding trace setting."""
_verbosity = 1
def _verbose(verbosity: Optional[int] = None) -> int:
nonlocal _verbosity
if verbosity is not None:
_verbosity = verbosity
return _verbosity
return _verbose
_VERBOSITY: Callable[[Optional[int]], int] = _get_verbosity_setting()
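# Usage sketch for the closure above (illustrative only, not part of the
# original control flow): calling with an int stores a new level, calling
# with no argument reads the current one.
#   _VERBOSITY(2)   # set detailed output
#   _VERBOSITY()    # -> 2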
_NB_IMPORTS = [
dict(pkg="pandas", alias="pd"),
dict(pkg="IPython", tgt="get_ipython"),
dict(pkg="IPython.display", tgt="display"),
dict(pkg="IPython.display", tgt="HTML"),
dict(pkg="IPython.display", tgt="Markdown"),
dict(pkg="ipywidgets", alias="widgets"),
dict(pkg="pathlib", tgt="Path"),
dict(pkg="matplotlib.pyplot", alias="plt"),
dict(pkg="matplotlib", tgt="MatplotlibDeprecationWarning"),
dict(pkg="numpy", alias="np"),
]
if sns is not None:
_NB_IMPORTS.append(dict(pkg="seaborn", alias="sns"))
_MP_IMPORTS = [
dict(pkg="msticpy"),
dict(pkg="msticpy.data", tgt="QueryProvider"),
dict(pkg="msticpy.nbtools.foliummap", tgt="FoliumMap"),
dict(pkg="msticpy.common.utility", tgt="md"),
dict(pkg="msticpy.common.utility", tgt="md_warn"),
dict(pkg="msticpy.common.wsconfig", tgt="WorkspaceConfig"),
dict(pkg="msticpy.datamodel.pivot", tgt="Pivot"),
dict(pkg="msticpy.datamodel", tgt="entities"),
dict(pkg="msticpy.vis", tgt="mp_pandas_plot"),
]
_MP_IMPORT_ALL = [
dict(module_name="msticpy.nbtools"),
dict(module_name="msticpy.sectools"),
]
_CONF_URI = (
"https://msticpy.readthedocs.io/en/latest/getting_started/msticpyconfig.html"
)
_AZNB_GUIDE = (
"Please run the <i>Getting Started Guide for Azure Sentinel "
+ "ML Notebooks</i> notebook."
)
_AZ_CLI_WIKI_URI = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/wiki/"
"Caching-credentials-with-Azure-CLI"
)
_CLI_WIKI_MSSG_GEN = (
f"For more information see <a href='{_AZ_CLI_WIKI_URI}'>"
"Caching credentials with Azure CLI</>"
)
_CLI_WIKI_MSSG_SHORT = (
f"see <a href='{_AZ_CLI_WIKI_URI}'>Caching credentials with Azure CLI</>"
)
current_providers: Dict[str, Any] = {} # pylint: disable=invalid-name
def _pr_output(*args):
"""Output to IPython display or print."""
if not _VERBOSITY():
return
if is_ipython():
display(HTML(" ".join([*args, "<br>"]).replace("\n", "<br>")))
else:
print(*args)
def _err_output(*args):
"""Output to IPython display or print - always output regardless of verbosity."""
if is_ipython():
display(HTML(" ".join([*args, "<br>"]).replace("\n", "<br>")))
else:
print(*args)
def init_notebook(
namespace: Dict[str, Any],
def_imports: str = "all",
additional_packages: List[str] = None,
extra_imports: List[str] = None,
**kwargs,
) -> bool:
"""
Initialize the notebook environment.
Parameters
----------
namespace : Dict[str, Any]
Namespace (usually globals()) into which imports
are to be populated.
def_imports : str, optional
Import default packages. By default "all".
Possible values are:
- "all" - import all packages
- "nb" - import common notebook packages
- "msticpy" - import msticpy packages
- "none" (or any other value) don't load any default packages.
additional_packages : List[str], optional
Additional packages to be pip installed,
by default None.
Packages are specified by name only or version
specification (e.g. "pandas>=0.25")
user_install : bool, optional
Install packages in the "user" rather than system site-packages.
Use this option if you cannot or do not want to update the system
packages.
You should usually avoid using this option with standard Conda environments.
extra_imports : List[str], optional
Additional import definitions, by default None.
Imports are specified as up to 3 comma-delimited values
in a string:
"{source_pkg}, [{import_tgt}], [{alias}]"
`source_pkg` is mandatory - equivalent to a simple "import xyz"
statement.
`{import_tgt}` specifies an object to import from the package
equivalent to "from source_pkg import import_tgt"
`alias` allows renaming of the imported object - equivalent to
the "as alias" part of the import statement.
If you want to provide just `source_pkg` and `alias` include
an additional placeholder comma: e.g. "pandas, , pd"
friendly_exceptions : Optional[bool]
Setting this to True causes msticpy to hook the notebook
exception hander. Any exceptions derived from MsticpyUserException
are displayed but do not produce a stack trace, etc.
Defaults to system/user settings if no value is supplied.
verbose : Union[int, bool], optional
Controls the amount of status output, by default 1
0 = No output
1 or False = Brief output (default)
2 or True = Detailed output
no_config_check : bool, optional
Skip the check for valid configuration. Default is False.
verbosity : int, optional
Alternative name for `verbose`; accepts the same values.
Returns
-------
bool
True if successful
Raises
------
MsticpyException
If extra_imports data format is incorrect.
If package with required version check has no version
information.
"""
global current_providers # pylint: disable=global-statement, invalid-name
check_kwargs(
kwargs,
[
"user_install",
"friendly_exceptions",
"no_config_check",
"verbosity",
"verbose",
],
)
user_install: bool = kwargs.pop("user_install", False)
friendly_exceptions: Optional[bool] = kwargs.pop("friendly_exceptions", None)
no_config_check: bool = kwargs.pop("no_config_check", False)
_set_verbosity(**kwargs)
_pr_output("<hr><h4>Starting Notebook initialization...</h4>")
# Check Azure ML environment
if is_in_aml():
check_versions_aml(*_get_aml_globals(namespace))
else:
# If not in AML check and print version status
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
check_version()
_pr_output(stdout_cap.getvalue())
# Handle required packages and imports
_pr_output("Processing imports....")
imp_ok = _global_imports(
namespace, additional_packages, user_install, extra_imports, def_imports
)
# Configuration check
if no_config_check:
conf_ok = True
else:
_pr_output("Checking configuration....")
conf_ok = _get_or_create_config()
_check_azure_cli_status()
# Notebook options
_pr_output("Setting notebook options....")
_set_nb_options(namespace)
# Set friendly exceptions
if friendly_exceptions is None:
friendly_exceptions = get_config("msticpy.FriendlyExceptions")
if friendly_exceptions:
if _VERBOSITY() == 2: # type: ignore
_pr_output("Friendly exceptions enabled.")
InteractiveShell.showtraceback = _hook_ipython_exceptions(
InteractiveShell.showtraceback
)
# load pivots
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
_load_pivots(namespace=namespace)
_pr_output(stdout_cap.getvalue())
# User defaults
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
prov_dict = load_user_defaults()
_pr_output(stdout_cap.getvalue())
if prov_dict:
namespace.update(prov_dict)
current_providers = prov_dict
_pr_output("Autoloaded components:", ", ".join(prov_dict.keys()))
# show any warnings
init_status = _show_init_warnings(imp_ok, conf_ok)
_pr_output("<h4>Notebook initialization complete</h4>")
return init_status
def _show_init_warnings(imp_ok, conf_ok):
if imp_ok and conf_ok:
return True
md("<font color='orange'><h3>Notebook setup completed with some warnings.</h3>")
if not imp_ok:
md("One or more libraries did not import successfully.")
md(_AZNB_GUIDE)
if not conf_ok:
md("One or more configuration items were missing or set incorrectly.")
md(
_AZNB_GUIDE
+ f" and the <a href='{_CONF_URI}'>msticpy configuration guide</a>."
)
md("This notebook may still run but with reduced functionality.")
return False
def _set_verbosity(**kwargs):
"""Set verbosity of output from boolean or int `verbose` param."""
verbosity = 1
verb_param = kwargs.pop("verbose", kwargs.pop("verbosity", 1))
if isinstance(verb_param, bool):
verbosity = 2 if verb_param else 1
elif isinstance(verb_param, int):
verbosity = min(2, max(0, verb_param))
_VERBOSITY(verbosity)
def list_default_imports():
"""List the default imports for `init_notebook`."""
for imp_group in (_NB_IMPORTS, _MP_IMPORTS):
for imp_item in imp_group:
if "tgt" in imp_item:
import_line = f"from {imp_item['pkg']} import {imp_item['tgt']}"
else:
import_line = f"import {imp_item['pkg']}"
if "alias" in imp_item:
import_line += f" as {imp_item['alias']}"
_pr_output(import_line)
for imp_item in _MP_IMPORT_ALL:
_pr_output(f"from {imp_item['module_name']} import *")
def _extract_pkg_name(
imp_pkg: Optional[Dict[str, str]] = None,
pkg: str = None,
tgt: str = None,
alias: str = None,
) -> str:
"""Return string representation of package import."""
if imp_pkg:
pkg = imp_pkg.get("pkg")
tgt = imp_pkg.get("tgt")
alias = imp_pkg.get("alias")
import_item = f"{pkg}.{tgt}" if tgt else pkg
if alias:
import_item = f"{alias} ({import_item})"
return import_item # type: ignore
PY_VER_VAR = "REQ_PYTHON_VER"
MP_VER_VAR = "REQ_MSTICPY_VER"
MP_EXTRAS = "REQ_MP_EXTRAS"
def _get_aml_globals(namespace: Dict[str, Any]):
"""Return global values if found."""
py_ver = namespace.get(PY_VER_VAR, "3.6")
mp_ver = namespace.get(MP_VER_VAR, __version__)
extras = namespace.get(MP_EXTRAS)
return py_ver, mp_ver, extras
def _global_imports(
namespace: Dict[str, Any],
additional_packages: List[str] = None,
user_install: bool = False,
extra_imports: List[str] = None,
def_imports: str = "all",
):
import_list = []
imports = _build_import_list(def_imports)
try:
for imp_pkg in imports:
_imp_from_package(nm_spc=namespace, **imp_pkg)
import_list.append(_extract_pkg_name(imp_pkg))
_check_and_reload_pkg(namespace, pd, _PANDAS_REQ_VERSION, "pd")
if additional_packages:
pkg_success = check_and_install_missing_packages(
additional_packages, user=user_install
)
if not pkg_success:
_err_output("One or more packages failed to install.")
_err_output(
"Please re-run init_notebook() with the parameter user_install=True."
)
# We want to force import lib to see anything that we've
# just installed.
importlib.invalidate_caches()
if extra_imports:
import_list.extend(
_import_extras(nm_spc=namespace, extra_imports=extra_imports)
)
_pr_output("Imported:", ", ".join(imp for imp in import_list if imp))
return True
except ImportError as imp_err:
display(HTML(_IMPORT_ERR_MSSG.format(err=imp_err)))
return False
def _build_import_list(def_imports: str) -> List[Dict[str, str]]:
imports = []
if def_imports.casefold() in ["all", "nb"]:
imports.extend(_NB_IMPORTS)
if def_imports.casefold() in ["all", "msticpy"]:
imports.extend(_MP_IMPORTS)
imports.extend(_MP_IMPORT_ALL)
return imports
_AZ_SENT_ERRS = [
"Missing or empty 'AzureSentinel' section",
"Missing or empty 'Workspaces' key in 'AzureSentinel' section",
]
def _verify_no_azs_errors(errs):
"""Verify none of the Microsoft Sentinel errors appear in `errs`."""
return all(az_err not in errs for az_err in _AZ_SENT_ERRS)
def _get_or_create_config() -> bool:
# Cases
# 1. Env var set and mpconfig exists -> goto 4
# 2. Env var set and mpconfig file not exists - warn and continue
# 3. search_for_file finds mpconfig -> goto 4
# 4. if file and check_file_contents -> return ok
# 5. search_for_file(config.json)
# 6. If config.json -> import into mpconfig and save
# 7. Error - no Microsoft Sentinel config
mp_path = os.environ.get("MSTICPYCONFIG")
if mp_path and not Path(mp_path).is_file():
_err_output(_MISSING_MPCONFIG_ENV_ERR)
if not mp_path or not Path(mp_path).is_file():
mp_path = search_for_file("msticpyconfig.yaml", paths=[".", ".."])
if mp_path:
errs: List[str] = []
try:
std_out_cap = io.StringIO()
with redirect_stdout(std_out_cap):
errs, _ = validate_config(config_file=mp_path)
if errs:
_pr_output(std_out_cap.getvalue())
if _verify_no_azs_errors(errs):
# If the mpconfig has a Microsoft Sentinel config, return here
return True
# pylint: disable=broad-except
except Exception as err:
errs.append(f"Exception while checking configuration:\n{err}")
_pr_output(f"Exception while checking configuration:\n{type(err)} - {err}")
_pr_output("\n".join(traceback.format_tb(err.__traceback__)))
_pr_output("Please report this to [email protected]")
# pylint: enable=broad-except
# Look for a config.json
config_json = search_for_file("config.json", paths=[".", ".."])
if config_json:
# if we found one, use it to populate msticpyconfig.yaml
_populate_config_to_mp_config(mp_path, config_json)
return True
_pr_output("No valid configuration for Microsoft Sentinel found.")
return False
def _populate_config_to_mp_config(mp_path, config_json):
"""Populate new or existing msticpyconfig with settings from config.json."""
mp_path = mp_path or "./msticpyconfig.yaml"
mp_config_convert = MpConfigFile(file=config_json)
azs_settings = mp_config_convert.map_json_to_mp_ws()
def_azs_settings = next(
iter(azs_settings.get("AzureSentinel", {}).get("Workspaces", {}).values())
)
if def_azs_settings:
mp_config_convert.settings["AzureSentinel"]["Workspaces"][
"Default"
] = def_azs_settings.copy()
mssg = f"Created '{mp_path}'' with Microsoft Sentinel settings."
if Path(mp_path).exists():
# If there is an existing file read it in
mp_config_text = Path(mp_path).read_text(encoding="utf-8")
mp_config_settings = yaml.safe_load(mp_config_text)
# update exist settings with the AzSent settings from config.json
mp_config_settings.update(mp_config_convert.settings)
# update MpConfigFile with the merged settings
mp_config_convert.settings = mp_config_settings
mssg = f"Updated '{mp_path}'' with Microsoft Sentinel settings."
# Save the file
mp_config_convert.save_to_file(mp_path, backup=True)
_pr_output(mssg)
def _set_nb_options(namespace):
namespace["WIDGET_DEFAULTS"] = {
"layout": widgets.Layout(width="95%"),
"style": {"description_width": "initial"},
}
# Some of our dependencies (networkx) still use deprecated Matplotlib
# APIs - we can't do anything about it, so suppress them from view
warnings.simplefilter("ignore", category=MatplotlibDeprecationWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
if sns:
sns.set()
pd.set_option("display.max_rows", 100)
pd.set_option("display.max_columns", 50)
pd.set_option("display.max_colwidth", 100)
os.environ["KQLMAGIC_LOAD_MODE"] = "silent"
# Kqlmagic config will use AZ CLI login if available
kql_config = os.environ.get("KQLMAGIC_CONFIGURATION", "")
if "try_azcli_login" not in kql_config:
kql_config = ";".join([kql_config, "try_azcli_login=True"])
os.environ["KQLMAGIC_CONFIGURATION"] = kql_config
def _load_pivots(namespace):
"""Load pivot functions."""
if not Pivot.current:
pivot = Pivot()
namespace["pivot"] = pivot
vt_pivot = None
try:
get_config("TIProviders.VirusTotal")
try:
vt_pivot = importlib.import_module("msticpy.sectools.vtlookupv3.vt_pivot")
namespace["vt_pivot"] = vt_pivot
except ImportError:
# Importing Vt3 libraries failed.
pass
except KeyError:
# No VT settings detected
pass
if vt_pivot:
vt_pivot.add_pivot_functions()
def _import_extras(nm_spc: Dict[str, Any], extra_imports: List[str]):
added_imports = []
if isinstance(extra_imports, str):
extra_imports = [extra_imports]
for imp_spec in extra_imports:
params: List[Optional[str]] = [None, None, None]
for idx, param in enumerate(imp_spec.split(",")):
params[idx] = param.strip() or None
if params[0] is None:
raise MsticpyException(
f"First parameter in extra_imports is mandatory: {imp_spec}"
)
_imp_from_package(nm_spc=nm_spc, pkg=params[0], tgt=params[1], alias=params[2])
added_imports.append(
_extract_pkg_name(pkg=params[0], tgt=params[1], alias=params[2])
)
return added_imports
def _imp_module(nm_spc: Dict[str, Any], module_name: str, alias: str = None):
"""Import named module and assign to global alias."""
try:
mod = importlib.import_module(module_name)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=module_name))
return None
if alias:
nm_spc[alias] = mod
else:
nm_spc[module_name] = mod
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{module_name} imported (alias={alias})")
return mod
def _imp_module_all(nm_spc: Dict[str, Any], module_name):
"""Import all from named module add to globals."""
try:
imported_mod = importlib.import_module(module_name)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=module_name))
return
for item in dir(imported_mod):
if item.startswith("_"):
continue
nm_spc[item] = getattr(imported_mod, item)
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"All items imported from {module_name}")
def _imp_from_package(
nm_spc: Dict[str, Any], pkg: str, tgt: str = None, alias: str = None
):
"""Import object or submodule from `pkg`."""
if not tgt:
return _imp_module(nm_spc=nm_spc, module_name=pkg, alias=alias)
try:
# target could be a module
obj = importlib.import_module(f".{tgt}", pkg)
except ImportError:
# if not, it must be an attribute (class, func, etc.)
try:
mod = importlib.import_module(pkg)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=pkg))
return None
obj = getattr(mod, tgt)
if alias:
nm_spc[alias] = obj
else:
nm_spc[tgt] = obj
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{tgt} imported from {pkg} (alias={alias})")
return obj
def _check_and_reload_pkg(
nm_spc: Dict[str, Any], pkg: Any, req_version: Tuple[int, ...], alias: str = None
):
"""Check package version matches required version and reload."""
warn_mssg = []
pkg_name = pkg.__name__
if not hasattr(pkg, "__version__"):
raise MsticpyException(f"Package {pkg_name} has no version data.")
pkg_version = tuple(int(v) for v in pkg.__version__.split("."))
if pkg_version < req_version:
_err_output(_MISSING_PKG_WARN.format(package=pkg_name))
resp = (
input("Install the package now? (y/n)") if not unit_testing() else "y"
) # nosec
if resp.casefold().startswith("y"):
warn_mssg.append(f"{pkg_name} was installed or upgraded.")
pip_ver = ".".join(str(elem) for elem in req_version)
pkg_spec = f"{pkg_name}>={pip_ver}"
check_and_install_missing_packages(required_packages=[pkg_spec], user=True)
if pkg_name in sys.modules:
importlib.reload(pkg)
else:
_imp_module(nm_spc, pkg_name, alias=alias)
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{pkg_name} imported version {pkg.__version__}")
return warn_mssg
def _hook_ipython_exceptions(func):
"""Hooks the `func` and bypasses it if exception is MsticpyUserException."""
@wraps(func)
def showtraceback(*args, **kwargs):
"""Replace IPython showtraceback."""
# extract exception type, value and traceback
e_type, _, _ = sys.exc_info()
if e_type is not None and issubclass(e_type, MsticpyUserError):
return None
# otherwise run the original hook
return func(*args, **kwargs)
return showtraceback
def _check_azure_cli_status():
"""Check for Azure CLI credentials."""
if not unit_testing():
status, message = check_cli_credentials()
if status == AzureCliStatus.CLI_OK:
_pr_output(message)
elif status == AzureCliStatus.CLI_NOT_INSTALLED:
_pr_output(
"Azure CLI credentials not detected." f" ({_CLI_WIKI_MSSG_SHORT})"
)
elif message:
_pr_output("\n".join([message, _CLI_WIKI_MSSG_GEN]))
| [] | [] | [
"KQLMAGIC_LOAD_MODE",
"MSTICPYCONFIG",
"KQLMAGIC_CONFIGURATION"
] | [] | ["KQLMAGIC_LOAD_MODE", "MSTICPYCONFIG", "KQLMAGIC_CONFIGURATION"] | python | 3 | 0 | |
src/nvm.go | package main
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"nvm/arch"
"nvm/file"
"nvm/node"
"nvm/web"
"github.com/blang/semver"
"github.com/olekukonko/tablewriter"
)
const (
NvmVersion = "1.1.8"
)
type Environment struct {
settings string
root string
symlink string
arch string
node_mirror string
npm_mirror string
proxy string
originalpath string
originalversion string
verifyssl bool
}
var home = filepath.Clean(os.Getenv("NVM_HOME") + "\\settings.txt")
var symlink = filepath.Clean(os.Getenv("NVM_SYMLINK"))
var env = &Environment{
settings: home,
root: "",
symlink: symlink,
arch: os.Getenv("PROCESSOR_ARCHITECTURE"),
node_mirror: "",
npm_mirror: "",
proxy: "none",
originalpath: "",
originalversion: "",
verifyssl: true,
}
func main() {
args := os.Args
detail := ""
procarch := arch.Validate(env.arch)
// Capture any additional arguments
if len(args) > 2 {
detail = args[2]
}
if len(args) > 3 {
if args[3] == "32" || args[3] == "64" {
procarch = args[3]
}
}
if len(args) < 2 {
help()
return
}
if args[1] != "version" && args[1] != "v" {
setup()
}
// Run the appropriate method
switch args[1] {
case "install":
install(detail, procarch)
case "uninstall":
uninstall(detail)
case "use":
use(detail, procarch)
case "list":
list(detail)
case "ls":
list(detail)
case "on":
enable()
case "off":
disable()
case "root":
if len(args) == 3 {
updateRootDir(args[2])
} else {
fmt.Println("\nCurrent Root: " + env.root)
}
case "v":
fmt.Println(NvmVersion)
case "version":
fmt.Println(NvmVersion)
case "arch":
if strings.Trim(detail, " \r\n") != "" {
detail = strings.Trim(detail, " \r\n")
if detail != "32" && detail != "64" {
fmt.Println("\"" + detail + "\" is an invalid architecture. Use 32 or 64.")
return
}
env.arch = detail
saveSettings()
fmt.Println("Default architecture set to " + detail + "-bit.")
return
}
_, a := node.GetCurrentVersion()
fmt.Println("System Default: " + env.arch + "-bit.")
fmt.Println("Currently Configured: " + a + "-bit.")
case "proxy":
if detail == "" {
fmt.Println("Current proxy: " + env.proxy)
} else {
env.proxy = detail
saveSettings()
}
case "current":
inuse, _ := node.GetCurrentVersion()
v, _ := semver.Make(inuse)
err := v.Validate()
if err != nil {
fmt.Println(inuse)
} else if inuse == "Unknown" {
fmt.Println("No current version. Run 'nvm use x.x.x' to set a version.")
} else {
fmt.Println("v" + inuse)
}
//case "update": update()
case "node_mirror":
setNodeMirror(detail)
case "npm_mirror":
setNpmMirror(detail)
default:
help()
}
}
// ===============================================================
// BEGIN | CLI functions
// ===============================================================
func setNodeMirror(uri string) {
env.node_mirror = uri
saveSettings()
}
func setNpmMirror(uri string) {
env.npm_mirror = uri
saveSettings()
}
/*
func update() {
cmd := exec.Command("cmd", "/d", "echo", "testing")
var output bytes.Buffer
var _stderr bytes.Buffer
cmd.Stdout = &output
cmd.Stderr = &_stderr
perr := cmd.Run()
if perr != nil {
fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
return
}
}
*/
func getVersion(version string, cpuarch string) (string, string, error) {
arch := strings.ToLower(cpuarch)
if cpuarch != "" {
if cpuarch != "32" && cpuarch != "64" && cpuarch != "all" {
return version, cpuarch, errors.New("\"" + cpuarch + "\" is not a valid CPU architecture. Must be 32 or 64.")
}
} else {
cpuarch = env.arch
}
if cpuarch != "all" {
cpuarch = arch.Validate(cpuarch)
}
if version == "" {
return "", cpuarch, errors.New("Invalid version.")
}
// If user specifies "latest" version, find out what version is
if version == "latest" {
version = getLatest()
}
if version == "lts" {
version = getLTS()
}
if version == "newest" {
installed := node.GetInstalled(env.root)
if len(installed) == 0 {
return version, "", errors.New("No versions of node.js found. Try installing the latest by typing nvm install latest")
}
version = installed[0]
}
if version == "32" || version == "64" {
cpuarch = version
v, _ := node.GetCurrentVersion()
version = v
}
v, err := semver.Make(version)
if err == nil {
err = v.Validate()
}
if err == nil {
// if the user specifies only the major/minor version, identify the latest
// version applicable to what was provided.
sv := strings.Split(version, ".")
if len(sv) < 3 {
version = findLatestSubVersion(version)
} else {
version = cleanVersion(version)
}
}
return version, cpuarch, err
}
func install(version string, cpuarch string) {
requestedVersion := version
args := os.Args
lastarg := args[len(args)-1]
if lastarg == "--insecure" {
env.verifyssl = false
}
v, a, err := getVersion(version, cpuarch)
version = v
cpuarch = a
if err != nil {
fmt.Println(err.Error())
if version == "" {
fmt.Println(" ")
help()
}
return
}
if err != nil {
fmt.Println("\"" + requestedVersion + "\" is not a valid version.")
fmt.Println("Please use a valid semantic version number, \"lts\", or \"latest\".")
return
}
if checkVersionExceedsLatest(version) {
fmt.Println("Node.js v" + version + " is not yet released or available.")
return
}
if cpuarch == "64" && !web.IsNode64bitAvailable(version) {
fmt.Println("Node.js v" + version + " is only available in 32-bit.")
return
}
// Check to see if the version is already installed
if !node.IsVersionInstalled(env.root, version, cpuarch) {
if !node.IsVersionAvailable(version) {
url := web.GetFullNodeUrl("index.json")
fmt.Println("\nVersion " + version + " is not available.\n\nThe complete list of available versions can be found at " + url)
return
}
// Make the output directories
os.Mkdir(filepath.Join(env.root, "v"+version), os.ModeDir)
os.Mkdir(filepath.Join(env.root, "v"+version, "node_modules"), os.ModeDir)
// Warn the user if they're attempting to install without verifying the remote SSL cert
if !env.verifyssl {
fmt.Println("\nWARNING: The remote SSL certificate will not be validated during the download process.\n")
}
// Download node
append32 := node.IsVersionInstalled(env.root, version, "64")
append64 := node.IsVersionInstalled(env.root, version, "32")
if (cpuarch == "32" || cpuarch == "all") && !node.IsVersionInstalled(env.root, version, "32") {
success := web.GetNodeJS(env.root, version, "32", append32)
if !success {
os.RemoveAll(filepath.Join(env.root, "v"+version, "node_modules"))
fmt.Println("Could not download node.js v" + version + " 32-bit executable.")
return
}
}
if (cpuarch == "64" || cpuarch == "all") && !node.IsVersionInstalled(env.root, version, "64") {
success := web.GetNodeJS(env.root, version, "64", append64)
if !success {
os.RemoveAll(filepath.Join(env.root, "v"+version, "node_modules"))
fmt.Println("Could not download node.js v" + version + " 64-bit executable.")
return
}
}
if file.Exists(filepath.Join(env.root, "v"+version, "node_modules", "npm")) {
fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use " + version)
return
}
// If successful, add npm
npmv := getNpmVersion(version)
success := web.GetNpm(env.root, getNpmVersion(version))
if success {
fmt.Printf("Installing npm v" + npmv + "...")
// new temp directory under the nvm root
tempDir := filepath.Join(env.root, "temp")
// Extract npm to the temp directory
err := file.Unzip(filepath.Join(tempDir, "npm-v"+npmv+".zip"), filepath.Join(tempDir, "nvm-npm"))
// Copy the npm and npm.cmd files to the installation directory
tempNpmBin := filepath.Join(tempDir, "nvm-npm", "cli-"+npmv, "bin")
// Support npm < 6.2.0
if file.Exists(tempNpmBin) == false {
tempNpmBin = filepath.Join(tempDir, "nvm-npm", "npm-"+npmv, "bin")
}
if file.Exists(tempNpmBin) == false {
log.Fatal("Failed to extract npm. Could not find " + tempNpmBin)
}
// Standard npm support
os.Rename(filepath.Join(tempNpmBin, "npm"), filepath.Join(env.root, "v"+version, "npm"))
os.Rename(filepath.Join(tempNpmBin, "npm.cmd"), filepath.Join(env.root, "v"+version, "npm.cmd"))
// npx support
if _, err := os.Stat(filepath.Join(tempNpmBin, "npx")); err == nil {
os.Rename(filepath.Join(tempNpmBin, "npx"), filepath.Join(env.root, "v"+version, "npx"))
os.Rename(filepath.Join(tempNpmBin, "npx.cmd"), filepath.Join(env.root, "v"+version, "npx.cmd"))
}
npmSourcePath := filepath.Join(tempDir, "nvm-npm", "npm-"+npmv)
if file.Exists(npmSourcePath) == false {
npmSourcePath = filepath.Join(tempDir, "nvm-npm", "cli-"+npmv)
}
moveNpmErr := os.Rename(npmSourcePath, filepath.Join(env.root, "v"+version, "node_modules", "npm"))
if moveNpmErr != nil {
// sometimes Windows can take some time to enable access to large amounts of files after unzip, use exponential backoff to wait until it is ready
for _, i := range [5]int{1, 2, 4, 8, 16} {
time.Sleep(time.Duration(i) * time.Second)
moveNpmErr = os.Rename(npmSourcePath, filepath.Join(env.root, "v"+version, "node_modules", "npm"))
if moveNpmErr == nil {
break
}
}
}
if err == nil && moveNpmErr == nil {
// Remove the temp directory
// may consider keeping the temp files here
os.RemoveAll(tempDir)
fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use " + version)
} else if moveNpmErr != nil {
fmt.Println("Error: Unable to move directory " + npmSourcePath + " to node_modules: " + moveNpmErr.Error())
} else {
fmt.Println("Error: Unable to install NPM: " + err.Error())
}
} else {
fmt.Println("Could not download npm for node v" + version + ".")
fmt.Println("Please visit https://github.com/npm/cli/releases/tag/v" + npmv + " to download npm.")
fmt.Println("It should be extracted to " + env.root + "\\v" + version)
}
// Reset the SSL verification
env.verifyssl = true
// If this is ever shipped for Mac, it should use homebrew.
// If this ever ships on Linux, it should be on bintray so it can use yum, apt-get, etc.
return
} else {
fmt.Println("Version " + version + " is already installed.")
return
}
}
func uninstall(version string) {
// Make sure a version is specified
if len(version) == 0 {
fmt.Println("Provide the version you want to uninstall.")
help()
return
}
if strings.ToLower(version) == "latest" {
version = getLatest()
} else if strings.ToLower(version) == "lts" {
version = getLTS()
} else if strings.ToLower(version) == "newest" {
installed := node.GetInstalled(env.root)
if len(installed) == 0 {
fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest")
return
}
version = installed[0]
}
version = cleanVersion(version)
// Determine if the version exists and skip if it doesn't
if node.IsVersionInstalled(env.root, version, "32") || node.IsVersionInstalled(env.root, version, "64") {
fmt.Printf("Uninstalling node v" + version + "...")
v, _ := node.GetCurrentVersion()
if v == version {
runElevated(fmt.Sprintf(`"%s" cmd /C rmdir "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink)))
}
e := os.RemoveAll(filepath.Join(env.root, "v"+version))
if e != nil {
fmt.Println("Error removing node v" + version)
fmt.Println("Manually remove " + filepath.Join(env.root, "v"+version) + ".")
} else {
fmt.Printf(" done")
}
} else {
fmt.Println("node v" + version + " is not installed. Type \"nvm list\" to see what is installed.")
}
return
}
func findLatestSubVersion(version string) string {
url := web.GetFullNodeUrl("latest-v" + version + ".x" + "/SHASUMS256.txt")
content := web.GetRemoteTextFile(url)
re := regexp.MustCompile("node-v(.+)+msi")
reg := regexp.MustCompile("node-v|-x.+")
latest := reg.ReplaceAllString(re.FindString(content), "")
return latest
}
func use(version string, cpuarch string) {
v, a, err := getVersion(version, cpuarch)
version = v
cpuarch = a
if err != nil {
fmt.Println(err.Error())
return
}
// Make sure the version is installed. If not, warn.
if !node.IsVersionInstalled(env.root, version, cpuarch) {
fmt.Println("node v" + version + " (" + cpuarch + "-bit) is not installed.")
if cpuarch == "32" {
if node.IsVersionInstalled(env.root, version, "64") {
fmt.Println("\nDid you mean node v" + version + " (64-bit)?\nIf so, type \"nvm use " + version + " 64\" to use it.")
}
}
if cpuarch == "64" {
if node.IsVersionInstalled(env.root, version, "32") {
fmt.Println("\nDid you mean node v" + version + " (32-bit)?\nIf so, type \"nvm use " + version + " 32\" to use it.")
}
}
return
}
// Remove symlink if it already exists
sym, _ := os.Stat(env.symlink)
if sym != nil {
if !runElevated(fmt.Sprintf(`"%s" cmd /C rmdir "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink))) {
return
}
}
// Create new symlink
if !runElevated(fmt.Sprintf(`"%s" cmd /C mklink /D "%s" "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink),
filepath.Join(env.root, "v"+version))) {
return
}
// Use the assigned CPU architecture
cpuarch = arch.Validate(cpuarch)
nodepath := filepath.Join(env.root, "v"+version, "node.exe")
node32path := filepath.Join(env.root, "v"+version, "node32.exe")
node64path := filepath.Join(env.root, "v"+version, "node64.exe")
node32exists := file.Exists(node32path)
node64exists := file.Exists(node64path)
nodeexists := file.Exists(nodepath)
if node32exists && cpuarch == "32" { // user wants 32, but node.exe is 64
if nodeexists {
os.Rename(nodepath, node64path) // node.exe -> node64.exe
}
os.Rename(node32path, nodepath) // node32.exe -> node.exe
}
if node64exists && cpuarch == "64" { // user wants 64, but node.exe is 32
if nodeexists {
os.Rename(nodepath, node32path) // node.exe -> node32.exe
}
os.Rename(node64path, nodepath) // node64.exe -> node.exe
}
fmt.Println("Now using node v" + version + " (" + cpuarch + "-bit)")
}
func useArchitecture(a string) {
if strings.ContainsAny("32", os.Getenv("PROCESSOR_ARCHITECTURE")) {
fmt.Println("This computer only supports 32-bit processing.")
return
}
if a == "32" || a == "64" {
env.arch = a
saveSettings()
fmt.Println("Set to " + a + "-bit mode")
} else {
fmt.Println("Cannot set architecture to " + a + ". Must be 32 or 64 are acceptable values.")
}
}
func list(listtype string) {
if listtype == "" {
listtype = "installed"
}
if listtype != "installed" && listtype != "available" {
fmt.Println("\nInvalid list option.\n\nPlease use on of the following\n - nvm list\n - nvm list installed\n - nvm list available")
help()
return
}
if listtype == "installed" {
fmt.Println("")
inuse, a := node.GetCurrentVersion()
v := node.GetInstalled(env.root)
for i := 0; i < len(v); i++ {
version := v[i]
isnode, _ := regexp.MatchString("v", version)
str := ""
if isnode {
if "v"+inuse == version {
str = str + " * "
} else {
str = str + " "
}
str = str + regexp.MustCompile("v").ReplaceAllString(version, "")
if "v"+inuse == version {
str = str + " (Currently using " + a + "-bit executable)"
// str = ansi.Color(str,"green:black")
}
fmt.Printf(str + "\n")
}
}
if len(v) == 0 {
fmt.Println("No installations recognized.")
}
} else {
_, lts, current, stable, unstable, _ := node.GetAvailable()
releases := 20
data := make([][]string, releases, releases+5)
for i := 0; i < releases; i++ {
release := make([]string, 4, 6)
release[0] = ""
release[1] = ""
release[2] = ""
release[3] = ""
if len(current) > i {
if len(current[i]) > 0 {
release[0] = current[i]
}
}
if len(lts) > i {
if len(lts[i]) > 0 {
release[1] = lts[i]
}
}
if len(stable) > i {
if len(stable[i]) > 0 {
release[2] = stable[i]
}
}
if len(unstable) > i {
if len(unstable[i]) > 0 {
release[3] = unstable[i]
}
}
data[i] = release
}
fmt.Println("")
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{" Current ", " LTS ", " Old Stable ", "Old Unstable"})
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetAlignment(tablewriter.ALIGN_CENTER)
table.SetCenterSeparator("|")
table.AppendBulk(data) // Add Bulk Data
table.Render()
fmt.Println("\nThis is a partial list. For a complete list, visit https://nodejs.org/en/download/releases")
}
}
func enable() {
dir := ""
files, _ := ioutil.ReadDir(env.root)
for _, f := range files {
if f.IsDir() {
isnode, _ := regexp.MatchString("v", f.Name())
if isnode {
dir = f.Name()
}
}
}
fmt.Println("nvm enabled")
if dir != "" {
use(strings.Trim(regexp.MustCompile("v").ReplaceAllString(dir, ""), " \n\r"), env.arch)
} else {
fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest")
}
}
func disable() {
if !runElevated(fmt.Sprintf(`"%s" cmd /C rmdir "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink))) {
return
}
fmt.Println("nvm disabled")
}
func help() {
fmt.Println("\nRunning version " + NvmVersion + ".")
fmt.Println("\nUsage:")
fmt.Println(" ")
fmt.Println(" nvm arch : Show if node is running in 32 or 64 bit mode.")
fmt.Println(" nvm current : Display active version.")
fmt.Println(" nvm install <version> [arch] : The version can be a specific version, \"latest\" for the latest current version, or \"lts\" for the")
fmt.Println(" most recent LTS version. Optionally specify whether to install the 32 or 64 bit version (defaults")
fmt.Println(" to system arch). Set [arch] to \"all\" to install 32 AND 64 bit versions.")
fmt.Println(" Add --insecure to the end of this command to bypass SSL validation of the remote download server.")
fmt.Println(" nvm list [available] : List the node.js installations. Type \"available\" at the end to see what can be installed. Aliased as ls.")
fmt.Println(" nvm on : Enable node.js version management.")
fmt.Println(" nvm off : Disable node.js version management.")
fmt.Println(" nvm proxy [url] : Set a proxy to use for downloads. Leave [url] blank to see the current proxy.")
fmt.Println(" Set [url] to \"none\" to remove the proxy.")
fmt.Println(" nvm node_mirror [url] : Set the node mirror. Defaults to https://nodejs.org/dist/. Leave [url] blank to use default url.")
fmt.Println(" nvm npm_mirror [url] : Set the npm mirror. Defaults to https://github.com/npm/cli/archive/. Leave [url] blank to default url.")
fmt.Println(" nvm uninstall <version> : The version must be a specific version.")
// fmt.Println(" nvm update : Automatically update nvm to the latest version.")
fmt.Println(" nvm use [version] [arch] : Switch to use the specified version. Optionally use \"latest\", \"lts\", or \"newest\".")
fmt.Println(" \"newest\" is the latest installed version. Optionally specify 32/64bit architecture.")
fmt.Println(" nvm use <arch> will continue using the selected version, but switch to 32/64 bit mode.")
fmt.Println(" nvm root [path] : Set the directory where nvm should store different versions of node.js.")
fmt.Println(" If <path> is not set, the current root will be displayed.")
fmt.Println(" nvm version : Displays the current running version of nvm for Windows. Aliased as v.")
fmt.Println(" ")
}
// ===============================================================
// END | CLI functions
// ===============================================================
// ===============================================================
// BEGIN | Utility functions
// ===============================================================
func checkVersionExceedsLatest(version string) bool {
//content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS256.txt")
url := web.GetFullNodeUrl("latest/SHASUMS256.txt")
content := web.GetRemoteTextFile(url)
re := regexp.MustCompile("node-v(.+)+msi")
reg := regexp.MustCompile("node-v|-x.+")
latest := reg.ReplaceAllString(re.FindString(content), "")
var vArr = strings.Split(version, ".")
var lArr = strings.Split(latest, ".")
for index := range lArr {
lat, _ := strconv.Atoi(lArr[index])
ver, _ := strconv.Atoi(vArr[index])
//Should check for valid input (checking for conversion errors) but this tool is made to trust the user
if ver < lat {
return false
} else if ver > lat {
return true
}
}
return false
}
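// cleanVersion below normalizes partial version strings. Illustrative
// examples inferred from its regex logic (not exhaustive):
//   "11.6.0" -> "11.6.0"
//   "11.6"   -> "11.6.0"
//   "11"     -> "11.0.0"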
func cleanVersion(version string) string {
re := regexp.MustCompile("\\d+.\\d+.\\d+")
matched := re.FindString(version)
if len(matched) == 0 {
re = regexp.MustCompile("\\d+.\\d+")
matched = re.FindString(version)
if len(matched) == 0 {
matched = version + ".0.0"
} else {
matched = matched + ".0"
}
fmt.Println(matched)
}
return matched
}
// Given a node.js version, returns the associated npm version
func getNpmVersion(nodeversion string) string {
_, _, _, _, _, npm := node.GetAvailable()
return npm[nodeversion]
}
func getLatest() string {
url := web.GetFullNodeUrl("latest/SHASUMS256.txt")
content := web.GetRemoteTextFile(url)
re := regexp.MustCompile("node-v(.+)+msi")
reg := regexp.MustCompile("node-v|-x.+")
return reg.ReplaceAllString(re.FindString(content), "")
}
func getLTS() string {
_, ltsList, _, _, _, _ := node.GetAvailable()
// ltsList has already been numerically sorted
return ltsList[0]
}
func updateRootDir(path string) {
_, err := os.Stat(path)
if err != nil {
fmt.Println(path + " does not exist or could not be found.")
return
}
currentRoot := env.root
env.root = filepath.Clean(path)
// Copy command files
os.Link(filepath.Clean(currentRoot+"/elevate.cmd"), filepath.Clean(env.root+"/elevate.cmd"))
os.Link(filepath.Clean(currentRoot+"/elevate.vbs"), filepath.Clean(env.root+"/elevate.vbs"))
saveSettings()
if currentRoot != env.root {
fmt.Println("\nRoot has been changed from " + currentRoot + " to " + path)
}
}
func runElevated(command string) bool {
c := exec.Command("cmd") // dummy executable that actually needs to exist but we'll overwrite using .SysProcAttr
// Based on the official docs, syscall.SysProcAttr.CmdLine doesn't exist.
// But it does and is vital:
// https://github.com/golang/go/issues/15566#issuecomment-333274825
// https://medium.com/@felixge/killing-a-child-process-and-all-of-its-children-in-go-54079af94773
c.SysProcAttr = &syscall.SysProcAttr{CmdLine: command}
var stderr bytes.Buffer
c.Stderr = &stderr
err := c.Run()
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
return false
}
return true
}
func saveSettings() {
content := "root: " + strings.Trim(env.root, " \n\r") + "\r\narch: " + strings.Trim(env.arch, " \n\r") + "\r\nproxy: " + strings.Trim(env.proxy, " \n\r") + "\r\noriginalpath: " + strings.Trim(env.originalpath, " \n\r") + "\r\noriginalversion: " + strings.Trim(env.originalversion, " \n\r")
content = content + "\r\nnode_mirror: " + strings.Trim(env.node_mirror, " \n\r") + "\r\nnpm_mirror: " + strings.Trim(env.npm_mirror, " \n\r")
ioutil.WriteFile(env.settings, []byte(content), 0644)
}
// NOT USED?
/*
func useArchitecture(a string) {
if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) {
fmt.Println("This computer only supports 32-bit processing.")
return
}
if a == "32" || a == "64" {
env.arch = a
saveSettings()
fmt.Println("Set to "+a+"-bit mode")
} else {
fmt.Println("Cannot set architecture to "+a+". Must be 32 or 64 are acceptable values.")
}
}
*/
// ===============================================================
// END | Utility functions
// ===============================================================
func setup() {
lines, err := file.ReadLines(env.settings)
if err != nil {
fmt.Println("\nERROR", err)
os.Exit(1)
}
// Process each line and extract the value
m := make(map[string]string)
for _, line := range lines {
line = strings.TrimSpace(line)
line = os.ExpandEnv(line)
res := strings.Split(line, ":")
if len(res) < 2 {
continue
}
m[res[0]] = strings.TrimSpace(strings.Join(res[1:], ":"))
}
if val, ok := m["root"]; ok {
env.root = filepath.Clean(val)
}
if val, ok := m["originalpath"]; ok {
env.originalpath = filepath.Clean(val)
}
if val, ok := m["originalversion"]; ok {
env.originalversion = val
}
if val, ok := m["arch"]; ok {
env.arch = val
}
if val, ok := m["node_mirror"]; ok {
env.node_mirror = val
}
if val, ok := m["npm_mirror"]; ok {
env.npm_mirror = val
}
if val, ok := m["proxy"]; ok {
if val != "none" && val != "" {
if strings.ToLower(val[0:4]) != "http" {
val = "http://" + val
}
res, err := url.Parse(val)
if err == nil {
web.SetProxy(res.String(), env.verifyssl)
}
}
}
web.SetMirrors(env.node_mirror, env.npm_mirror)
env.arch = arch.Validate(env.arch)
// Make sure the directories exist
_, e := os.Stat(env.root)
if e != nil {
fmt.Println(env.root + " could not be found or does not exist. Exiting.")
return
}
}
| [
"\"NVM_HOME\"",
"\"NVM_SYMLINK\"",
"\"PROCESSOR_ARCHITECTURE\"",
"\"PROCESSOR_ARCHITECTURE\"",
"\"PROCESSOR_ARCHITECTURE\""
] | [] | [
"NVM_SYMLINK",
"NVM_HOME",
"PROCESSOR_ARCHITECTURE"
] | [] | ["NVM_SYMLINK", "NVM_HOME", "PROCESSOR_ARCHITECTURE"] | go | 3 | 0 | |
Projects/frank_website/frank_website/wsgi.py | """
WSGI config for frank_website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "frank_website.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pyaci/core.py | # Copyright (c) 2014, 2015 Cisco Systems, Inc. All rights reserved.
"""
pyaci.core
~~~~~~~~~~~~~~~~~~~
This module contains the core classes of PyACI.
"""
from OpenSSL.crypto import FILETYPE_PEM, load_privatekey, sign
from collections import OrderedDict, defaultdict, deque
from lxml import etree
from requests import Request
from threading import Event
from io import BytesIO
from functools import reduce
from six import iteritems, iterkeys, itervalues
from six.moves.urllib.parse import unquote, urlparse
import base64
import getpass
import json
import logging
import operator
import os
import parse
import requests
import ssl
import threading
import websocket
import xmltodict
import sys
import time
from .errors import (
MetaError, MoError, ResourceError, RestError, UserError
)
from .utils import splitIntoRns
from . import options
logger = logging.getLogger(__name__)
payloadFormat = 'xml'
DELTA = 5 # time delta to allow for any variations of clock...
# Web Socket Statuses
WS_OPENING = 'Websocket Opening.'
WS_OPENED = 'Websocket Opened.'
WS_ERRORED = 'Websocket Errored.'
WS_CLOSED = 'Websocket Closed.'
def subLogger(name):
return logging.getLogger('{}.{}'.format(__name__, name))
def _elementToString(e):
return etree.tostring(e, pretty_print=True, encoding='unicode')
# TODO (2015-05-07, Praveen Kumar): Research a way to automatically
# load this by discovering the version from the node.
aciMetaDir = os.path.expanduser(os.environ.get('ACI_META_DIR', '~/.aci-meta'))
aciMetaFile = os.path.join(aciMetaDir, 'aci-meta.json')
if os.path.exists(aciMetaFile):
with open(aciMetaFile, 'rb') as f:
logger.debug('Loading meta information from %s', aciMetaFile)
aciMeta = json.load(f)
aciClassMetas = aciMeta['classes']
else:
aciClassMetas = dict()
class Api(object):
def __init__(self, parentApi=None, userProxies=None):
self._parentApi = parentApi
self._userProxies = userProxies
def GET(self, format=None, **kwargs):
return self._performRequest('GET', format=format, **kwargs)
def DELETE(self, format=None):
return self._performRequest('DELETE', format=format)
def POST(self, format=None, **kwargs):
return self._performRequest(
'POST', format=format, needData=True, **kwargs)
def _url(self, format=None, **kwargs):
if format is None:
format = payloadFormat
def loop(entity, accumulator):
if entity is None:
return accumulator
else:
if accumulator:
relativeUrl = entity._relativeUrl
if relativeUrl:
passDown = entity._relativeUrl + '/' + accumulator
else:
passDown = accumulator
else:
passDown = entity._relativeUrl
return loop(entity._parentApi, passDown)
if kwargs:
options = '?'
for key, value in iteritems(kwargs):
options += (key + '=' + value + '&')
options = options[:-1]
else:
options = ''
return loop(self, '') + '.' + format + options
def _performRequest(self, method, format=None, needData=False, **kwargs):
if format is None:
format = payloadFormat
logger = subLogger(method)
rootApi = self._rootApi()
url = self._url(format, **kwargs)
if needData:
if format == 'json':
data = self.Json
elif format == 'xml':
data = self.Xml
else:
data = None
logger.debug('-> %s %s', method, url)
if needData:
logger.debug('%s', data)
req = Request(method, url, data=data)
prepped = rootApi._session.prepare_request(req)
# never use certificate for subscription requests
if "subscription" not in kwargs:
self._x509Prep(rootApi, prepped, data)
send_kwargs = rootApi._session.merge_environment_settings(
prepped.url, proxies={}, stream=None, verify=rootApi._verify, cert=None)
if rootApi._userProxies is not None:
send_kwargs['proxies'] = rootApi._userProxies
response = rootApi._session.send(
prepped, timeout=rootApi._timeout, **send_kwargs)
logger.debug('<- %d', response.status_code)
logger.debug('%s', response.text)
if response.status_code != requests.codes.ok:
# TODO: Parse error message and extract fields.
raise RestError(response.text)
return response
def _x509Prep(self, rootApi, req, data):
if rootApi._x509Key is None:
return
payload = '{}{}'.format(req.method, req.url.replace(rootApi._url, ''))
payload = unquote(payload)
if data is not None:
payload += data
signature = base64.b64encode(sign(rootApi._x509Key, payload,
'sha256'))
if sys.version_info[0] >= 3:
signature = signature.decode('ascii')
cookie = ('APIC-Request-Signature={}; '
'APIC-Certificate-Algorithm=v1.0; '
'APIC-Certificate-Fingerprint=fingerprint; '
'APIC-Certificate-DN={}').format(
signature, rootApi._x509Dn)
req.headers['Cookie'] = cookie
def _rootApi(self):
return self._parentApi._rootApi()
class Node(Api):
def __init__(self, url, session=None, verify=False, disableWarnings=True,
timeout=None, aciMetaFilePath=None, userProxies=None):
super(Node, self).__init__(userProxies=userProxies)
self._url = url
if session is not None:
self._session = session
else:
self._session = requests.session()
if aciMetaFilePath is not None:
with open(aciMetaFilePath, 'rb') as f:
logger.debug('Loading meta information from %s',
aciMetaFilePath)
aciMetaContents = json.load(f)
self._aciClassMetas = aciMetaContents['classes']
else:
if not aciClassMetas:
raise MetaError('ACI meta was not specified !')
else:
self._aciClassMetas = aciClassMetas
self._timeout = timeout
self._verify = verify
if disableWarnings:
requests.packages.urllib3.disable_warnings()
self._apiUrlComponent = 'api'
self._x509Key = None
self._wsMos = defaultdict(deque)
self._wsReady = Event()
self._wsEvents = {}
self._autoRefresh = False
self._autoRefreshThread = None
self._login = {}
@property
def session(self):
return self._session
@property
def webSocketUrl(self):
if 'APIC-cookie' in self._rootApi()._session.cookies:
token = self._rootApi()._session.cookies['APIC-cookie']
else:
raise Exception('APIC-cookie NOT found.. Make sure you have logged in.')
return '{}/socket{}'.format(
self._url.replace('https', 'wss').replace('http', 'ws'), token)
def useX509CertAuth(self, userName, certName, keyFile, appcenter=False):
with open(keyFile, 'r') as f:
key = f.read()
if appcenter:
self._x509Dn = (self.mit.polUni().aaaUserEp().
aaaAppUser(userName).aaaUserCert(certName).Dn)
else:
self._x509Dn = (self.mit.polUni().aaaUserEp().
aaaUser(userName).aaaUserCert(certName).Dn)
self._x509Key = load_privatekey(FILETYPE_PEM, key)
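# Hypothetical usage sketch (host, user, certificate name and key path are
# examples only):
#   node = Node('https://apic')
#   node.useX509CertAuth('admin', 'admin-cert', '/home/user/admin.key')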
def toggleTestApi(self, shouldEnable, dme='policymgr'):
if shouldEnable:
self._apiUrlComponent = 'testapi/{}'.format(dme)
else:
self._apiUrlComponent = 'api'
def toggleDebugApi(self, shouldEnable, dme='policymgr'):
if shouldEnable:
self._apiUrlComponent = 'debugapi/{}'.format(dme)
else:
self._apiUrlComponent = 'api'
def startWsListener(self):
logger.info('Establishing WebSocket connection to %s',
self.webSocketUrl)
ws = websocket.WebSocketApp(
self.webSocketUrl,
on_open=self._handleWsOpen,
on_message=self._handleWsMessage,
on_error=self._handleWsError,
on_close=self._handleWsClose)
runForeverKwargs = {"sslopt": {"cert_reqs": ssl.CERT_NONE}}
logger.info("URL {} user_proxy {}".format(self.webSocketUrl, self._userProxies))
if self._userProxies:
try:
proxyUrl = self._userProxies.get("https", self._userProxies.get("http", None))
if proxyUrl:
runForeverKwargs["http_proxy_host"] = urlparse(proxyUrl).netloc.split(":")[0]
runForeverKwargs["http_proxy_port"] = int(urlparse(proxyUrl).netloc.split(":")[1])
runForeverKwargs["proxy_type"] = "http"
except ValueError:
logger.info("http(s) proxy unavailable for {}".format(self.webSocketUrl))
wst = threading.Thread(target=lambda: ws.run_forever(
**runForeverKwargs))
wst.daemon = True
wst.start()
logger.info('Waiting for the WebSocket connection to open')
self._wsStatus = WS_OPENING
self._wsError = None
self._wsReady.wait()
if self._wsStatus != WS_OPENED:
if self._wsError is not None:
raise Exception(self._wsError)
raise Exception('Error occurred when opening Websocket')
def _handleWsOpen(self):
logger.info('Opened WebSocket connection')
self._wsStatus = WS_OPENED
self._wsReady.set()
self._wsLastRefresh = int(time.time())
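# Incoming WebSocket payloads (XML or JSON) are parsed into Mo objects, queued per
# subscription id, and the matching event is set so waiters in waitForWsMo() wake up.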
def _handleWsMessage(self, message):
logger.debug('Got a message on WebSocket: %s', message)
subscriptionIds = []
if message[:5] == '<?xml':
mos = self.mit.ParseXmlResponse(
message, subscriptionIds=subscriptionIds)
else:
mos = self.mit.ParseJsonResponse(
message, subscriptionIds=subscriptionIds)
for subscriptionId in subscriptionIds:
for mo in mos:
self._wsMos[subscriptionId].append(mo)
if subscriptionId not in self._wsEvents:
self._wsEvents[subscriptionId] = Event()
if mos:
self._wsEvents[subscriptionId].set()
def _handleWsError(self, error):
logger.error('Encountered WebSocket error: %s', error)
self._wsStatus = WS_ERRORED
self._wsError = error
self._wsReady.set()
def _handleWsClose(self):
logger.info('Closed WebSocket connection')
self._wsStatus = WS_CLOSED
self._wsReady.set()
def waitForWsMo(self, subscriptionId, timeout=None):
logger.info('Waiting for the WebSocket MOs')
if subscriptionId not in self._wsEvents:
self._wsEvents[subscriptionId] = Event()
return self._wsEvents[subscriptionId].wait(timeout)
def hasWsMo(self, subscriptionId):
return len(self._wsMos[subscriptionId]) > 0
def popWsMo(self, subscriptionId):
mo = self._wsMos[subscriptionId].popleft()
if not self.hasWsMo(subscriptionId):
self._wsEvents[subscriptionId].clear()
return mo
@property
def mit(self):
return Mo(self, 'topRoot', self._aciClassMetas)
@property
def methods(self):
return MethodApi(self)
@property
def _relativeUrl(self):
return self._url + '/' + self._apiUrlComponent
def _rootApi(self):
return self
def _stopArThread(self):
if self._autoRefresh and self._autoRefreshThread is not None:
self._autoRefreshThread.stop()
self._autoRefreshThread = None
self._autoRefresh = False
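# MoIter is both an iterator over the existing children of one class and a factory:
# calling it with the naming properties returns the matching child, creating it first
# unless the tree is read-only.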
class MoIter(Api):
def __init__(self, parentApi, className, objects, aciClassMetas):
self._parentApi = parentApi
self._className = className
self._objects = objects
self._aciClassMetas = aciClassMetas
self._aciClassMeta = aciClassMetas[self._className]
self._rnFormat = self._aciClassMeta['rnFormat']
self._iter = itervalues(self._objects)
def __call__(self, *args, **kwargs):
identifiedBy = self._aciClassMeta['identifiedBy']
if (len(args) >= 1):
if len(args) != len(identifiedBy):
raise MoError(
'Class `{}` requires {} naming properties, '
'but only {} were provided'.format(
self._className, len(identifiedBy), len(args)
)
)
identifierDict = dict(zip(identifiedBy, args))
else:
for name in identifiedBy:
if name not in kwargs:
raise MoError(
'Missing naming property `{}` for class `{}`'.format(
name, self._className
))
identifierDict = kwargs
rn = self._rnFormat.format(**identifierDict)
mo = self._parentApi._getChildByRn(rn)
if mo is None:
if self._parentApi.TopRoot._readOnlyTree:
raise MoError(
'Mo with DN {} does not contain a child with RN {}'
.format(self._parentApi.Dn, rn))
mo = Mo(self._parentApi, self._className, self._aciClassMetas)
for name in identifiedBy:
setattr(mo, name, identifierDict[name])
self._parentApi._addChild(self._className, rn, mo)
self._objects[rn] = mo
for attribute in set(kwargs) - set(identifiedBy):
setattr(mo, attribute, kwargs[attribute])
return mo
def __iter__(self):
return self._iter
def next(self):
return next(self._iter)
def __len__(self):
return len(self._objects)
class Mo(Api):
def __init__(self, parentApi, className, aciClassMetas):
super(Mo, self).__init__(parentApi=parentApi)
self._className = className
self._aciClassMetas = aciClassMetas
self._aciClassMeta = aciClassMetas[self._className]
self._properties = {
x[0]: None
for x in self._aciClassMeta['properties'].items()
}
self._rnFormat = self._aciClassMeta['rnFormat']
self._children = OrderedDict()
self._childrenByClass = defaultdict(OrderedDict)
self._readOnlyTree = False
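# Resolves a DN by splitting it into RNs, mapping each RN prefix to its class through
# rnMap, and spawning the corresponding child Mo level by level.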
def FromDn(self, dn):
def reductionF(acc, rn):
dashAt = rn.find('-')
rnPrefix = rn if dashAt == -1 else rn[:dashAt] + '-'
className = acc._aciClassMeta['rnMap'][rnPrefix]
return acc._spawnChildFromRn(className, rn)
return reduce(reductionF, splitIntoRns(dn), self)
@property
def TopRoot(self):
if self._isTopRoot():
return self
else:
return self._parentApi.TopRoot
@property
def ReadOnlyTree(self):
return self.TopRoot._readOnlyTree
@ReadOnlyTree.setter
def ReadOnlyTree(self, value):
self.TopRoot._readOnlyTree = value
@property
def ClassName(self):
return self._className
@property
def Rn(self):
idDict = {
k: v
for k, v in self._properties.items()
if k in self._aciClassMeta['identifiedBy']
}
return self._rnFormat.format(**idDict)
@property
def Dn(self):
if self._parentApi._isTopRoot():
return self.Rn
else:
return self._parentApi.Dn + '/' + self.Rn
@property
def Parent(self):
if isinstance(self._parentApi, Mo):
return self._parentApi
else:
return None
def Up(self, level=1):
result = self
for i in range(level):
if result.Parent is None:
raise MoError('Reached topRoot after {} levels'.format(i))
result = result.Parent
return result
@property
def Children(self):
return itervalues(self._children)
@property
def Status(self):
return self._properties['status']
@Status.setter
def Status(self, value):
self._properties['status'] = value
@property
def PropertyNames(self):
return sorted(self._properties.keys())
@property
def NonEmptyPropertyNames(self):
return sorted([k for k, v in self._properties.items()
if v is not None])
@property
def IsConfigurable(self):
return self._aciClassMeta['isConfigurable']
def IsConfigurableProperty(self, name):
return (name in self._aciClassMeta['properties'] and
self._aciClassMeta['properties'][name]['isConfigurable'])
@property
def Json(self):
return json.dumps(self._dataDict(),
sort_keys=True, indent=2, separators=(',', ': '))
@Json.setter
def Json(self, value):
self._fromObjectDict(json.loads(value))
@property
def Xml(self):
def element(mo):
result = etree.Element(mo._className)
for key, value in mo._properties.items():
if value is not None:
result.set(key, value)
for child in mo._children.values():
result.append(element(child))
return result
return _elementToString(element(self))
def GetXml(self, elementPredicate=lambda mo: True,
propertyPredicate=lambda mo, name: True):
def element(mo, elementPredicate, propertyPredicate):
if not elementPredicate(mo):
return None
result = etree.Element(mo._className)
for key, value in mo._properties.items():
if value is not None:
if propertyPredicate(mo, key):
result.set(key, value)
for child in mo._children.values():
childElement = element(child, elementPredicate,
propertyPredicate)
if childElement is not None:
result.append(childElement)
return result
return _elementToString(
element(self, elementPredicate, propertyPredicate))
@Xml.setter
def Xml(self, value):
xml = bytes(bytearray(value, encoding='utf-8'))
self._fromXmlElement(etree.fromstring(xml))
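# Parses an imdata XML envelope: any subscriptionId values are collected and every
# child element is turned into a Mo anchored at its dn attribute.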
def ParseXmlResponse(self, xml, localOnly=False, subscriptionIds=[]):
# https://gist.github.com/karlcow/3258330
xml = bytes(bytearray(xml, encoding='utf-8'))
context = etree.iterparse(BytesIO(xml),
events=('end',), tag='imdata')
mos = []
event, root = next(context)
sIds = root.get('subscriptionId', '')
if sIds:
subscriptionIds.extend([str(x) for x in sIds.split(',')])
for element in root.iterchildren():
if 'dn' not in element.attrib:
raise MoError('Property `dn` not found in element {}'.format(
_elementToString(element)))
if element.tag == 'moCount':
mo = self.moCount()
else:
mo = self.FromDn(element.attrib['dn'])
mo._fromXmlElement(element, localOnly=localOnly)
element.clear()
mos.append(mo)
return mos
def ParseJsonResponse(self, text, subscriptionIds=[]):
response = json.loads(text)
sIds = response.get('subscriptionId', [])
if sIds:
subscriptionIds.extend(sIds)
mos = []
for element in response['imdata']:
name, value = next(iteritems(element))
if 'dn' not in value['attributes']:
raise MoError('Property `dn` not found in dict {}'.format(
value['attributes']))
mo = self.FromDn(value['attributes']['dn'])
mo._fromObjectDict(element)
mos.append(mo)
return mos
def GET(self, format=None, **kwargs):
if format is None:
format = payloadFormat
topRoot = self.TopRoot
subscriptionIds = []
response = super(Mo, self).GET(format, **kwargs)
if format == 'json':
result = topRoot.ParseJsonResponse(response.text,
subscriptionIds=subscriptionIds)
elif format == 'xml':
result = topRoot.ParseXmlResponse(response.text,
subscriptionIds=subscriptionIds)
topRoot.ReadOnlyTree = True
if subscriptionIds:
return result, subscriptionIds[0]
else:
return result
@property
def _relativeUrl(self):
if self._className == 'topRoot':
return 'mo'
else:
return self.Rn
def _fromObjectDict(self, objectDict):
attributes = objectDict[self._className].get('attributes', {})
for key, value in attributes.items():
self._properties[key] = value
children = objectDict[self._className].get('children', [])
for cdict in children:
className = next(iterkeys(cdict))
attributes = next(itervalues(cdict)).get('attributes', {})
child = self._spawnChildFromAttributes(className, **attributes)
child._fromObjectDict(cdict)
def _fromXmlElement(self, element, localOnly=False):
if element.tag != self._className:
raise MoError(
'Root element tag `{}` does not match with class `{}`'
.format(element.tag, self._className))
if localOnly and element.attrib.get('lcOwn', 'local') != 'local':
return
for key, value in element.attrib.items():
self._properties[key] = value
for celement in element.iterchildren('*'):
className = celement.tag
attributes = celement.attrib
child = self._spawnChildFromAttributes(className, **attributes)
child._fromXmlElement(celement, localOnly=localOnly)
def _dataDict(self):
data = {}
objectData = {}
data[self._className] = objectData
attributes = {
k: v
for k, v in self._properties.items()
if v is not None
}
if attributes:
objectData['attributes'] = attributes
if self._children:
objectData['children'] = []
for child in self._children.values():
objectData['children'].append(child._dataDict())
return data
def __getattr__(self, name):
if name in self._properties:
return self._properties[name]
if name in self._aciClassMeta['contains']:
return MoIter(self, name, self._childrenByClass[name],
aciClassMetas=self._aciClassMetas)
raise AttributeError('{} is not a valid attribute for class {}'.
format(name, self.ClassName))
def __setattr__(self, name, value):
if '_properties' in self.__dict__ and name in self._properties:
self._properties[name] = value
else:
super(Mo, self).__setattr__(name, value)
def _isTopRoot(self):
return self._className == 'topRoot'
def _getChildByRn(self, rn):
return self._children.get(rn, None)
def _addChild(self, className, rn, child):
self._children[rn] = child
self._childrenByClass[className][rn] = child
def _spawnChildFromRn(self, className, rn):
# TODO: Refactor.
moIter = getattr(self, className)
parsed = parse.parse(moIter._rnFormat, rn)
if parsed is None:
logging.debug('RN parsing failed, RN: {}, format: {}'.
format(rn, moIter._rnFormat))
# FIXME (2015-04-08, Praveen Kumar): Hack alert!
rn = rn.replace('[]', '[None]')
if rn.endswith('-'):
rn = rn + 'None'
parsed = parse.parse(moIter._rnFormat, rn)
identifierDict = parsed.named
orderedIdentifiers = [
t[0] for t in sorted(parsed.spans.items(),
key=operator.itemgetter(1))
]
identifierArgs = [
identifierDict[name] for name in orderedIdentifiers
]
return moIter(*identifierArgs)
def _spawnChildFromAttributes(self, className, **attributes):
rnFormat = self._aciClassMetas[className]['rnFormat']
rn = rnFormat.format(**attributes)
return self._spawnChildFromRn(className, rn)
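# Daemon thread that keeps the session alive: it refreshes the login token and any
# active WebSocket subscriptions shortly before they expire.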
class AutoRefreshThread(threading.Thread):
REFRESH_BEFORE = 60 # approx - this many seconds before expiry, do token refresh
CHECK_INTERVAL = 10 # how long to sleep before waking to check for any work to do
WS_REFRESH_INT = 40 # approx - this many seconds before subscription refresh
def __init__(self, rootApi):
super(AutoRefreshThread, self).__init__()
self._stop_event = threading.Event()
self._rootApi = rootApi
def stop(self):
self._stop_event.set()
def isStopped(self):
return self._stop_event.is_set()
def _refreshLoginIfNeeded(self):
now = int(time.time())
if now + self.REFRESH_BEFORE > self._rootApi._login['nextRefreshBefore']:
logger.debug('arThread: Need to refresh Token')
refObj = self._rootApi.methods.LoginRefresh()
resp = refObj.GET()
# Process refresh response
if payloadFormat != 'xml' or resp.text[:5] != '<?xml':
logger.error('Only the XML format of aaaLogin is currently supported')
return
doc = xmltodict.parse(resp.text)
if 'imdata' in doc:
if 'aaaLogin' in doc['imdata']:
root = self._rootApi
root._login = {}
lastLogin = int(time.time())
root._login['lastLoginTime'] = lastLogin
root._login['nextRefreshBefore'] = lastLogin - DELTA + \
int(doc['imdata']['aaaLogin']['@refreshTimeoutSeconds'])
else:
logger.error('arThread: response for aaaRefresh does not have required aaaLogin Tag')
else:
logger.error('arThread: response for aaaRefresh does not have required imdata Tag')
return
def _refreshSubscriptionsIfNeeded(self):
now = int(time.time())
if len(self._rootApi._wsEvents) > 0 and \
now >= self._rootApi._wsLastRefresh + self.WS_REFRESH_INT:
ids = ','.join(self._rootApi._wsEvents)
logger.debug('Refreshing Ids: %s', ids)
wsRefreshObj = self._rootApi.methods.RefreshSubscriptions(ids)
resp = wsRefreshObj.GET()
if resp.status_code != requests.codes.ok:
logger.error('Subscription refresh failed: ' + resp.text)
else:
self._rootApi._wsLastRefresh = now
return
def run(self):
logger.debug('arThread: Starting up')
while True:
time.sleep(self.CHECK_INTERVAL)
if self.isStopped():
break
self._refreshLoginIfNeeded()
self._refreshSubscriptionsIfNeeded()
logger.debug('arThread: Terminating')
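# aaaLogin wrapper: posts the credentials, records the token refresh deadline from the
# XML response, and starts the auto-refresh thread when requested.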
class LoginMethod(Api):
def __init__(self, parentApi):
super(LoginMethod, self).__init__(parentApi=parentApi)
self._moClassName = 'aaaUser'
self._properties = {}
def POST(self, format=None, **kwargs):
resp = super(LoginMethod, self).POST(format=format, **kwargs)
if resp is None or resp.status_code != requests.codes.ok:
logger.debug('Login failed!')
return resp
if payloadFormat != 'xml' or resp.text[:5] != '<?xml':
logger.error('Only the XML format of aaaLogin is currently supported')
return resp
doc = xmltodict.parse(resp.text)
if 'imdata' in doc:
if 'aaaLogin' in doc['imdata']:
root = self._rootApi()
root._login = {}
root._login['version'] = doc['imdata']['aaaLogin']['@version']
root._login['userName'] = doc['imdata']['aaaLogin']['@userName']
lastLogin = int(time.time())
root._login['lastLoginTime'] = lastLogin
root._login['nextRefreshBefore'] = lastLogin - DELTA + \
int(doc['imdata']['aaaLogin']['@refreshTimeoutSeconds'])
logger.debug(root._login)
if root._autoRefresh:
arThread = AutoRefreshThread(root)
root._autoRefreshThread = arThread
arThread.daemon = True
arThread.start()
return resp
@property
def Json(self):
result = {}
result[self._moClassName] = {'attributes': self._properties.copy()}
return json.dumps(result,
sort_keys=True, indent=2, separators=(',', ': '))
@property
def Xml(self):
result = etree.Element(self._moClassName)
for key, value in self._properties.items():
result.set(key, value)
return _elementToString(result)
@property
def _relativeUrl(self):
return 'aaaLogin'
def __call__(self, name, password=None, passwordFile=None, autoRefresh=False):
if password is None and passwordFile is None:
password = getpass.getpass('Enter {} password: '.format(name))
elif password is None:
with open(passwordFile, 'r') as f:
password = f.read()
self._properties['name'] = name
self._properties['pwd'] = password
rootApi = self._rootApi()
rootApi._autoRefresh = autoRefresh
return self
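# requestAppToken wrapper for app-center logins; the returned token is stored as the
# APIC-cookie manually because the endpoint does not set it.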
class AppLoginMethod(Api):
def __init__(self, parentApi):
super(AppLoginMethod, self).__init__(parentApi=parentApi)
self._moClassName = "aaaAppToken"
self._properties = {}
def POST(self, format=None, **kwargs):
resp = super(AppLoginMethod, self).POST(format=format, **kwargs)
if resp is None or resp.status_code != requests.codes.ok:
logger.debug('Login failed!')
return resp
if payloadFormat != 'xml' or resp.text[:5] != '<?xml':
logger.error('Only the XML format of AppLogin is currently supported')
return resp
# NOTE (2021-02-03, Praveen Kumar): /api/requestAppToken.xml doesn't set
# the token in the cookies automatically. Hence, intercept the response
# and set the cookie explicitly.
doc = xmltodict.parse(resp.text)
if 'imdata' in doc:
if 'aaaLogin' in doc['imdata']:
token = doc['imdata']['aaaLogin']['@token']
domain = urlparse(resp.url).netloc.split(':')[0]
self._rootApi().session.cookies.set(
'APIC-cookie', token, domain=domain
)
return resp
@property
def Json(self):
result = {}
result[self._moClassName] = {'attributes': self._properties.copy()}
return json.dumps(result,
sort_keys=True, indent=2, separators=(',', ': '))
@property
def Xml(self):
result = etree.Element(self._moClassName)
for key, value in self._properties.items():
result.set(key, value)
return _elementToString(result)
@property
def _relativeUrl(self):
return 'requestAppToken'
def __call__(self, appName):
self._properties['appName'] = appName
return self
class LoginRefreshMethod(Api):
def __init__(self, parentApi):
super(LoginRefreshMethod, self).__init__(parentApi=parentApi)
self._moClassName = 'aaaRefresh'
@property
def Json(self):
return ''
@property
def Xml(self):
return ''
@property
def _relativeUrl(self):
return 'aaaRefresh'
def __call__(self):
return self
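# changeSelfX509Cert wrapper: reads the certificate file and registers it under the
# given certificate name for the user.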
class ChangeCertMethod(Api):
def __init__(self, parentApi):
super(ChangeCertMethod, self).__init__(parentApi=parentApi)
self._moClassName = 'aaaChangeX509Cert'
self._properties = {}
@property
def Json(self):
result = {}
result[self._moClassName] = {'attributes': self._properties.copy()}
return json.dumps(result,
sort_keys=True, indent=2, separators=(',', ': '))
@property
def Xml(self):
result = etree.Element(self._moClassName)
for key, value in self._properties.items():
result.set(key, value)
return _elementToString(result)
@property
def _relativeUrl(self):
return 'changeSelfX509Cert'
def __call__(self, userName, certName, certFile):
self._properties['userName'] = userName
self._properties['name'] = certName
with open(certFile, 'r') as f:
self._properties['data'] = f.read()
return self
class LogoutMethod(Api):
def __init__(self, parentApi):
super(LogoutMethod, self).__init__(parentApi=parentApi)
self._moClassName = 'aaaUser'
self._properties = {}
def POST(self, format=None, **kwargs):
resp = super(LogoutMethod, self).POST(format=format, **kwargs)
if resp.status_code == requests.codes.ok:
self._rootApi()._stopArThread()
return resp
@property
def Json(self):
result = {}
result[self._moClassName] = {'attributes': self._properties.copy()}
return json.dumps(result,
sort_keys=True, indent=2, separators=(',', ': '))
@property
def Xml(self):
result = etree.Element(self._moClassName)
for key, value in self._properties.items():
result.set(key, value)
return _elementToString(result)
@property
def _relativeUrl(self):
return 'aaaLogout'
def __call__(self, user=None):
root = self._rootApi()
if user is None:
self._properties['name'] = root._login['userName']
else:
self._properties['name'] = user
return self
class RefreshSubscriptionsMethod(Api):
def __init__(self, parentApi):
super(RefreshSubscriptionsMethod, self).__init__(parentApi=parentApi)
def GET(self, format=None, **kwargs):
resp = None
for sid in self._ids.split(','):
args = {'id': sid}
args.update(kwargs)
resp = super(RefreshSubscriptionsMethod, self).GET(format=format, **args)
if resp.status_code != requests.codes.ok:
logger.error('Refresh of subscription id %s failed with status code: %d', sid, resp.status_code)
# The subscription refresh endpoint currently handles one id at a time,
# so we loop here; once it supports multiple ids, the whole set can be
# passed in a single call.
return resp
@property
def Json(self):
return ''
@property
def Xml(self):
return ''
@property
def _relativeUrl(self):
return 'subscriptionRefresh'
def __call__(self, ids):
''' ids are comma-separated subscription ids '''
self._ids = ids
return self
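# Uploads a package file to the APIC with a multipart POST against ppi/node/mo (XML only).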
class UploadPackageMethod(Api):
def __init__(self, parentApi):
super(UploadPackageMethod, self).__init__(parentApi=parentApi)
self._packageFile = None
@property
def _relativeUrl(self):
return 'ppi/node/mo'
def __call__(self, packageFile):
self._packageFile = packageFile
return self
def POST(self, format='xml'):
# TODO (2015-05-23, Praveen Kumar): Fix this method to work
# with certificate based authentication.
root = self._rootApi()
if format != 'xml':
raise UserError('Unsupported format: {}'.format(format))
if not os.path.exists(self._packageFile):
raise ResourceError('File not found: ' + self._packageFile)
with open(self._packageFile, 'r') as f:
response = root._session.request(
'POST', self._url(format), files={'file': f},
verify=root._verify
)
if response.status_code != requests.codes.ok:
# TODO: Parse error message and extract fields.
raise RestError(response.text)
return response
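# Class-level query (GET class/<className>); with autoPage=True the results are fetched
# page by page and merged into a single list.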
class ResolveClassMethod(Api):
def __init__(self, parentApi):
super(ResolveClassMethod, self).__init__(parentApi=parentApi)
@property
def _relativeUrl(self):
return 'class/' + self._className
def __call__(self, className):
self._className = className
return self
def GET(self, format=None, mit=None, autoPage=False, pageSize=10000,
**kwargs):
if format is None:
format = payloadFormat
subscriptionIds = []
topRoot = self._rootApi().mit if mit is None else mit
if autoPage:
# TODO: Subscription is not supported with autoPage option.
if 'subscription' in kwargs:
raise UserError(
'Subscription is not supported with autoPage option')
logger.debug('Auto paginating query with page size of %d',
pageSize)
currentPage = 0
results = []
while True:
pageOptions = (options.pageSize(str(pageSize)) &
options.page(str(currentPage)))
newKwargs = dict(list(pageOptions.items()) + list(kwargs.items()))
logger.debug('Querying page %d', currentPage)
response = super(ResolveClassMethod, self).GET(format,
**newKwargs)
if format == 'json':
result = topRoot.ParseJsonResponse(response.text)
elif format == 'xml':
result = topRoot.ParseXmlResponse(response.text)
logger.debug('Got %s objects', len(result))
results.append(result)
if len(result) < pageSize:
break
currentPage += 1
result = [mo for resultList in results for mo in resultList]
else:
response = super(ResolveClassMethod, self).GET(format, **kwargs)
if format == 'json':
result = topRoot.ParseJsonResponse(
response.text, subscriptionIds=subscriptionIds)
elif format == 'xml':
result = topRoot.ParseXmlResponse(
response.text, subscriptionIds=subscriptionIds)
topRoot.ReadOnlyTree = True
if subscriptionIds:
return result, subscriptionIds[0]
else:
return result
class MethodApi(Api):
def __init__(self, parentApi):
super(MethodApi, self).__init__(parentApi=parentApi)
@property
def _relativeUrl(self):
return ''
@property
def Login(self):
return LoginMethod(parentApi=self)
@property
def AppLogin(self):
return AppLoginMethod(parentApi=self)
@property
def LoginRefresh(self):
return LoginRefreshMethod(parentApi=self)
@property
def Logout(self):
return LogoutMethod(parentApi=self)
@property
def RefreshSubscriptions(self):
return RefreshSubscriptionsMethod(parentApi=self)
@property
def ChangeCert(self):
return ChangeCertMethod(parentApi=self)
@property
def UploadPackage(self):
return UploadPackageMethod(parentApi=self)
@property
def ResolveClass(self):
return ResolveClassMethod(parentApi=self)
| [] | [] | [
"ACI_META_DIR"
] | [] | ["ACI_META_DIR"] | python | 1 | 0 | |
test/wappsto_test.py | """
The test module.
Tests wappsto project functionality.
"""
import re
import os
import math
import json
import pytest
import wappsto
import zipfile
import jsonschema
from mock import Mock
from unittest.mock import patch
import urllib.parse as urlparse
from urllib.parse import parse_qs
from wappsto import status
from wappsto.connection import message_data
from wappsto.errors import wappsto_errors
from wappsto.connection import event_storage
ADDRESS = "wappsto.com"
PORT = 11006
TEST_JSON = "test_JSON/test_json.json"
def check_for_correct_conn(*args, **kwargs):
"""
Check if connection is valid.
Checks the provided address and port; if they do not match the expected values, raises the same
exception that would be raised when connecting with wrong details.
Args:
args: arguments that method was called with
kwargs: keyword arguments
"""
if args[0][0] != ADDRESS or args[0][1] != PORT:
raise wappsto_errors.ServerConnectionException
def fake_connect(self, address, port, send_trace=False):
"""
Creates fake connection.
Mocks the connection so that no call leaves the test environment, which also makes the tests run faster.
Args:
self: the instance of the calling object
address: address used for connecting to server
port: port used for connecting to server
send_trace: Boolean indicating if trace should be automatically sent
"""
def check_for_correct_conn(*args, **kwargs):
if args[0][0] != ADDRESS or args[0][1] != PORT:
raise wappsto_errors.ServerConnectionException
wappsto.RETRY_LIMIT = 2
with patch("ssl.SSLContext.wrap_socket") as context:
context.connect = Mock(side_effect=check_for_correct_conn)
with patch('time.sleep', return_value=None), \
patch('threading.Thread'), \
patch('threading.Timer'), \
patch('wappsto.communication.ClientSocket.add_id_to_confirm_list'), \
patch('wappsto.Wappsto.keep_running'), \
patch('socket.socket'), \
patch('ssl.SSLContext.wrap_socket', return_value=context):
self.service.start(address=address, port=port, automatic_trace=send_trace)
def fix_object_callback(callback_exists, testing_object):
"""
Add callback to object.
Depending on the callback_exists flag, either assigns a Mock as the callback or sets the callback to None.
Args:
callback_exists: boolean indicating if callback should exist.
testing_object: object to whom callback needs to be set.
"""
if callback_exists:
test_callback = Mock(return_value=True)
testing_object.set_callback(test_callback)
else:
try:
testing_object.set_callback(None)
except wappsto_errors.CallbackNotCallableException:
pass
def get_object(network, name):
"""
Get object from network.
Get object based on the name provided.
Args:
network: The network from where the data should be found.
name: Name indicating the object being searched for.
Returns:
The found object.
"""
if name == "network":
return network.service.data_manager.network
elif name == "device":
return network.service.data_manager.network.devices[0]
elif name == "value":
return network.service.data_manager.network.devices[0].values[0]
elif name == "control_state":
return network.service.data_manager.network.devices[0].values[0].get_control_state()
elif name == "report_state":
return network.service.data_manager.network.devices[0].values[0].get_report_state()
return None
def send_response(instance,
verb,
trace_id=None,
bulk=None,
message_id=None,
element_id=None,
data=None,
split_message=None,
element_type=None,
period=None,
delta=None):
"""
Sends response.
Sends responses to be used in receive tests based on the parameters provided.
Args:
instance: The current Test instance.
verb: specifies if request is DELETE/PUT/POST/GET
trace_id: id used for tracing messages
bulk: Boolean value indicating if multiple messages should be sent at once.
message_id: id used to indicate the specific message
element_id: id used for indicating element
data: data to be sent
split_message: Boolean value indicating if message should be sent in parts
element_type: type of the element being targeted (e.g. 'state', 'value' or 'network')
period: parameter indicating whether value should be updated periodically
delta: delta of value (determines if change was significant enough to be sent)
Returns:
the generated message.
"""
trace = ""
if verb == "DELETE" or verb == "PUT" or verb == "GET":
if data is None:
params = None
else:
if trace_id is None:
trace = None
else:
trace = {"trace": str(trace_id)}
if element_id is None:
meta = None
else:
meta = {"id": element_id,
"type": element_type}
params = {"meta": trace,
"url": "/{}/{}".format(element_type, element_id),
"data": {
"meta": meta,
"data": data,
"period": period,
"delta": delta}}
message = {"jsonrpc": "2.0",
"id": message_id,
"params": params,
"method": verb}
elif verb == "error" or verb == "result":
if data:
message_value = {"data": data,
"type": "Control",
"timestamp": "2020-01-20T09:20:21.092Z",
"meta": {
"type": "state",
"version": "2.0",
"id": element_id}}
else:
message_value = "True"
message = {"jsonrpc": "2.0",
"id": message_id,
verb: {
"value": message_value,
"meta": {
"server_send_time": "2020-01-22T08:22:55.315Z"}}}
instance.service.socket.packet_awaiting_confirm[message_id] = message
else:
message = {"jsonrpc": "2.0", "id": "1", "params": {}, "method": "??????"}
if bulk:
message = [message, message]
message = json.dumps(message)
if split_message:
message_size = math.ceil(len(message) / 2)
message1 = message[:message_size]
message2 = message[message_size:]
wappsto.connection.communication.receive_data.RECEIVE_SIZE = message_size
instance.service.socket.my_socket.recv = Mock(side_effect=[message1.encode("utf-8"),
message2.encode("utf-8"),
KeyboardInterrupt])
else:
instance.service.socket.my_socket.recv = Mock(side_effect=[message.encode("utf-8"),
KeyboardInterrupt])
def validate_json(json_schema, arg):
"""
Validates json.
Validates json and returns Boolean value indicating if it is valid.
Args:
json_schema: Schema to validate message against
arg: sent message
Returns:
Boolean value indicating if message is valid
"""
schema_location = os.path.join(
os.path.dirname(__file__),
"schema/" + json_schema + ".json")
with open(schema_location, "r") as json_file:
schema = json.load(json_file)
base_uri = os.path.join(os.path.dirname(__file__), "schema")
base_uri = base_uri.replace("\\", "/")
base_uri = "file:///" + base_uri + "/"
resolver = jsonschema.RefResolver(base_uri, schema)
try:
for i in arg:
jsonschema.validate(i, schema, resolver=resolver)
return True
except jsonschema.exceptions.ValidationError:
return False
def set_up_log(self, log_file_exists, file_size, make_zip=False):
"""
Sets up logs.
Deletes all log files and creates new one if log file should exist.
Args:
self: reference to the calling object
log_file_exists: boolean indicating if log file should exist
file_size: size (in MB) of the log file to create
make_zip: boolean indicating if the log file should be zipped
Returns:
path to the latest file
"""
file_name = self.service.event_storage.get_log_name()
file_path = self.service.event_storage.get_file_path(file_name)
log_location = self.service.event_storage.log_location
# removes all files
for root, dirs, files in os.walk(log_location):
for file in files:
os.remove(os.path.join(root, file))
# creates file
if log_file_exists:
with open(file_path, "w") as file:
num_chars = int((1024 * 1024 * file_size) / 10)
string = ""
data = "0" * num_chars
for i in range(10):
string += '[{"data": "' + data + '"}]\n'
file.write(string)
if make_zip:
with zipfile.ZipFile(file_path.replace(".txt", ".zip"), "w") as zip_file:
zip_file.write(file_path, file_name)
os.remove(file_path)
with open(self.service.event_storage.get_file_path("2000-1.txt"), "w") as file:
file.write("")
return file_path
def check_for_logged_info(*args, **kwargs):
"""
Checks for provided data in logger.
If the logger is provided with the necessary information,
KeyboardInterrupt is raised to stop the test.
Args:
args: arguments that method was called with
kwargs: keyword arguments
"""
if (re.search("^Raw log Json:", args[0])
or re.search("^Sending while not connected$", args[0])
or re.search("^Received message exeeds size limit.$", args[0])
or re.search("^Unhandled send$", args[0])):
raise KeyboardInterrupt
# ################################## TESTS ################################## #
class TestJsonLoadClass:
"""
TestJsonLoadClass instance.
Tests loading json files in wappsto.
"""
@classmethod
def setup_class(self):
"""
Sets up the class.
Sets locations to be used in test.
"""
self.test_json_location = os.path.join(
os.path.dirname(__file__),
TEST_JSON)
@pytest.mark.parametrize("valid_location", [True, False])
@pytest.mark.parametrize("valid_json", [True, False])
def test_load_json(self, valid_location, valid_json):
"""
Tests loading json.
Loads a json file and checks that pretty-printed json is read the same way as an ordinary file.
Args:
valid_location: parameter indicating whether file should be found in the location
valid_json: parameter indicating whether file should be parsed successfully
"""
# Arrange
if not valid_location:
test_json_location_2 = "wrong_location"
else:
if not valid_json:
test_json_location_2 = os.path.join(
os.path.dirname(__file__),
"test_JSON/test_json_wrong.json")
else:
test_json_location_2 = os.path.join(
os.path.dirname(__file__),
"test_JSON/test_json_prettyprint.json")
with open(self.test_json_location, "r") as json_file:
decoded = json.load(json_file)
# Act
try:
service = wappsto.Wappsto(json_file_name=test_json_location_2)
except FileNotFoundError:
service = None
except json.JSONDecodeError:
service = None
# Assert
assert (valid_location and valid_json) == bool(service)
if service is not None:
assert service.data_manager.decoded == decoded
@pytest.mark.parametrize("object_exists", [True, False])
@pytest.mark.parametrize("object_name", ["network", "device", "value", "control_state", "report_state"])
def test_get_by_id(self, object_exists, object_name):
"""
Tests getting element by id.
Gets id and checks if result is the expected one.
Args:
object_exists: indicates if element should exist
object_name: name of the object to be updated
"""
# Arrange
self.service = wappsto.Wappsto(json_file_name=self.test_json_location)
get_object(self, "network").conn = Mock()
actual_object = get_object(self, object_name)
id = actual_object.uuid
if not object_exists:
actual_object.delete()
# Act
result = self.service.get_by_id(id)
# Assert
assert (object_exists and result is not None) or (not object_exists and result is None)
@pytest.mark.parametrize("save_as_string", [True, False])
def test_load_existing_instance(self, save_as_string):
"""
Tests loading existing instance.
Creates new instance and then tries to load it.
Args:
save_as_string: indicates if the information inside data should be saved as a string.
"""
# Arrange
self.service = wappsto.Wappsto(json_file_name=self.test_json_location)
saved_network = self.service.data_manager.get_encoded_network()
file_name = "test.json"
encoded = saved_network
if save_as_string:
encoded = json.dumps(encoded)
encoded = encoded.replace("\'", "\\\"")
encoded = json.dumps({"data": encoded})
path = os.path.join(self.service.data_manager.path_to_calling_file, 'saved_instances')
os.makedirs(path, exist_ok=True)
path_open = os.path.join(path, file_name)
with open(path_open, "w+") as network_file:
network_file.write(encoded)
# Act
self.service = wappsto.Wappsto(json_file_name=file_name, load_from_state_file=True)
# Assert
assert saved_network == self.service.data_manager.get_encoded_network()
class TestConnClass:
"""
TestConnClass instance.
Tests connecting to wappsto server.
"""
@pytest.mark.parametrize("address,port,expected_status", [
(ADDRESS, PORT, status.RUNNING),
(ADDRESS, -1, status.DISCONNECTING),
("wappstoFail.com", PORT, status.DISCONNECTING)])
@pytest.mark.parametrize("send_trace", [True, False])
@pytest.mark.parametrize("callback_exists", [True, False])
@pytest.mark.parametrize("upgradable", [True, False])
@pytest.mark.parametrize("valid_json", [True, False])
@pytest.mark.parametrize("log_location", ["test_logs/logs"])
@pytest.mark.parametrize("log_offline,log_file_exists,make_zip", [
(True, True, True),
(True, True, False),
(True, False, False),
(False, False, False)])
@pytest.mark.parametrize("load_from_state_file", [True, False])
def test_connection(self, address, port, expected_status, callback_exists, send_trace,
upgradable, valid_json, log_offline, log_location,
log_file_exists, load_from_state_file, make_zip):
"""
Tests connection.
Tests if connecting works as expected with different setups.
Args:
address: address used for connecting to server
port: port used for connecting to server
callback_exists: specifies if object should have callback
expected_status: status expected after execution of the test
upgradable: specifies if object is upgradable
send_trace: Boolean indicating if trace should be automatically sent
valid_json: Boolean indicating if the sent json should be valid
log_offline: boolean indicating if data should be logged
log_location: location of the logs
log_file_exists: boolean indicating if log file exists
load_from_state_file: Defines if the data should be loaded from saved files
make_zip: boolean indicating if log file should be zipped
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
load_from_state_file=load_from_state_file,
log_offline=log_offline,
log_location=log_location)
status_service = self.service.get_status()
fix_object_callback(callback_exists, status_service)
urlopen_trace_id = sent_json_trace_id = ''
if not valid_json:
self.service.data_manager.network.uuid = None
set_up_log(self, log_file_exists, 1, make_zip)
def send_log():
self.service.event_storage.send_log(self.service.socket)
# Act
with patch("os.getenv", return_value=str(upgradable)), \
patch('urllib.request.urlopen') as urlopen, \
patch("wappsto.communication.ClientSocket.send_logged_data", side_effect=send_log):
try:
fake_connect(self, address, port, send_trace)
args, kwargs = self.service.socket.my_socket.send.call_args
arg = json.loads(args[0].decode("utf-8"))
sent_json = arg[-1]["params"]["data"]
if send_trace:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_urlopen = urlparse.urlparse(urlopen_args[0])
urlopen_trace_id = parse_qs(parsed_urlopen.query)['id']
parsed_sent_json = urlparse.urlparse(arg[0]['params']['url'])
sent_json_trace_id = parse_qs(parsed_sent_json.query)['trace']
except wappsto_errors.ServerConnectionException:
sent_json = None
arg = []
pass
# Assert
if sent_json is not None:
if log_offline:
assert len(os.listdir(log_location)) == 0
assert validate_json("request", arg) == valid_json
assert "None" not in str(sent_json)
assert sent_json_trace_id == urlopen_trace_id
assert (send_trace and urlopen_trace_id != ''
or not send_trace and urlopen_trace_id == '')
assert (upgradable and "upgradable" in str(sent_json["meta"])
or not upgradable and "upgradable" not in str(sent_json["meta"]))
if callback_exists:
assert status_service.callback.call_args[0][-1].current_status == expected_status
assert self.service.status.get_status() == expected_status
@pytest.mark.parametrize("load_from_state_file", [True, False])
@pytest.mark.parametrize("save", [True, False])
def test_close(self, load_from_state_file, save):
"""
Tests closing connection.
Tests if closing the connection works as expected with different setups.
Args:
load_from_state_file: Defines if the data should be loaded from saved files
save: Flag to determine whether runtime instances should be saved
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
load_from_state_file=load_from_state_file)
fake_connect(self, ADDRESS, PORT)
path = os.path.join(self.service.data_manager.path_to_calling_file, 'saved_instances')
network_id = self.service.data_manager.json_file_name = "test.json"
path_open = os.path.join(path, network_id)
if os.path.exists(path_open):
os.remove(path_open)
# Act
self.service.stop(save)
# Assert
assert save == os.path.isfile(path_open)
class TestValueSendClass:
"""
TestValueSendClass instance.
Tests sending value to wappsto server.
"""
def setup_method(self):
"""
Sets up each method.
Sets the location to be used in the test and initializes the service.
"""
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location)
# TODO(MBK): Rewrite to check the set Control value instead of Report value
@pytest.mark.parametrize("input,step_size,expected", [
(8, 1, "8"), # value on the step
(8, -1, "8"),
(-8, 1, "-8"),
(-8, -1, "-8"),
(100, 1, "100"),
(-100, 1, "-100"),
(0, 1, "0"),
(-0, 1, "0"),
# (-99.9, 1, "-100"), # decimal value
# (-0.1, 1, "-1"),
# (0.1, 1, "0"),
# (3.3, 1, "3"),
# (3.0, 1, "3"),
# (3.9, 1, "3"),
# (0.02442002442002442, 1, "0"),
# (-0.1, 1, "-1"),
# (-3.3, 1, "-4"),
# (-3.0, 1, "-3"),
# (-3.9, 1, "-4"),
# (-101, 1, None), # out of range
# (101, 1, None),
# (3, 2, "2"), # big steps
# (3.999, 2, "2"),
(4, 2, "4"),
# (-3, 2, "-4"),
# (-3.999, 2, "-4"),
(-4, 2, "-4"),
(1, 0.5, "1"), # decimal steps
# (1.01, 0.02, "1"),
# (2.002, 0.02, "2"),
(2.002, 0.0002, "2.002"),
(-1, 0.5, "-1"),
# (-1.01, 0.02, "-1.02"),
# (-2.002, 0.02, "-2.02"),
(-2.002, 0.0002, "-2.002"),
(2, 1.0e-07, "2"),
# (2, 123.456e-5, "1.9999872"),
# (1, 9.0e-20, "0.99999999999999999999"),
# (0.02442002442002442001001, 0.00000000000002, "0.02442002442002")
])
@pytest.mark.parametrize("send_trace", [True, False])
@pytest.mark.parametrize("delta", [None, 0.1, 1, 100])
@pytest.mark.parametrize("period", [True, False])
def test_send_value_update_number_type(self, input, step_size, expected, send_trace, delta, period):
"""
Tests sending update for number value.
Tests if expected message is being sent.
Args:
input: value to be updated
step_size: step size value should follow
expected: value expected to be sent
send_trace: Boolean indicating if trace should be automatically sent
delta: delta of value (determines if change was significant enough to be sent)
period: parameter indicating whether value should be updated periodically
"""
# Arrange
with patch('urllib.request.urlopen'):
fake_connect(self, ADDRESS, PORT, send_trace)
self.service.socket.my_socket.send = Mock()
urlopen_trace_id = sent_json_trace_id = ''
device = self.service.get_device("device-1")
value = next(val for val in device.values if val.data_type == "number")
value.data_type = "number"
value.number_step = step_size
if delta:
value.last_update_of_report = 0
value.set_delta(delta)
if abs(input - value.last_update_of_report) < value.delta:
# if change is less then delta then no message would be sent
expected = None
# Act
with patch('urllib.request.urlopen') as urlopen:
try:
if period is True and delta is None:
with patch('threading.Timer.start') as start:
value.set_period(1)
value.timer_elapsed = True
if start.called:
value.update(input)
else:
value.update(input)
args, kwargs = self.service.socket.my_socket.send.call_args
arg = json.loads(args[0].decode("utf-8"))
result = arg[0]["params"]["data"]["data"]
if send_trace:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_urlopen = urlparse.urlparse(urlopen_args[0])
urlopen_trace_id = parse_qs(parsed_urlopen.query)['id']
parsed_sent_json = urlparse.urlparse(arg[0]['params']['url'])
sent_json_trace_id = parse_qs(parsed_sent_json.query)['trace']
except TypeError:
result = None
arg = []
# Assert
assert validate_json("request", arg) is True
assert result == expected
assert sent_json_trace_id == urlopen_trace_id
if send_trace and result is not None:
assert urlopen_trace_id != ''
else:
assert urlopen_trace_id == ''
@pytest.mark.parametrize("input,max,expected", [
("test", 10, "test"), # value under max
("", 10, ""),
("", 0, ""), # value on max
("testtestte", 10, "testtestte"),
("", None, ""), # no max
("testtesttesttesttesttest", None,
"testtesttesttesttesttest"),
(None, 10, None), # no value
(None, None, None),
("test", 1, None)]) # value over max
@pytest.mark.parametrize("type", ["string", "blob"])
@pytest.mark.parametrize("send_trace", [True, False])
@pytest.mark.parametrize("delta", [None, 0.1, 1, 100])
@pytest.mark.parametrize("period", [True, False])
def test_send_value_update_text_type(self, input, max, expected, type, send_trace, delta, period):
"""
Tests sending update for text/blob value.
Tests if expected message is being sent.
Args:
input: value to be updated
max: maximum length of the message
expected: value expected to be sent
type: indicates if it is string or blob types of value
send_trace: Boolean indicating if trace should be automatically sent
delta: delta of value (determines if change was significant enough to be sent)
period: parameter indicating whether value should be updated periodically
"""
# Arrange
with patch('urllib.request.urlopen'):
fake_connect(self, ADDRESS, PORT, send_trace)
self.service.socket.my_socket.send = Mock()
urlopen_trace_id = sent_json_trace_id = ''
device = self.service.get_device("device-1")
value = next(val for val in device.values if val.data_type == type)
value.data_type = type
value.string_max = max
value.blob_max = max
if delta:
value.last_update_of_report = 0
value.set_delta(delta)
# delta should not have any effect
# Act
with patch('urllib.request.urlopen') as urlopen:
try:
if period is True:
with patch('threading.Timer.start') as start:
value.set_period(1)
value.timer_elapsed = True
if start.called:
value.update(input)
else:
value.update(input)
args, kwargs = self.service.socket.my_socket.send.call_args
arg = json.loads(args[0].decode("utf-8"))
result = arg[0]["params"]["data"]["data"]
if send_trace:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_urlopen = urlparse.urlparse(urlopen_args[0])
urlopen_trace_id = parse_qs(parsed_urlopen.query)['id']
parsed_sent_json = urlparse.urlparse(arg[0]['params']['url'])
sent_json_trace_id = parse_qs(parsed_sent_json.query)['trace']
except TypeError:
result = None
# Assert
assert result == expected
assert sent_json_trace_id == urlopen_trace_id
if send_trace and result is not None:
assert urlopen_trace_id != ''
else:
assert urlopen_trace_id == ''
def teardown_module(self):
"""
Tears down each method.
Stops the connection.
"""
self.service.socket.close()
class TestReceiveThreadClass:
"""
TestReceiveThreadClass instance.
Tests receiving messages from wappsto server.
"""
def setup_method(self):
"""
Sets up each method.
Sets the location to be used in the test, initializes the service and creates the connection.
"""
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location)
fake_connect(self, ADDRESS, PORT)
@pytest.mark.parametrize("trace_id", [None, "321"])
@pytest.mark.parametrize("expected_msg_id", [message_data.SEND_FAILED])
@pytest.mark.parametrize("bulk", [False, True])
@pytest.mark.parametrize("split_message", [False, True])
def test_receive_thread_wrong_verb(self, trace_id, expected_msg_id, bulk,
split_message):
"""
Tests receiving message with wrong verb.
Tests what happens if a wrong verb is provided in the incoming message.
Args:
trace_id: id used for tracing
expected_msg_id: message id expected to be received
bulk: Boolean value indicating if multiple messages should be sent at once
split_message: Boolean value indicating if message should be sent in parts
"""
# Arrange
send_response(self, "wrong_verb", trace_id=trace_id, bulk=bulk, split_message=split_message)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
self.service.socket.receive_data.receive_thread()
except KeyboardInterrupt:
pass
# Assert
assert self.service.socket.sending_queue.qsize() > 0
while self.service.socket.sending_queue.qsize() > 0:
message = self.service.socket.sending_queue.get()
assert message.msg_id == expected_msg_id
@pytest.mark.parametrize("callback_exists", [False, True])
@pytest.mark.parametrize("trace_id", [None, "321"])
@pytest.mark.parametrize("expected_msg_id", [message_data.SEND_SUCCESS])
@pytest.mark.parametrize("object_name", ["value"])
@pytest.mark.parametrize("object_exists", [False, True])
@pytest.mark.parametrize("bulk", [False, True])
@pytest.mark.parametrize("data", ["44", None])
@pytest.mark.parametrize("split_message", [False, True])
@pytest.mark.parametrize("type", ["state", "value"])
@pytest.mark.parametrize("period", [1])
@pytest.mark.parametrize("delta", [1])
def test_receive_thread_Put(self, callback_exists, trace_id,
expected_msg_id, object_name, object_exists,
bulk, data, split_message, type, period, delta):
"""
Tests receiving message with PUT verb.
Tests what happens if the PUT method is provided in the incoming message.
Args:
callback_exists: Boolean indicating if object should have callback
trace_id: id used for tracing
expected_msg_id: message id expected to be received
object_name: name of the object to be updated
object_exists: indicates if the object exists
bulk: Boolean value indicating if multiple messages should be sent at once
data: data value provided in the message
split_message: Boolean value indicating if message should be sent in parts
type: type of the element being targeted (e.g. 'state' or 'value')
delta: delta of value (determines if change was significant enough to be sent)
period: parameter indicating whether value should be updated periodically
"""
# Arrange
actual_object = get_object(self, object_name)
fix_object_callback(callback_exists, actual_object)
actual_object.control_state.data = "1"
if type == "state":
id = str(actual_object.control_state.uuid)
elif type == "value":
id = str(actual_object.uuid)
if not object_exists:
self.service.data_manager.network = None
expected_msg_id = message_data.SEND_FAILED
send_response(self, 'PUT', trace_id=trace_id, bulk=bulk, element_id=id,
data=data, split_message=split_message, element_type=type, period=period,
delta=delta)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch('threading.Timer.start'):
self.service.socket.receive_data.receive_thread()
except KeyboardInterrupt:
pass
# Assert
if data is not None:
if object_exists:
if trace_id:
assert any(message.msg_id == message_data.SEND_TRACE for message
in self.service.socket.sending_queue.queue)
if type == "state":
if callback_exists:
assert actual_object.callback.call_args[0][1] == 'set'
elif type == "value":
assert actual_object.period == period
assert actual_object.delta == delta
assert self.service.socket.sending_queue.qsize() > 0
while self.service.socket.sending_queue.qsize() > 0:
message = self.service.socket.sending_queue.get()
assert (message.msg_id == message_data.SEND_TRACE
or message.msg_id == expected_msg_id)
if message.msg_id == message_data.SEND_TRACE:
assert message.trace_id == trace_id
else:
assert self.service.socket.sending_queue.qsize() == 0
@pytest.mark.parametrize("callback_exists", [False, True])
@pytest.mark.parametrize("trace_id", [None, "321"])
@pytest.mark.parametrize("expected_msg_id", [message_data.SEND_SUCCESS])
@pytest.mark.parametrize("object_name", ["value"])
@pytest.mark.parametrize("object_exists", [False, True])
@pytest.mark.parametrize("bulk", [False, True])
@pytest.mark.parametrize("split_message", [False, True])
@pytest.mark.parametrize("id_exists", [False, True])
def test_receive_thread_Get(self, callback_exists, trace_id,
expected_msg_id, object_name, object_exists,
bulk, split_message, id_exists):
"""
Tests receiving message with GET verb.
Tests what happens if the GET method is provided in the incoming message.
Args:
callback_exists: Boolean indicating if object should have callback
trace_id: id used for tracing
expected_msg_id: message id expected to be received
object_name: name of the object to be updated
object_exists: indicates if the object exists
bulk: Boolean value indicating if multiple messages should be sent at once
split_message: Boolean value indicating if message should be sent in parts
id_exists: indicates if id should be in the message
"""
# Arrange
rpc_id = "GET-1"
actual_object = get_object(self, object_name)
fix_object_callback(callback_exists, actual_object)
uuid = str(actual_object.report_state.uuid)
if not object_exists:
self.service.data_manager.network = None
expected_msg_id = message_data.SEND_FAILED
if not id_exists:
uuid = None
send_response(self, "GET", message_id=rpc_id, trace_id=trace_id, bulk=bulk, element_id=uuid,
element_type="state", data=1, split_message=split_message)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
self.service.socket.receive_data.receive_thread()
except KeyboardInterrupt:
pass
# Assert
if id_exists:
if object_exists:
if trace_id:
assert any(message.msg_id == message_data.SEND_TRACE for message
in self.service.socket.sending_queue.queue)
if callback_exists:
assert actual_object.callback.call_args[0][1] == "refresh"
assert self.service.socket.sending_queue.qsize() > 0
while self.service.socket.sending_queue.qsize() > 0:
message = self.service.socket.sending_queue.get()
assert (message.msg_id == message_data.SEND_TRACE
or message.msg_id == expected_msg_id)
if message.msg_id == message_data.SEND_TRACE:
assert message.trace_id == trace_id
else:
if bulk:
assert self.service.socket.sending_queue.qsize() == 2
else:
assert self.service.socket.sending_queue.qsize() == 1
while self.service.socket.sending_queue.qsize() > 0:
message = self.service.socket.sending_queue.get()
assert (message.msg_id == message_data.SEND_FAILED)
assert (message.rpc_id == rpc_id)
@pytest.mark.parametrize("callback_exists", [False, True])
@pytest.mark.parametrize("trace_id", [None, "321"])
@pytest.mark.parametrize("expected_msg_id", [message_data.SEND_SUCCESS])
@pytest.mark.parametrize("object_name", ["network", "device", "value", "control_state", "report_state"])
@pytest.mark.parametrize("object_exists", [False, True])
@pytest.mark.parametrize("bulk", [False, True])
@pytest.mark.parametrize("split_message", [False, True])
@pytest.mark.parametrize("id_exists", [False, True])
def test_receive_thread_Delete(self, callback_exists, trace_id,
expected_msg_id, object_name, object_exists,
bulk, split_message, id_exists):
"""
Tests receiving message with DELETE verb.
Tests what happens if the DELETE method is provided in the incoming message.
Args:
callback_exists: Boolean indicating if object should have callback
trace_id: id used for tracing
expected_msg_id: message id expected to be received
object_name: name of the object to be updated
object_exists: indicates if the object exists
bulk: Boolean value indicating if multiple messages should be sent at once
split_message: Boolean value indicating if message should be sent in parts
id_exists: indicates if id should be in the message
"""
# Arrange
rpc_id = "DELETE-1"
actual_object = get_object(self, object_name)
fix_object_callback(callback_exists, actual_object)
id = str(actual_object.uuid)
if not object_exists:
self.service.data_manager.network = None
expected_msg_id = message_data.SEND_FAILED
if not id_exists:
id = None
send_response(self, 'DELETE', message_id=rpc_id, trace_id=trace_id, bulk=bulk, element_id=id, data=1,
element_type='network', split_message=split_message)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
self.service.socket.receive_data.receive_thread()
except KeyboardInterrupt:
pass
# Assert
if id_exists:
if object_exists:
if trace_id:
assert any(message.msg_id == message_data.SEND_TRACE for message
in self.service.socket.sending_queue.queue)
if callback_exists:
assert actual_object.callback.call_args[0][1] == "remove"
assert self.service.socket.sending_queue.qsize() > 0
while self.service.socket.sending_queue.qsize() > 0:
message = self.service.socket.sending_queue.get()
assert (message.msg_id == message_data.SEND_TRACE
or message.msg_id == expected_msg_id)
if message.msg_id == message_data.SEND_TRACE:
assert message.trace_id == trace_id
else:
if bulk:
assert self.service.socket.sending_queue.qsize() == 2
else:
assert self.service.socket.sending_queue.qsize() == 1
while self.service.socket.sending_queue.qsize() > 0:
message = self.service.socket.sending_queue.get()
assert (message.msg_id == message_data.SEND_FAILED)
assert (message.rpc_id == rpc_id)
@pytest.mark.parametrize("id", ["93043873"])
@pytest.mark.parametrize("data", ["55"])
@pytest.mark.parametrize("bulk", [False, True])
@pytest.mark.parametrize("split_message", [False, True])
def test_receive_thread_result(self, id, data, bulk, split_message):
"""
Tests receiving success message.
Tests what happens if a result response is provided in the incoming message.
Args:
id: id of the message
data: value state should be in
bulk: Boolean value indicating if multiple messages should be sent at once
split_message: Boolean value indicating if message should be sent in parts
"""
# Arrange
state = self.service.get_devices()[0].get_value("temp").control_state
state.data = 1
send_response(self, "result", bulk=bulk, message_id=state.uuid, element_id=state.uuid,
data=data, split_message=split_message)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
self.service.socket.receive_data.receive_thread()
except KeyboardInterrupt:
pass
# Assert
assert state.data == data
assert len(self.service.socket.packet_awaiting_confirm) == 0
@pytest.mark.parametrize("bulk", [False, True])
@pytest.mark.parametrize("split_message", [False, True])
@pytest.mark.parametrize("message_size_exeeded", [False, True])
def test_receive_thread_error(self, bulk, split_message, message_size_exeeded):
"""
        Tests receiving an error message.
        Tests what happens when an error response is provided in an incoming message.
Args:
bulk: Boolean value indicating if multiple messages should be sent at once
split_message: Boolean value indicating if message should be sent in parts
            message_size_exeeded: Boolean indicating if the received message is too big
"""
# Arrange
if message_size_exeeded:
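            # Force the incoming response to exceed the allowed message size, so the
            # original packet is expected to remain in the awaiting-confirm list.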
wappsto.communication.receive_data.MESSAGE_SIZE_BYTES = 0
messages_in_list = 1
else:
messages_in_list = 0
send_response(self, 'error', bulk=bulk, message_id="93043873", split_message=split_message)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
self.service.socket.receive_data.receive_thread()
except KeyboardInterrupt:
pass
# Assert
assert len(self.service.socket.packet_awaiting_confirm) == messages_in_list
class TestSendThreadClass:
"""
TestSendThreadClass instance.
    Tests sending messages to the wappsto server.
"""
@pytest.mark.parametrize("messages_in_queue", [1, 20])
def test_send_thread_unhandled(self, messages_in_queue):
"""
        Tests sending a message with an unhandled type.
        Tests what happens when the send thread encounters a message type it does not recognize.
Args:
messages_in_queue: How many messages should be sent
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location)
fake_connect(self, ADDRESS, PORT)
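        # -1 is not a recognized message type, so the send thread should discard it
        # (a warning is expected to be logged).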
for x in range(messages_in_queue):
reply = message_data.MessageData(
-1
)
self.service.socket.sending_queue.put(reply)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch("logging.Logger.warning", side_effect=check_for_logged_info):
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
pass
# Assert
assert self.service.socket.sending_queue.qsize() == messages_in_queue - 1
@pytest.mark.parametrize("value", [1, None])
@pytest.mark.parametrize("messages_in_queue", [1, 20])
@pytest.mark.parametrize("log_location", ["test_logs/logs"])
@pytest.mark.parametrize("file_size", [1, 0])
@pytest.mark.parametrize("limit_action", [event_storage.REMOVE_OLD])
@pytest.mark.parametrize("connected,log_offline,log_file_exists,make_zip", [
(False, True, True, True),
(False, True, True, False),
(False, True, False, False),
(False, False, False, False),
(True, False, False, False)])
def test_send_thread_success(self, messages_in_queue, value, log_offline,
connected, log_location, file_size, limit_action,
log_file_exists, make_zip):
"""
        Tests sending a success response message.
        Tests what happens when a success response is sent.
Args:
            value: value to be sent (when None is provided, the JSON should be invalid)
messages_in_queue: How many messages should be sent
log_offline: boolean indicating if data should be logged
            connected: boolean indicating if there is a connection to the server
log_location: location of the logs
file_size: how big is the current size of the folder
            limit_action: action to perform when the limit is exceeded
            log_file_exists: boolean indicating if the log file exists
            make_zip: boolean indicating if the log file should be zipped
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
log_offline=log_offline,
log_location=log_location,
log_data_limit=1,
limit_action=limit_action,
compression_period=event_storage.HOUR_PERIOD)
fake_connect(self, ADDRESS, PORT)
for x in range(messages_in_queue):
reply = message_data.MessageData(
message_data.SEND_SUCCESS,
rpc_id=value
)
self.service.socket.sending_queue.put(reply)
self.service.socket.my_socket.send = Mock(side_effect=KeyboardInterrupt)
self.service.socket.connected = connected
file_path = set_up_log(self, log_file_exists, file_size, make_zip)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch("logging.Logger.error", side_effect=check_for_logged_info), \
patch("logging.Logger.debug", side_effect=check_for_logged_info):
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
pass
# Assert
assert os.path.isdir(self.service.event_storage.log_location)
if connected or log_offline:
if connected:
args, kwargs = self.service.socket.my_socket.send.call_args
args = args[0].decode("utf-8")
else:
with open(file_path, "r") as file:
args = file.readlines()[-1]
arg = json.loads(args)
assert len(arg) <= wappsto.connection.communication.send_data.MAX_BULK_SIZE
assert self.service.socket.sending_queue.qsize() == max(
messages_in_queue - wappsto.connection.communication.send_data.MAX_BULK_SIZE, 0)
assert validate_json("successResponse", arg) == bool(value)
for request in arg:
assert request.get("id", None) == value
assert bool(request["result"]) is True
else:
# Message not being sent or saved
pass
@pytest.mark.parametrize("value", ["test_info", None])
@pytest.mark.parametrize("messages_in_queue", [1, 20])
@pytest.mark.parametrize("log_location", ["test_logs/logs"])
@pytest.mark.parametrize("file_size", [1, 0])
@pytest.mark.parametrize("limit_action", [event_storage.REMOVE_OLD])
@pytest.mark.parametrize("connected,log_offline,log_file_exists,make_zip", [
(False, True, True, True),
(False, True, True, False),
(False, True, False, False),
(False, False, False, False),
(True, False, False, False)])
@pytest.mark.parametrize("send_trace", [True, False])
def test_send_thread_report(self, messages_in_queue, value, log_offline,
connected, log_location, file_size, limit_action,
log_file_exists, make_zip, send_trace):
"""
        Tests sending a report message.
        Tests what happens when a report message is sent.
Args:
messages_in_queue: How many messages should be sent
            value: value to be sent (when None is provided, the JSON should be invalid)
log_offline: boolean indicating if data should be logged
            connected: boolean indicating if there is a connection to the server
log_location: location of the logs
file_size: how big is the current size of the folder
            limit_action: action to perform when the limit is exceeded
            log_file_exists: boolean indicating if the log file exists
            make_zip: boolean indicating if the log file should be zipped
send_trace: Boolean indicating if trace should be automatically sent
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
log_offline=log_offline,
log_location=log_location,
log_data_limit=1,
limit_action=limit_action,
compression_period=event_storage.HOUR_PERIOD)
fake_connect(self, ADDRESS, PORT)
for x in range(messages_in_queue):
reply = message_data.MessageData(
message_data.SEND_REPORT,
state_id=self.service.get_network().uuid,
data=value,
verb=message_data.PUT
)
self.service.socket.sending_queue.put(reply)
self.service.socket.my_socket.send = Mock(side_effect=KeyboardInterrupt)
self.service.socket.connected = connected
self.service.socket.send_data.automatic_trace = send_trace
file_path = set_up_log(self, log_file_exists, file_size, make_zip)
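        # Trace ids default to empty strings; they are only filled in if a trace request is made.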
urlopen_trace_id = sent_json_trace_id = ''
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch("logging.Logger.error", side_effect=check_for_logged_info), \
patch("logging.Logger.debug", side_effect=check_for_logged_info), \
patch('urllib.request.urlopen') as urlopen:
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
pass
# Assert
assert os.path.isdir(self.service.event_storage.log_location)
if connected or log_offline:
if connected:
args, kwargs = self.service.socket.my_socket.send.call_args
args = args[0].decode("utf-8")
else:
with open(file_path, "r") as file:
args = file.readlines()[-1]
arg = json.loads(args)
if urlopen.called:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_urlopen = urlparse.urlparse(urlopen_args[0])
urlopen_trace_id = parse_qs(parsed_urlopen.query)['id']
parsed_sent_json = urlparse.urlparse(arg[-1]['params']['url'])
sent_json_trace_id = parse_qs(parsed_sent_json.query)['trace']
assert urlopen_trace_id == sent_json_trace_id
if send_trace:
assert urlopen_trace_id != ''
else:
assert urlopen_trace_id == ''
assert len(arg) <= wappsto.connection.communication.send_data.MAX_BULK_SIZE
assert self.service.socket.sending_queue.qsize() == max(
messages_in_queue - wappsto.connection.communication.send_data.MAX_BULK_SIZE, 0)
assert validate_json("request", arg) == bool(value)
for request in arg:
assert request["params"]["data"].get("data", None) == value
assert request["params"]["data"]["type"] == "Report"
assert request["method"] == "PUT"
else:
# Message not being sent or saved
pass
@pytest.mark.parametrize("value", [1, None])
@pytest.mark.parametrize("messages_in_queue", [1, 20])
@pytest.mark.parametrize("log_location", ["test_logs/logs"])
@pytest.mark.parametrize("file_size", [1, 0])
@pytest.mark.parametrize("limit_action", [event_storage.REMOVE_OLD])
@pytest.mark.parametrize("connected,log_offline,log_file_exists,make_zip", [
(False, True, True, True),
(False, True, True, False),
(False, True, False, False),
(False, False, False, False),
(True, False, False, False)])
def test_send_thread_failed(self, messages_in_queue, value, log_offline,
connected, log_location, file_size, limit_action,
log_file_exists, make_zip):
"""
        Tests sending a failed response message.
        Tests what happens when a failed (error) response is sent.
Args:
messages_in_queue: How many messages should be sent
            value: value to be sent (when None is provided, the JSON should be invalid)
log_offline: boolean indicating if data should be logged
            connected: boolean indicating if there is a connection to the server
log_location: location of the logs
file_size: how big is the current size of the folder
            limit_action: action to perform when the limit is exceeded
            log_file_exists: boolean indicating if the log file exists
            make_zip: boolean indicating if the log file should be zipped
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
log_offline=log_offline,
log_location=log_location,
log_data_limit=1,
limit_action=limit_action,
compression_period=event_storage.HOUR_PERIOD)
fake_connect(self, ADDRESS, PORT)
for x in range(messages_in_queue):
reply = message_data.MessageData(
message_data.SEND_FAILED,
rpc_id=value
)
self.service.socket.sending_queue.put(reply)
self.service.socket.my_socket.send = Mock(side_effect=KeyboardInterrupt)
self.service.socket.connected = connected
file_path = set_up_log(self, log_file_exists, file_size, make_zip)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch("logging.Logger.error", side_effect=check_for_logged_info), \
patch("logging.Logger.debug", side_effect=check_for_logged_info):
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
pass
# Assert
assert os.path.isdir(self.service.event_storage.log_location)
if connected or log_offline:
if connected:
args, kwargs = self.service.socket.my_socket.send.call_args
args = args[0].decode("utf-8")
else:
with open(file_path, "r") as file:
args = file.readlines()[-1]
arg = json.loads(args)
assert len(arg) <= wappsto.connection.communication.send_data.MAX_BULK_SIZE
assert self.service.socket.sending_queue.qsize() == max(
messages_in_queue - wappsto.connection.communication.send_data.MAX_BULK_SIZE, 0)
assert validate_json("errorResponse", arg) == bool(value)
for request in arg:
assert request.get("id", None) == value
assert request["error"] == {"code": -32020}
else:
# Message not being sent or saved
pass
@pytest.mark.parametrize("valid_message", [True, False])
@pytest.mark.parametrize("messages_in_queue", [1, 20])
@pytest.mark.parametrize("log_location", ["test_logs/logs"])
@pytest.mark.parametrize("file_size", [1, 0])
@pytest.mark.parametrize("limit_action", [event_storage.REMOVE_OLD])
@pytest.mark.parametrize("upgradable", [True, False])
@pytest.mark.parametrize("connected,log_offline,log_file_exists,make_zip", [
(False, True, True, True),
(False, True, True, False),
(False, True, False, False),
(False, False, False, False),
(True, False, False, False)])
@pytest.mark.parametrize("send_trace", [True, False])
def test_send_thread_reconnect(self, messages_in_queue, valid_message, log_offline,
connected, log_location, file_size, limit_action,
log_file_exists, upgradable, make_zip, send_trace):
"""
        Tests sending a reconnect message.
        Tests what happens when a reconnect message is sent.
Args:
messages_in_queue: How many messages should be sent
valid_message: Boolean indicating if the sent json should be valid
log_offline: boolean indicating if data should be logged
            connected: boolean indicating if there is a connection to the server
log_location: location of the logs
file_size: how big is the current size of the folder
            limit_action: action to perform when the limit is exceeded
            log_file_exists: boolean indicating if the log file exists
            upgradable: specifies if the object is upgradable
            make_zip: boolean indicating if the log file should be zipped
send_trace: Boolean indicating if trace should be automatically sent
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
log_offline=log_offline,
log_location=log_location,
log_data_limit=1,
limit_action=limit_action,
compression_period=event_storage.HOUR_PERIOD)
fake_connect(self, ADDRESS, PORT)
if valid_message:
value = self.service.get_network().uuid
else:
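            # Chained assignment: both value and the network uuid end up as 1,
            # which should make the outgoing JSON invalid.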
value = self.service.get_network().uuid = 1
for x in range(messages_in_queue):
reply = message_data.MessageData(
message_data.SEND_RECONNECT,
data=value
)
self.service.socket.sending_queue.put(reply)
self.service.socket.my_socket.send = Mock(side_effect=KeyboardInterrupt)
self.service.socket.connected = connected
self.service.socket.send_data.automatic_trace = send_trace
urlopen_trace_id = sent_json_trace_id = ''
file_path = set_up_log(self, log_file_exists, file_size, make_zip)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch('os.getenv', return_value=str(upgradable)), \
patch("logging.Logger.error", side_effect=check_for_logged_info), \
patch("logging.Logger.debug", side_effect=check_for_logged_info), \
patch("urllib.request.urlopen") as urlopen:
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
pass
# Assert
assert os.path.isdir(self.service.event_storage.log_location)
if connected or log_offline:
if connected:
args, kwargs = self.service.socket.my_socket.send.call_args
args = args[0].decode("utf-8")
else:
with open(file_path, "r") as file:
args = file.readlines()[-1]
arg = json.loads(args)
if urlopen.called:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_urlopen = urlparse.urlparse(urlopen_args[0])
urlopen_trace_id = parse_qs(parsed_urlopen.query)['id']
parsed_sent_json = urlparse.urlparse(arg[-1]['params']['url'])
sent_json_trace_id = parse_qs(parsed_sent_json.query)['trace']
assert urlopen_trace_id == sent_json_trace_id
if send_trace:
assert urlopen_trace_id != ''
else:
assert urlopen_trace_id == ''
assert len(arg) <= wappsto.connection.communication.send_data.MAX_BULK_SIZE
assert self.service.socket.sending_queue.qsize() == max(
messages_in_queue - wappsto.connection.communication.send_data.MAX_BULK_SIZE, 0)
assert validate_json("request", arg) == valid_message
for request in arg:
assert request["params"]["data"]["meta"].get("id", None) == value
assert request["params"]["data"]["meta"]["type"] == "network"
assert request["method"] == "POST"
else:
# Message not being sent or saved
pass
@pytest.mark.parametrize("valid_message", [True, False])
@pytest.mark.parametrize("messages_in_queue", [1, 20])
@pytest.mark.parametrize("log_location", ["test_logs/logs"])
@pytest.mark.parametrize("file_size", [1, 0])
@pytest.mark.parametrize("limit_action", [event_storage.REMOVE_OLD])
@pytest.mark.parametrize("connected,log_offline,log_file_exists,make_zip", [
(False, True, True, True),
(False, True, True, False),
(False, True, False, False),
(False, False, False, False),
(True, False, False, False)])
@pytest.mark.parametrize("send_trace", [True, False])
def test_send_thread_control(self, messages_in_queue, valid_message, log_offline,
connected, log_location, file_size, limit_action,
log_file_exists, make_zip, send_trace):
"""
        Tests sending a control message.
        Tests what happens when a control message is sent.
Args:
messages_in_queue: How many messages should be sent
valid_message: Boolean indicating if the sent json should be valid
log_offline: boolean indicating if data should be logged
            connected: boolean indicating if there is a connection to the server
log_location: location of the logs
file_size: how big is the current size of the folder
            limit_action: action to perform when the limit is exceeded
            log_file_exists: boolean indicating if the log file exists
            make_zip: boolean indicating if the log file should be zipped
send_trace: Boolean indicating if trace should be automatically sent
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
log_offline=log_offline,
log_location=log_location,
log_data_limit=1,
limit_action=limit_action,
compression_period=event_storage.HOUR_PERIOD)
fake_connect(self, ADDRESS, PORT)
if valid_message:
value = self.service.get_network().uuid
else:
value = 1
for x in range(messages_in_queue):
reply = message_data.MessageData(
message_data.SEND_CONTROL,
state_id=value,
data="",
verb=message_data.PUT
)
self.service.socket.sending_queue.put(reply)
self.service.socket.my_socket.send = Mock(side_effect=KeyboardInterrupt)
self.service.socket.connected = connected
self.service.socket.send_data.automatic_trace = send_trace
urlopen_trace_id = sent_json_trace_id = ''
file_path = set_up_log(self, log_file_exists, file_size, make_zip)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch("logging.Logger.error", side_effect=check_for_logged_info), \
patch("logging.Logger.debug", side_effect=check_for_logged_info), \
patch('urllib.request.urlopen') as urlopen:
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
pass
# Assert
assert os.path.isdir(self.service.event_storage.log_location)
if connected or log_offline:
if connected:
args, kwargs = self.service.socket.my_socket.send.call_args
args = args[0].decode("utf-8")
else:
with open(file_path, "r") as file:
args = file.readlines()[-1]
arg = json.loads(args)
if urlopen.called:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_urlopen = urlparse.urlparse(urlopen_args[0])
urlopen_trace_id = parse_qs(parsed_urlopen.query)['id']
parsed_sent_json = urlparse.urlparse(arg[-1]['params']['url'])
sent_json_trace_id = parse_qs(parsed_sent_json.query)['trace']
assert urlopen_trace_id == sent_json_trace_id
if send_trace:
assert urlopen_trace_id != ''
else:
assert urlopen_trace_id == ''
assert len(arg) <= wappsto.connection.communication.send_data.MAX_BULK_SIZE
assert self.service.socket.sending_queue.qsize() == max(
messages_in_queue - wappsto.connection.communication.send_data.MAX_BULK_SIZE, 0)
assert validate_json("request", arg) == valid_message
for request in arg:
assert request["params"]["data"]["meta"].get("id", None) == value
assert request["params"]["data"]["type"] == "Control"
assert request["method"] == "PUT"
else:
# Message not being sent or saved
pass
@pytest.mark.parametrize("object_name", ["network", "device", "value", "control_state", "report_state"])
@pytest.mark.parametrize("messages_in_queue", [1, 20])
@pytest.mark.parametrize("log_location", ["test_logs/logs"])
@pytest.mark.parametrize("file_size", [1, 0])
@pytest.mark.parametrize("limit_action", [event_storage.REMOVE_OLD])
@pytest.mark.parametrize("connected,log_offline,log_file_exists,make_zip", [
(False, True, True, True),
(False, True, True, False),
(False, True, False, False),
(False, False, False, False),
(True, False, False, False)])
@pytest.mark.parametrize("send_trace", [True, False])
def test_send_thread_delete(self, object_name, messages_in_queue, log_offline,
connected, log_location, file_size, limit_action,
log_file_exists, make_zip, send_trace):
"""
        Tests sending a DELETE message.
        Tests what happens when a DELETE message is sent.
Args:
object_name: name of the object to be updated
messages_in_queue: value indicating how many messages should be sent at once
log_offline: boolean indicating if data should be logged
            connected: boolean indicating if there is a connection to the server
log_location: location of the logs
file_size: how big is the current size of the folder
            limit_action: action to perform when the limit is exceeded
            log_file_exists: boolean indicating if the log file exists
            make_zip: boolean indicating if the log file should be zipped
send_trace: Boolean indicating if trace should be automatically sent
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location,
log_offline=log_offline,
log_location=log_location,
log_data_limit=1,
limit_action=limit_action,
compression_period=event_storage.HOUR_PERIOD)
fake_connect(self, ADDRESS, PORT)
actual_object = get_object(self, object_name)
if object_name == "control_state" or object_name == "report_state":
reply = message_data.MessageData(
message_data.SEND_DELETE,
network_id=actual_object.parent.parent.parent.uuid,
device_id=actual_object.parent.parent.uuid,
value_id=actual_object.parent.uuid,
state_id=actual_object.uuid
)
if object_name == "value":
reply = message_data.MessageData(
message_data.SEND_DELETE,
network_id=actual_object.parent.parent.uuid,
device_id=actual_object.parent.uuid,
value_id=actual_object.uuid
)
if object_name == "device":
reply = message_data.MessageData(
message_data.SEND_DELETE,
network_id=actual_object.parent.uuid,
device_id=actual_object.uuid
)
if object_name == "network":
reply = message_data.MessageData(
message_data.SEND_DELETE,
network_id=actual_object.uuid
)
for x in range(messages_in_queue):
self.service.socket.sending_queue.put(reply)
self.service.socket.my_socket.send = Mock(side_effect=KeyboardInterrupt)
self.service.socket.add_id_to_confirm_list = Mock()
self.service.socket.connected = connected
self.service.socket.send_data.automatic_trace = send_trace
urlopen_trace_id = sent_json_trace_id = ''
file_path = set_up_log(self, log_file_exists, file_size, make_zip)
# Act
try:
# runs until mock object is run and its side_effect raises
# exception
with patch("logging.Logger.error", side_effect=check_for_logged_info), \
patch("logging.Logger.debug", side_effect=check_for_logged_info), \
patch('urllib.request.urlopen') as urlopen:
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
pass
# Assert
assert os.path.isdir(self.service.event_storage.log_location)
if connected or log_offline:
if connected:
args, kwargs = self.service.socket.my_socket.send.call_args
args = args[0].decode("utf-8")
else:
with open(file_path, "r") as file:
args = file.readlines()[-1]
arg = json.loads(args)
if urlopen.called:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_urlopen = urlparse.urlparse(urlopen_args[0])
urlopen_trace_id = parse_qs(parsed_urlopen.query)['id']
parsed_sent_json = urlparse.urlparse(arg[-1]['params']['url'])
sent_json_trace_id = parse_qs(parsed_sent_json.query)['trace']
assert urlopen_trace_id == sent_json_trace_id
if send_trace:
assert urlopen_trace_id != ''
else:
assert urlopen_trace_id == ''
assert len(arg) <= wappsto.connection.communication.send_data.MAX_BULK_SIZE
assert self.service.socket.sending_queue.qsize() == max(
messages_in_queue - wappsto.connection.communication.send_data.MAX_BULK_SIZE, 0)
for request in arg:
assert request["params"]["url"] is not None
else:
# Message not being sent or saved
pass
@pytest.mark.parametrize("trace_id", [(332)])
def test_send_thread_send_trace(self, trace_id):
"""
        Tests sending a trace message.
        Tests what happens when a trace message is sent.
Args:
trace_id: trace id expected to be sent
"""
# Arrange
test_json_location = os.path.join(os.path.dirname(__file__), TEST_JSON)
self.service = wappsto.Wappsto(json_file_name=test_json_location)
fake_connect(self, ADDRESS, PORT)
reply = message_data.MessageData(
message_data.SEND_TRACE,
trace_id=trace_id,
control_value_id=1,
rpc_id=93043873
)
self.service.socket.sending_queue.put(reply)
# Act
with patch("urllib.request.urlopen", side_effect=KeyboardInterrupt) as urlopen:
try:
# runs until mock object is run and its side_effect raises
# exception
self.service.socket.send_data.send_thread()
except KeyboardInterrupt:
if urlopen.called:
urlopen_args, urlopen_kwargs = urlopen.call_args
parsed_id = urlparse.urlparse(urlopen_args[0])
parsed_id = int(parse_qs(parsed_id.query)['id'][0])
# Assert
assert parsed_id == trace_id
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tests/unit/gapic/vision_v1/test_image_annotator.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.vision_v1.services.image_annotator import ImageAnnotatorAsyncClient
from google.cloud.vision_v1.services.image_annotator import ImageAnnotatorClient
from google.cloud.vision_v1.services.image_annotator import transports
from google.cloud.vision_v1.types import geometry
from google.cloud.vision_v1.types import image_annotator
from google.cloud.vision_v1.types import product_search
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.type import latlng_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ImageAnnotatorClient._get_default_mtls_endpoint(None) is None
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
ImageAnnotatorClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient,]
)
def test_image_annotator_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "vision.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.ImageAnnotatorGrpcTransport, "grpc"),
(transports.ImageAnnotatorGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_image_annotator_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient,]
)
def test_image_annotator_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "vision.googleapis.com:443"
def test_image_annotator_client_get_transport_class():
transport = ImageAnnotatorClient.get_transport_class()
available_transports = [
transports.ImageAnnotatorGrpcTransport,
]
assert transport in available_transports
transport = ImageAnnotatorClient.get_transport_class("grpc")
assert transport == transports.ImageAnnotatorGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
def test_image_annotator_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ImageAnnotatorClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ImageAnnotatorClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc", "true"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc", "false"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_image_annotator_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient]
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
def test_image_annotator_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_image_annotator_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_image_annotator_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_image_annotator_client_client_options_from_dict():
with mock.patch(
"google.cloud.vision_v1.services.image_annotator.transports.ImageAnnotatorGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = ImageAnnotatorClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [image_annotator.BatchAnnotateImagesRequest, dict,]
)
def test_batch_annotate_images(request_type, transport: str = "grpc"):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
response = client.batch_annotate_images(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateImagesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, image_annotator.BatchAnnotateImagesResponse)
def test_batch_annotate_images_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
client.batch_annotate_images()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateImagesRequest()
@pytest.mark.asyncio
async def test_batch_annotate_images_async(
transport: str = "grpc_asyncio",
request_type=image_annotator.BatchAnnotateImagesRequest,
):
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateImagesResponse()
)
response = await client.batch_annotate_images(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateImagesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, image_annotator.BatchAnnotateImagesResponse)
@pytest.mark.asyncio
async def test_batch_annotate_images_async_from_dict():
await test_batch_annotate_images_async(request_type=dict)
def test_batch_annotate_images_flattened():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
assert arg == mock_val
def test_batch_annotate_images_flattened_error():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_annotate_images(
image_annotator.BatchAnnotateImagesRequest(),
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
@pytest.mark.asyncio
async def test_batch_annotate_images_flattened_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateImagesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_annotate_images_flattened_error_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_annotate_images(
image_annotator.BatchAnnotateImagesRequest(),
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
@pytest.mark.parametrize(
"request_type", [image_annotator.BatchAnnotateFilesRequest, dict,]
)
def test_batch_annotate_files(request_type, transport: str = "grpc"):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateFilesResponse()
response = client.batch_annotate_files(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateFilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, image_annotator.BatchAnnotateFilesResponse)
def test_batch_annotate_files_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_files), "__call__"
) as call:
client.batch_annotate_files()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateFilesRequest()
@pytest.mark.asyncio
async def test_batch_annotate_files_async(
transport: str = "grpc_asyncio",
request_type=image_annotator.BatchAnnotateFilesRequest,
):
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateFilesResponse()
)
response = await client.batch_annotate_files(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateFilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, image_annotator.BatchAnnotateFilesResponse)
@pytest.mark.asyncio
async def test_batch_annotate_files_async_from_dict():
await test_batch_annotate_files_async(request_type=dict)
def test_batch_annotate_files_flattened():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateFilesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_annotate_files(
requests=[
image_annotator.AnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
]
assert arg == mock_val
def test_batch_annotate_files_flattened_error():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_annotate_files(
image_annotator.BatchAnnotateFilesRequest(),
requests=[
image_annotator.AnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
@pytest.mark.asyncio
async def test_batch_annotate_files_flattened_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateFilesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateFilesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_annotate_files(
requests=[
image_annotator.AnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_annotate_files_flattened_error_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_annotate_files(
image_annotator.BatchAnnotateFilesRequest(),
requests=[
image_annotator.AnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
@pytest.mark.parametrize(
"request_type", [image_annotator.AsyncBatchAnnotateImagesRequest, dict,]
)
def test_async_batch_annotate_images(request_type, transport: str = "grpc"):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.async_batch_annotate_images(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateImagesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_async_batch_annotate_images_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_images), "__call__"
) as call:
client.async_batch_annotate_images()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateImagesRequest()
@pytest.mark.asyncio
async def test_async_batch_annotate_images_async(
transport: str = "grpc_asyncio",
request_type=image_annotator.AsyncBatchAnnotateImagesRequest,
):
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.async_batch_annotate_images(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateImagesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_async_batch_annotate_images_async_from_dict():
await test_async_batch_annotate_images_async(request_type=dict)
def test_async_batch_annotate_images_flattened():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.async_batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
output_config=image_annotator.OutputConfig(
gcs_destination=image_annotator.GcsDestination(uri="uri_value")
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
assert arg == mock_val
arg = args[0].output_config
mock_val = image_annotator.OutputConfig(
gcs_destination=image_annotator.GcsDestination(uri="uri_value")
)
assert arg == mock_val
def test_async_batch_annotate_images_flattened_error():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.async_batch_annotate_images(
image_annotator.AsyncBatchAnnotateImagesRequest(),
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
output_config=image_annotator.OutputConfig(
gcs_destination=image_annotator.GcsDestination(uri="uri_value")
),
)
@pytest.mark.asyncio
async def test_async_batch_annotate_images_flattened_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.async_batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
output_config=image_annotator.OutputConfig(
gcs_destination=image_annotator.GcsDestination(uri="uri_value")
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
assert arg == mock_val
arg = args[0].output_config
mock_val = image_annotator.OutputConfig(
gcs_destination=image_annotator.GcsDestination(uri="uri_value")
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_async_batch_annotate_images_flattened_error_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.async_batch_annotate_images(
image_annotator.AsyncBatchAnnotateImagesRequest(),
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
output_config=image_annotator.OutputConfig(
gcs_destination=image_annotator.GcsDestination(uri="uri_value")
),
)
@pytest.mark.parametrize(
"request_type", [image_annotator.AsyncBatchAnnotateFilesRequest, dict,]
)
def test_async_batch_annotate_files(request_type, transport: str = "grpc"):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.async_batch_annotate_files(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_async_batch_annotate_files_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
client.async_batch_annotate_files()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
@pytest.mark.asyncio
async def test_async_batch_annotate_files_async(
transport: str = "grpc_asyncio",
request_type=image_annotator.AsyncBatchAnnotateFilesRequest,
):
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.async_batch_annotate_files(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_async_batch_annotate_files_async_from_dict():
await test_async_batch_annotate_files_async(request_type=dict)
def test_async_batch_annotate_files_flattened():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.async_batch_annotate_files(
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
]
assert arg == mock_val
def test_async_batch_annotate_files_flattened_error():
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.async_batch_annotate_files(
image_annotator.AsyncBatchAnnotateFilesRequest(),
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
@pytest.mark.asyncio
async def test_async_batch_annotate_files_flattened_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.async_batch_annotate_files), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.async_batch_annotate_files(
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].requests
mock_val = [
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_async_batch_annotate_files_flattened_error_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.async_batch_annotate_files(
image_annotator.AsyncBatchAnnotateFilesRequest(),
requests=[
image_annotator.AsyncAnnotateFileRequest(
input_config=image_annotator.InputConfig(
gcs_source=image_annotator.GcsSource(uri="uri_value")
)
)
],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ImageAnnotatorClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ImageAnnotatorClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ImageAnnotatorClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ImageAnnotatorGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ImageAnnotatorGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ImageAnnotatorClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.ImageAnnotatorGrpcTransport,)
def test_image_annotator_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ImageAnnotatorTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_image_annotator_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.vision_v1.services.image_annotator.transports.ImageAnnotatorTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ImageAnnotatorTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"batch_annotate_images",
"batch_annotate_files",
"async_batch_annotate_images",
"async_batch_annotate_files",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_image_annotator_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.vision_v1.services.image_annotator.transports.ImageAnnotatorTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ImageAnnotatorTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
quota_project_id="octopus",
)
def test_image_annotator_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.vision_v1.services.image_annotator.transports.ImageAnnotatorTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ImageAnnotatorTransport()
adc.assert_called_once()
def test_image_annotator_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ImageAnnotatorClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ImageAnnotatorGrpcTransport, grpc_helpers),
(transports.ImageAnnotatorGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_image_annotator_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"vision.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
),
scopes=["1", "2"],
default_host="vision.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_image_annotator_host_no_port():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="vision.googleapis.com"
),
)
assert client.transport._host == "vision.googleapis.com:443"
def test_image_annotator_host_with_port():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="vision.googleapis.com:8000"
),
)
assert client.transport._host == "vision.googleapis.com:8000"
def test_image_annotator_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ImageAnnotatorGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_image_annotator_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ImageAnnotatorGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ImageAnnotatorGrpcTransport,
transports.ImageAnnotatorGrpcAsyncIOTransport,
],
)
def test_image_annotator_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_image_annotator_grpc_lro_client():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_image_annotator_grpc_lro_async_client():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_product_path():
project = "squid"
location = "clam"
product = "whelk"
expected = "projects/{project}/locations/{location}/products/{product}".format(
project=project, location=location, product=product,
)
actual = ImageAnnotatorClient.product_path(project, location, product)
assert expected == actual
def test_parse_product_path():
expected = {
"project": "octopus",
"location": "oyster",
"product": "nudibranch",
}
path = ImageAnnotatorClient.product_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_product_path(path)
assert expected == actual
def test_product_set_path():
project = "cuttlefish"
location = "mussel"
product_set = "winkle"
expected = "projects/{project}/locations/{location}/productSets/{product_set}".format(
project=project, location=location, product_set=product_set,
)
actual = ImageAnnotatorClient.product_set_path(project, location, product_set)
assert expected == actual
def test_parse_product_set_path():
expected = {
"project": "nautilus",
"location": "scallop",
"product_set": "abalone",
}
path = ImageAnnotatorClient.product_set_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_product_set_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ImageAnnotatorClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = ImageAnnotatorClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = ImageAnnotatorClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = ImageAnnotatorClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = ImageAnnotatorClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = ImageAnnotatorClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = ImageAnnotatorClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = ImageAnnotatorClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ImageAnnotatorClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = ImageAnnotatorClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ImageAnnotatorClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ImageAnnotatorTransport, "_prep_wrapped_messages"
) as prep:
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ImageAnnotatorTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ImageAnnotatorClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = ImageAnnotatorAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = ImageAnnotatorClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport),
(ImageAnnotatorAsyncClient, transports.ImageAnnotatorGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
integration/integration_test.go | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"encoding/json"
"flag"
"fmt"
"log"
"math"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/pkg/errors"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/testutil"
)
var config *integrationTestConfig
var imageBuilder *DockerFileBuilder
var allDockerfiles []string
const (
daemonPrefix = "daemon://"
integrationPath = "integration"
dockerfilesPath = "dockerfiles"
emptyContainerDiff = `[
{
"Image1": "%s",
"Image2": "%s",
"DiffType": "File",
"Diff": {
"Adds": null,
"Dels": null,
"Mods": null
}
},
{
"Image1": "%s",
"Image2": "%s",
"DiffType": "Metadata",
"Diff": {
"Adds": [],
"Dels": []
}
}
]`
)
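// getDockerMajorVersion returns the major version of the local Docker server,
// parsed from `docker version --format {{.Server.Version}}`.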
func getDockerMajorVersion() int {
out, err := exec.Command("docker", "version", "--format", "{{.Server.Version}}").Output()
if err != nil {
log.Fatal("Error getting docker version of server:", err)
}
versionArr := strings.Split(string(out), ".")
ver, err := strconv.Atoi(versionArr[0])
if err != nil {
log.Fatal("Error getting docker version of server during parsing version string:", err)
}
return ver
}
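// launchTests uploads the build-context tarball when testing against a GCR
// repository, builds the images the suite depends on, and then runs the tests,
// returning the exit code from m.Run.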
func launchTests(m *testing.M) (int, error) {
if config.isGcrRepository() {
contextFile, err := CreateIntegrationTarball()
if err != nil {
return 1, errors.Wrap(err, "Failed to create tarball of integration files for build context")
}
fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
if err != nil {
return 1, errors.Wrap(err, "Failed to upload build context")
}
if err = os.Remove(contextFile); err != nil {
return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFile))
}
RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
defer DeleteFromBucket(fileInBucket)
}
if err := buildRequiredImages(); err != nil {
return 1, errors.Wrap(err, "Error while building images")
}
imageBuilder = NewDockerFileBuilder()
return m.Run(), nil
}
func TestMain(m *testing.M) {
var err error
if !meetsRequirements() {
fmt.Println("Missing required tools")
os.Exit(1)
}
if allDockerfiles, err = FindDockerFiles(dockerfilesPath); err != nil {
fmt.Println("Coudn't create map of dockerfiles", err)
os.Exit(1)
} else {
config = initIntegrationTestConfig()
exitCode, err := launchTests(m)
if err != nil {
fmt.Println(err)
}
os.Exit(exitCode)
}
}
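// buildRequiredImages builds the kaniko executor and cache warmer images and
// builds/pushes the onbuild and hardlink base images used by the test Dockerfiles.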
func buildRequiredImages() error {
setupCommands := []struct {
name string
command []string
}{
{
name: "Building kaniko image",
command: []string{"docker", "build", "-t", ExecutorImage, "-f", "../deploy/Dockerfile", ".."},
},
{
name: "Building cache warmer image",
command: []string{"docker", "build", "-t", WarmerImage, "-f", "../deploy/Dockerfile_warmer", ".."},
},
{
name: "Building onbuild base image",
command: []string{"docker", "build", "-t", config.onbuildBaseImage, "-f", fmt.Sprintf("%s/Dockerfile_onbuild_base", dockerfilesPath), "."},
},
{
name: "Pushing onbuild base image",
command: []string{"docker", "push", config.onbuildBaseImage},
},
{
name: "Building hardlink base image",
command: []string{"docker", "build", "-t", config.hardlinkBaseImage, "-f", fmt.Sprintf("%s/Dockerfile_hardlink_base", dockerfilesPath), "."},
},
{
name: "Pushing hardlink base image",
command: []string{"docker", "push", config.hardlinkBaseImage},
},
}
for _, setupCmd := range setupCommands {
fmt.Println(setupCmd.name)
cmd := exec.Command(setupCmd.command[0], setupCmd.command[1:]...)
if out, err := RunCommandWithoutTest(cmd); err != nil {
return errors.Wrap(err, fmt.Sprintf("%s failed: %s", setupCmd.name, string(out)))
}
}
return nil
}
func TestRun(t *testing.T) {
for _, dockerfile := range allDockerfiles {
t.Run("test_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
t.SkipNow()
}
if _, ok := imageBuilder.TestCacheDockerfiles[dockerfile]; ok {
t.SkipNow()
}
buildImage(t, dockerfile, imageBuilder)
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
})
}
err := logBenchmarks("benchmark")
if err != nil {
t.Logf("Failed to create benchmark file: %v", err)
}
}
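// getGitRepo returns the git build context to use: the pull-request or branch
// under test when running on Travis CI, and the upstream repository otherwise.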
func getGitRepo() string {
var branch, repoSlug string
if _, ok := os.LookupEnv("TRAVIS"); ok {
if os.Getenv("TRAVIS_PULL_REQUEST") != "false" {
branch = os.Getenv("TRAVIS_PULL_REQUEST_BRANCH")
repoSlug = os.Getenv("TRAVIS_PULL_REQUEST_SLUG")
log.Printf("Travis CI Pull request source repo: %s branch: %s\n", repoSlug, branch)
} else {
branch = os.Getenv("TRAVIS_BRANCH")
repoSlug = os.Getenv("TRAVIS_REPO_SLUG")
log.Printf("Travis CI repo: %s branch: %s\n", repoSlug, branch)
}
return "github.com/" + repoSlug + "#refs/heads/" + branch
}
return "github.com/GoogleContainerTools/kaniko"
}
func TestGitBuildcontext(t *testing.T) {
repo := getGitRepo()
dockerfile := fmt.Sprintf("%s/%s/Dockerfile_test_run_2", integrationPath, dockerfilesPath)
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_git")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
repo})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git")
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"-c", fmt.Sprintf("git://%s", repo))
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err != nil {
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
}
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
}
func TestGitBuildcontextSubPath(t *testing.T) {
repo := getGitRepo()
dockerfile := "Dockerfile_test_run_2"
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_git")
dockerCmd := exec.Command("docker",
append([]string{
"build",
"-t", dockerImage,
"-f", dockerfile,
repo + ":" + filepath.Join(integrationPath, dockerfilesPath),
})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git")
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(
dockerRunFlags,
ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"-c", fmt.Sprintf("git://%s", repo),
"--context-sub-path", filepath.Join(integrationPath, dockerfilesPath),
)
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err != nil {
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
}
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
}
func TestBuildViaRegistryMirrors(t *testing.T) {
repo := getGitRepo()
dockerfile := fmt.Sprintf("%s/%s/Dockerfile_registry_mirror", integrationPath, dockerfilesPath)
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_registry_mirror")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
repo})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_registry_mirror")
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"--registry-mirror", "doesnotexist.example.com",
"--registry-mirror", "us-mirror.gcr.io",
"-c", fmt.Sprintf("git://%s", repo))
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err != nil {
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
}
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
}
func TestBuildWithLabels(t *testing.T) {
repo := getGitRepo()
dockerfile := fmt.Sprintf("%s/%s/Dockerfile_test_label", integrationPath, dockerfilesPath)
testLabel := "mylabel=myvalue"
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_label:mylabel")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
"--label", testLabel,
repo})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_label:mylabel")
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"--label", testLabel,
"-c", fmt.Sprintf("git://%s", repo),
)
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err != nil {
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
}
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
}
func TestBuildWithHTTPError(t *testing.T) {
repo := getGitRepo()
dockerfile := fmt.Sprintf("%s/%s/Dockerfile_test_add_404", integrationPath, dockerfilesPath)
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_add_404")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
repo})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err == nil {
t.Errorf("an error was expected, got %s", string(out))
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_add_404")
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"-c", fmt.Sprintf("git://%s", repo),
)
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err == nil {
t.Errorf("an error was expected, got %s", string(out))
}
}
func TestLayers(t *testing.T) {
offset := map[string]int{
"Dockerfile_test_add": 12,
"Dockerfile_test_scratch": 3,
}
for _, dockerfile := range allDockerfiles {
t.Run("test_layer_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
t.SkipNow()
}
buildImage(t, dockerfile, imageBuilder)
// Pull the kaniko image
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
pullCmd := exec.Command("docker", "pull", kanikoImage)
RunCommand(pullCmd, t)
checkLayers(t, dockerImage, kanikoImage, offset[dockerfile])
})
}
err := logBenchmarks("benchmark_layers")
if err != nil {
t.Logf("Failed to create benchmark file: %v", err)
}
}
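// buildImage builds the given dockerfile through the shared DockerFileBuilder
// and aborts the test immediately if the build fails.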
func buildImage(t *testing.T, dockerfile string, imageBuilder *DockerFileBuilder) {
if err := imageBuilder.BuildImage(config, dockerfilesPath, dockerfile); err != nil {
t.Errorf("Error building image: %s", err)
t.FailNow()
}
}
// Build each image with kaniko twice, and then make sure they're exactly the same
func TestCache(t *testing.T) {
populateVolumeCache()
for dockerfile := range imageBuilder.TestCacheDockerfiles {
t.Run("test_cache_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
cache := filepath.Join(config.imageRepo, "cache", fmt.Sprintf("%v", time.Now().UnixNano()))
// Build the initial image which will cache layers
if err := imageBuilder.buildCachedImages(config, cache, dockerfilesPath, 0); err != nil {
t.Fatalf("error building cached image for the first time: %v", err)
}
// Build the second image which should pull from the cache
if err := imageBuilder.buildCachedImages(config, cache, dockerfilesPath, 1); err != nil {
t.Fatalf("error building cached image for the first time: %v", err)
}
// Make sure both images are the same
kanikoVersion0 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 0)
kanikoVersion1 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 1)
diff := containerDiff(t, kanikoVersion0, kanikoVersion1)
expected := fmt.Sprintf(emptyContainerDiff, kanikoVersion0, kanikoVersion1, kanikoVersion0, kanikoVersion1)
checkContainerDiffOutput(t, diff, expected)
})
}
if err := logBenchmarks("benchmark_cache"); err != nil {
t.Logf("Failed to create benchmark file: %v", err)
}
}
func TestRelativePaths(t *testing.T) {
dockerfile := "Dockerfile_relative_copy"
t.Run("test_relative_"+dockerfile, func(t *testing.T) {
t.Parallel()
dockerfile = filepath.Join("./dockerfiles", dockerfile)
contextPath := "./context"
err := imageBuilder.buildRelativePathsImage(
config.imageRepo,
dockerfile,
config.serviceAccount,
contextPath,
)
if err != nil {
t.Fatal(err)
}
dockerImage := GetDockerImage(config.imageRepo, "test_relative_"+dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, "test_relative_"+dockerfile)
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
})
}
type fileDiff struct {
Name string
Size int
}
type fileDiffResult struct {
Adds []fileDiff
Dels []fileDiff
}
type metaDiffResult struct {
Adds []string
Dels []string
}
type diffOutput struct {
Image1 string
Image2 string
DiffType string
Diff interface{}
}
func (diff *diffOutput) UnmarshalJSON(data []byte) error {
type Alias diffOutput
aux := &struct{ *Alias }{Alias: (*Alias)(diff)}
var rawJSON json.RawMessage
aux.Diff = &rawJSON
err := json.Unmarshal(data, &aux)
if err != nil {
return err
}
switch diff.DiffType {
case "File":
var dst fileDiffResult
err = json.Unmarshal(rawJSON, &dst)
diff.Diff = &dst
case "Metadata":
var dst metaDiffResult
err = json.Unmarshal(rawJSON, &dst)
diff.Diff = &dst
}
if err != nil {
return err
}
return err
}
var allowedDiffPaths = []string{"/sys"}
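// checkContainerDiffOutput unmarshals container-diff JSON output, drops known
// acceptable differences, and asserts the remaining diff matches the expected value.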
func checkContainerDiffOutput(t *testing.T, diff []byte, expected string) {
// Let's compare the json objects themselves instead of strings to avoid
// issues with spaces and indents
t.Helper()
diffInt := []diffOutput{}
expectedInt := []diffOutput{}
err := json.Unmarshal(diff, &diffInt)
if err != nil {
t.Error(err)
}
err = json.Unmarshal([]byte(expected), &expectedInt)
if err != nil {
t.Error(err)
}
// Some differences (ignored paths, etc.) are known and expected.
fdr := diffInt[0].Diff.(*fileDiffResult)
fdr.Adds = filterFileDiff(fdr.Adds)
fdr.Dels = filterFileDiff(fdr.Dels)
// Remove some of the meta diffs that shouldn't be checked
mdr := diffInt[1].Diff.(*metaDiffResult)
mdr.Adds = filterMetaDiff(mdr.Adds)
mdr.Dels = filterMetaDiff(mdr.Dels)
testutil.CheckErrorAndDeepEqual(t, false, nil, expectedInt, diffInt)
}
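// filterMetaDiff drops ArgsEscaped metadata differences except on Docker 19,
// where the diff is returned unchanged (see the TODO below).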
func filterMetaDiff(metaDiff []string) []string {
	// TODO: remove this once we agree testing shouldn't run on Docker 18.xx.
	// Docker 18.xx builds images with the metadata ArgsEscaped: true, while
	// Docker 19.xx builds images with ArgsEscaped: false.
if config.dockerMajorVersion == 19 {
return metaDiff
}
newDiffs := []string{}
for _, meta := range metaDiff {
if !strings.HasPrefix(meta, "ArgsEscaped") {
newDiffs = append(newDiffs, meta)
}
}
return newDiffs
}
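// filterFileDiff drops file differences under the paths listed in allowedDiffPaths.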
func filterFileDiff(f []fileDiff) []fileDiff {
var newDiffs []fileDiff
for _, diff := range f {
isIgnored := false
for _, p := range allowedDiffPaths {
if util.HasFilepathPrefix(diff.Name, p, false) {
isIgnored = true
break
}
}
if !isIgnored {
newDiffs = append(newDiffs, diff)
}
}
return newDiffs
}
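// checkLayers asserts that the two images differ in layer count by exactly the given offset.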
func checkLayers(t *testing.T, image1, image2 string, offset int) {
t.Helper()
img1, err := getImageDetails(image1)
if err != nil {
t.Fatalf("Couldn't get details from image reference for (%s): %s", image1, err)
}
img2, err := getImageDetails(image2)
if err != nil {
t.Fatalf("Couldn't get details from image reference for (%s): %s", image2, err)
}
actualOffset := int(math.Abs(float64(img1.numLayers - img2.numLayers)))
if actualOffset != offset {
t.Fatalf("Difference in number of layers in each image is %d but should be %d. Image 1: %s, Image 2: %s", actualOffset, offset, img1, img2)
}
}
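// getImageDetails resolves an image reference against the local daemon and
// returns its name, number of layers, and digest.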
func getImageDetails(image string) (*imageDetails, error) {
ref, err := name.ParseReference(image, name.WeakValidation)
if err != nil {
return nil, fmt.Errorf("Couldn't parse referance to image %s: %s", image, err)
}
imgRef, err := daemon.Image(ref)
if err != nil {
return nil, fmt.Errorf("Couldn't get reference to image %s from daemon: %s", image, err)
}
layers, err := imgRef.Layers()
if err != nil {
return nil, fmt.Errorf("Error getting layers for image %s: %s", image, err)
}
digest, err := imgRef.Digest()
if err != nil {
return nil, fmt.Errorf("Error getting digest for image %s: %s", image, err)
}
return &imageDetails{
name: image,
numLayers: len(layers),
digest: digest.Hex,
}, nil
}
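// logBenchmarks writes the timing summary to the named file when the BENCHMARK
// environment variable is set to a true value.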
func logBenchmarks(benchmark string) error {
if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err == nil && b {
f, err := os.Create(benchmark)
if err != nil {
return err
}
f.WriteString(timing.Summary())
defer f.Close()
}
return nil
}
type imageDetails struct {
name string
numLayers int
digest string
}
func (i imageDetails) String() string {
return fmt.Sprintf("Image: [%s] Digest: [%s] Number of Layers: [%d]", i.name, i.digest, i.numLayers)
}
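// initIntegrationTestConfig parses the test flags and fills in derived settings
// such as the Docker major version and the base image names.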
func initIntegrationTestConfig() *integrationTestConfig {
var c integrationTestConfig
	flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket to upload the tar-ed contents of the `integration` dir to.")
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.")
	flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account used to push images to GCR and upload/download files to GCS.")
flag.Parse()
if len(c.serviceAccount) > 0 {
absPath, err := filepath.Abs("../" + c.serviceAccount)
if err != nil {
log.Fatalf("Error getting absolute path for service account: %s\n", c.serviceAccount)
}
if _, err := os.Stat(absPath); os.IsNotExist(err) {
log.Fatalf("Service account does not exist: %s\n", absPath)
}
c.serviceAccount = absPath
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", absPath)
}
if c.imageRepo == "" {
log.Fatal("You must provide a image repository")
}
if c.isGcrRepository() && c.gcsBucket == "" {
log.Fatalf("You must provide a gcs bucket when using a Google Container Registry (\"%s\" was provided)", c.imageRepo)
}
if !strings.HasSuffix(c.imageRepo, "/") {
c.imageRepo = c.imageRepo + "/"
}
c.dockerMajorVersion = getDockerMajorVersion()
c.onbuildBaseImage = c.imageRepo + "onbuild-base:latest"
c.hardlinkBaseImage = c.imageRepo + "hardlink-base:latest"
return &c
}
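// meetsRequirements reports whether the external tools the tests shell out to
// (container-diff, gsutil) are available on PATH.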
func meetsRequirements() bool {
requiredTools := []string{"container-diff", "gsutil"}
hasRequirements := true
for _, tool := range requiredTools {
_, err := exec.LookPath(tool)
if err != nil {
fmt.Printf("You must have %s installed and on your PATH\n", tool)
hasRequirements = false
}
}
return hasRequirements
}
// containerDiff compares the container images image1 and image2.
func containerDiff(t *testing.T, image1, image2 string, flags ...string) []byte {
flags = append([]string{"diff"}, flags...)
flags = append(flags, image1, image2,
"-q", "--type=file", "--type=metadata", "--json")
containerdiffCmd := exec.Command("container-diff", flags...)
diff := RunCommand(containerdiffCmd, t)
t.Logf("diff = %s", string(diff))
return diff
}
| [
"\"TRAVIS_PULL_REQUEST\"",
"\"TRAVIS_PULL_REQUEST_BRANCH\"",
"\"TRAVIS_PULL_REQUEST_SLUG\"",
"\"TRAVIS_BRANCH\"",
"\"TRAVIS_REPO_SLUG\"",
"\"BENCHMARK\""
] | [] | [
"TRAVIS_BRANCH",
"TRAVIS_PULL_REQUEST",
"TRAVIS_PULL_REQUEST_SLUG",
"TRAVIS_REPO_SLUG",
"TRAVIS_PULL_REQUEST_BRANCH",
"BENCHMARK"
] | [] | ["TRAVIS_BRANCH", "TRAVIS_PULL_REQUEST", "TRAVIS_PULL_REQUEST_SLUG", "TRAVIS_REPO_SLUG", "TRAVIS_PULL_REQUEST_BRANCH", "BENCHMARK"] | go | 6 | 0 | |
orc8r/cloud/go/tools/combine_swagger/generate/generate_test.go | /*
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generate_test
import (
"io/ioutil"
"os"
"testing"
"magma/orc8r/cloud/go/tools/combine_swagger/generate"
swaggergen "magma/orc8r/cloud/go/tools/swaggergen/generate"
"github.com/stretchr/testify/assert"
)
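// Test_GenerateStandaloneSpec combines the swagger dependency tree of the test
// config into a standalone spec and compares it against the golden file.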
func Test_GenerateStandaloneSpec(t *testing.T) {
goldenFilePath := "../testdata/standalone.yml.golden"
targetFilePath := "../testdata/configs/importer2.yml"
specTargetPath := "../testdata/test_result.yml"
os.Remove(specTargetPath)
defer os.Remove(specTargetPath)
specs, err := swaggergen.ParseSwaggerDependencyTree(targetFilePath, os.Getenv("MAGMA_ROOT"))
assert.NoError(t, err)
err = generate.GenerateSpec(targetFilePath, specs, specTargetPath)
assert.NoError(t, err)
actual, err := ioutil.ReadFile(specTargetPath)
assert.NoError(t, err)
expected, err := ioutil.ReadFile(goldenFilePath)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
}
| [
"\"MAGMA_ROOT\""
] | [] | [
"MAGMA_ROOT"
] | [] | ["MAGMA_ROOT"] | go | 1 | 0 | |
global/setplot.py | from __future__ import absolute_import
from __future__ import print_function
import os
import numpy
import matplotlib.pyplot as plt
import datetime
import clawpack.visclaw.colormaps as colormap
import clawpack.visclaw.gaugetools as gaugetools
import clawpack.clawutil.data as clawutil
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data as geodata
import clawpack.geoclaw.surge.plot as surgeplot
def setplot(plotdata):
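    """Set up figures for GeoClaw storm-surge output: sea surface, currents,
    hurricane wind and pressure fields, and per-gauge time series."""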
plotdata.clearfigures()
plotdata.format = 'binary'
# Load data from output
clawdata = clawutil.ClawInputData(2)
clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))
physics = geodata.GeoClawData()
physics.read(os.path.join(plotdata.outdir, 'geoclaw.data'))
surge_data = geodata.SurgeData()
surge_data.read(os.path.join(plotdata.outdir, 'surge.data'))
friction_data = geodata.FrictionData()
friction_data.read(os.path.join(plotdata.outdir, 'friction.data'))
# Load storm track
track = surgeplot.track_data(os.path.join(plotdata.outdir, 'fort.track'))
# Set afteraxes function
def surge_afteraxes(cd):
surgeplot.surge_afteraxes(cd, track, plot_direction=False,
kwargs={"markersize": 4})
def friction_after_axes(cd):
plt.title(r"Manning's $n$ Coefficient")
# Color limits
surface_limits = [-5.0, 5.0]
speed_limits = [0.0, 3.0]
wind_limits = [0, 45]
pressure_limits = [940, 1013]
friction_bounds = [0.01, 0.04]
# ==========================================================================
# Plot specifications
# ==========================================================================
# Specify set of zooms for plotting
regions = {"World": {"xlimits": (clawdata.lower[0], clawdata.upper[0]),
"ylimits": (clawdata.lower[1], clawdata.upper[1]),
"figsize": (6.4 * 2, 4.8)}}
for (name, region_dict) in regions.items():
# Surface Figure
plotfigure = plotdata.new_plotfigure(name="Surface - %s" % name)
if 'figsize' in region_dict.keys():
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes('surface')
plotaxes.title = "Surface"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
# Speed Figure
plotfigure = plotdata.new_plotfigure(name="Currents - %s" % name)
if 'figsize' in region_dict.keys():
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Currents"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_speed(plotaxes, bounds=speed_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['speed'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#
# Hurricane Forcing fields
#
# Pressure field
plotfigure = plotdata.new_plotfigure(name='Pressure')
plotfigure.show = surge_data.pressure_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['World']['xlimits']
plotaxes.ylimits = regions['World']['ylimits']
if 'figsize' in regions['World'].keys():
plotfigure.kwargs = {"figsize": regions['World']['figsize']}
plotaxes.title = "Pressure Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_pressure(plotaxes, bounds=pressure_limits)
surgeplot.add_land(plotaxes)
# Wind field
plotfigure = plotdata.new_plotfigure(name='Wind Speed')
plotfigure.show = surge_data.wind_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['World']['xlimits']
plotaxes.ylimits = regions['World']['ylimits']
if 'figsize' in regions['World'].keys():
plotfigure.kwargs = {"figsize": regions['World']['figsize']}
plotaxes.title = "Wind Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_wind(plotaxes, bounds=wind_limits)
surgeplot.add_land(plotaxes)
# ========================================================================
# Figures for gauges
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Gauge Surfaces', figno=300,
type='each_gauge')
plotfigure.show = True
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [-2, 1]
# plotaxes.xlabel = "Days from landfall"
# plotaxes.ylabel = "Surface (m)"
plotaxes.ylimits = [-1, 5]
plotaxes.title = 'Surface'
def gauge_afteraxes(cd):
axes = plt.gca()
surgeplot.plot_landfall_gauge(cd.gaugesoln, axes)
# Fix up plot - in particular fix time labels
axes.set_title('Station %s' % cd.gaugeno)
axes.set_xlabel('Days relative to landfall')
axes.set_ylabel('Surface (m)')
axes.set_xlim([-2, 1])
axes.set_ylim([-1, 5])
axes.set_xticks([-2, -1, 0, 1])
axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$"])
axes.grid(True)
plotaxes.afteraxes = gauge_afteraxes
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
#
# Gauge Location Plot
#
def gauge_location_afteraxes(cd):
plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
surge_afteraxes(cd)
gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all',
format_string='ko', add_labels=True)
plotfigure = plotdata.new_plotfigure(name="Gauge Locations")
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Gauge Locations'
plotaxes.scaled = True
if 'figsize' in regions['World'].keys():
plotfigure.kwargs = {"figsize": regions['World']['figsize']}
plotaxes.xlimits = regions['World']["xlimits"]
plotaxes.ylimits = regions['World']["ylimits"]
plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
# -----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = [1, 2, 3, 4] # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # parallel plotting
return plotdata
| [] | [] | [] | [] | [] | python | null | null | null |
server/http.go | package server
import (
"encoding/json"
"fmt"
"html/template"
"io/fs"
"net/http"
"os"
"strconv"
"time"
"github.com/andig/evcc/api"
"github.com/andig/evcc/core"
"github.com/andig/evcc/util"
"github.com/andig/evcc/util/test"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
// Assets is the embedded assets file system
var Assets fs.FS
type chargeModeJSON struct {
Mode api.ChargeMode `json:"mode"`
}
type targetSoCJSON struct {
TargetSoC int `json:"targetSoC"`
}
type minSoCJSON struct {
MinSoC int `json:"minSoC"`
}
type route struct {
Methods []string
Pattern string
HandlerFunc http.HandlerFunc
}
// routeLogger traces matched routes including their execution time
func routeLogger(inner http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
inner.ServeHTTP(w, r)
log.TRACE.Printf(
"%s\t%s\t%s",
r.Method,
r.RequestURI,
time.Since(start),
)
}
}
func indexHandler(site core.SiteAPI) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html; charset=UTF-8")
indexTemplate, err := fs.ReadFile(Assets, "index.html")
if err != nil {
log.FATAL.Print("httpd: failed to load embedded template:", err.Error())
log.FATAL.Fatal("Make sure templates are included using the `release` build tag or use `make build`")
}
t, err := template.New("evcc").Delims("[[", "]]").Parse(string(indexTemplate))
if err != nil {
log.FATAL.Fatal("httpd: failed to create main page template:", err.Error())
}
if err := t.Execute(w, map[string]interface{}{
"Version": Version,
"Commit": Commit,
"Configured": len(site.LoadPoints()),
}); err != nil {
log.ERROR.Println("httpd: failed to render main page:", err.Error())
}
})
}
// jsonHandler is a middleware that decorates responses with JSON and CORS headers
func jsonHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
h.ServeHTTP(w, r)
})
}
func jsonResponse(w http.ResponseWriter, r *http.Request, content interface{}) {
w.WriteHeader(http.StatusOK)
if err := json.NewEncoder(w).Encode(content); err != nil {
log.ERROR.Printf("httpd: failed to encode JSON: %v", err)
}
}
// HealthHandler returns 200 OK if the site is healthy and 500 otherwise
func HealthHandler(site core.SiteAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !site.Healthy() {
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, "OK")
}
}
// TemplatesHandler returns the configuration templates for the requested class
func TemplatesHandler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
class, ok := vars["class"]
if !ok {
w.WriteHeader(http.StatusBadRequest)
return
}
type template = struct {
Name string `json:"name"`
Sample string `json:"template"`
}
res := make([]template, 0)
for _, conf := range test.ConfigTemplates(class) {
typedSample := fmt.Sprintf("type: %s\n%s", conf.Type, conf.Sample)
t := template{
Name: conf.Name,
Sample: typedSample,
}
res = append(res, t)
}
jsonResponse(w, r, res)
}
}
// StateHandler returns the current cached state
func StateHandler(cache *util.Cache) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
res := cache.State()
for _, k := range []string{"availableVersion", "releaseNotes"} {
delete(res, k)
}
jsonResponse(w, r, res)
}
}
// CurrentChargeModeHandler returns current charge mode
func CurrentChargeModeHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
res := chargeModeJSON{Mode: loadpoint.GetMode()}
jsonResponse(w, r, res)
}
}
// ChargeModeHandler updates charge mode
func ChargeModeHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
modeS, ok := vars["mode"]
mode := api.ChargeModeString(modeS)
if mode == "" || string(mode) != modeS || !ok {
w.WriteHeader(http.StatusBadRequest)
return
}
loadpoint.SetMode(mode)
res := chargeModeJSON{Mode: loadpoint.GetMode()}
jsonResponse(w, r, res)
}
}
// CurrentTargetSoCHandler returns current target soc
func CurrentTargetSoCHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
res := targetSoCJSON{TargetSoC: loadpoint.GetTargetSoC()}
jsonResponse(w, r, res)
}
}
// TargetSoCHandler updates target soc
func TargetSoCHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
socS, ok := vars["soc"]
soc, err := strconv.ParseInt(socS, 10, 32)
if ok && err == nil {
err = loadpoint.SetTargetSoC(int(soc))
}
if !ok || err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
res := targetSoCJSON{TargetSoC: loadpoint.GetTargetSoC()}
jsonResponse(w, r, res)
}
}
// CurrentMinSoCHandler returns current minimum soc
func CurrentMinSoCHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
res := minSoCJSON{MinSoC: loadpoint.GetMinSoC()}
jsonResponse(w, r, res)
}
}
// MinSoCHandler updates minimum soc
func MinSoCHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
socS, ok := vars["soc"]
soc, err := strconv.ParseInt(socS, 10, 32)
if ok && err == nil {
err = loadpoint.SetMinSoC(int(soc))
}
if !ok || err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
res := minSoCJSON{MinSoC: loadpoint.GetMinSoC()}
jsonResponse(w, r, res)
}
}
// RemoteDemandHandler updates the remote charging demand and its source
func RemoteDemandHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
demandS, ok := vars["demand"]
var source string
if ok {
source, ok = vars["source"]
}
demand, err := core.RemoteDemandString(demandS)
if !ok || err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
loadpoint.RemoteControl(source, demand)
res := struct {
Demand core.RemoteDemand `json:"demand"`
Source string `json:"source"`
}{
Source: source,
Demand: demand,
}
jsonResponse(w, r, res)
}
}
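// timezone returns the time.Location configured through the TZ environment
// variable, falling back to the local time zone if TZ is not set.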
func timezone() *time.Location {
tz := os.Getenv("TZ")
if tz == "" {
tz = "Local"
}
loc, _ := time.LoadLocation(tz)
return loc
}
// TargetChargeHandler updates the target charging time and soc
func TargetChargeHandler(loadpoint core.LoadPointAPI) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
socS, ok := vars["soc"]
socV, err := strconv.ParseInt(socS, 10, 32)
if !ok || err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
timeS, ok := vars["time"]
timeV, err := time.ParseInLocation("2006-01-02T15:04:05", timeS, timezone())
if !ok || err != nil || timeV.Before(time.Now()) {
log.DEBUG.Printf("parse time: %v", err)
w.WriteHeader(http.StatusBadRequest)
return
}
loadpoint.SetTargetCharge(timeV, int(socV))
res := struct {
SoC int64 `json:"soc"`
Time time.Time `json:"time"`
}{
SoC: socV,
Time: timeV,
}
jsonResponse(w, r, res)
}
}
// SocketHandler attaches websocket handler to uri
func SocketHandler(hub *SocketHub) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ServeWebsocket(hub, w, r)
}
}
// HTTPd wraps an http.Server and adds the root router
type HTTPd struct {
*http.Server
}
// NewHTTPd creates an HTTP server with configured routes for the site and its loadpoints
func NewHTTPd(url string, site core.SiteAPI, hub *SocketHub, cache *util.Cache) *HTTPd {
routes := map[string]route{
"health": {[]string{"GET"}, "/health", HealthHandler(site)},
"state": {[]string{"GET"}, "/state", StateHandler(cache)},
"templates": {[]string{"GET"}, "/config/templates/{class:[a-z]+}", TemplatesHandler()},
}
router := mux.NewRouter().StrictSlash(true)
// websocket
router.HandleFunc("/ws", SocketHandler(hub))
// static - individual handlers per root and folders
static := router.PathPrefix("/").Subrouter()
static.Use(handlers.CompressHandler)
static.HandleFunc("/", indexHandler(site))
for _, dir := range []string{"css", "js", "ico"} {
static.PathPrefix("/" + dir).Handler(http.FileServer(http.FS(Assets)))
}
// api
api := router.PathPrefix("/api").Subrouter()
api.Use(jsonHandler)
api.Use(handlers.CompressHandler)
api.Use(handlers.CORS(
handlers.AllowedHeaders([]string{
"Accept", "Accept-Language", "Content-Language", "Content-Type", "Origin",
}),
))
// site api
for _, r := range routes {
api.Methods(r.Methods...).Path(r.Pattern).Handler(r.HandlerFunc)
}
// loadpoint api
for id, lp := range site.LoadPoints() {
lpAPI := api.PathPrefix(fmt.Sprintf("/loadpoints/%d", id)).Subrouter()
routes := map[string]route{
"getmode": {[]string{"GET"}, "/mode", CurrentChargeModeHandler(lp)},
"setmode": {[]string{"POST", "OPTIONS"}, "/mode/{mode:[a-z]+}", ChargeModeHandler(lp)},
"gettargetsoc": {[]string{"GET"}, "/targetsoc", CurrentTargetSoCHandler(lp)},
"settargetsoc": {[]string{"POST", "OPTIONS"}, "/targetsoc/{soc:[0-9]+}", TargetSoCHandler(lp)},
"getminsoc": {[]string{"GET"}, "/minsoc", CurrentMinSoCHandler(lp)},
"setminsoc": {[]string{"POST", "OPTIONS"}, "/minsoc/{soc:[0-9]+}", MinSoCHandler(lp)},
"settargetcharge": {[]string{"POST", "OPTIONS"}, "/targetcharge/{soc:[0-9]+}/{time:[0-9TZ:-]+}", TargetChargeHandler(lp)},
"remotedemand": {[]string{"POST", "OPTIONS"}, "/remotedemand/{demand:[a-z]+}/{source}", RemoteDemandHandler(lp)},
}
for _, r := range routes {
lpAPI.Methods(r.Methods...).Path(r.Pattern).Handler(r.HandlerFunc)
}
}
srv := &HTTPd{
Server: &http.Server{
Addr: url,
Handler: router,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 120 * time.Second,
ErrorLog: log.ERROR,
},
}
srv.SetKeepAlivesEnabled(true)
return srv
}
// Router returns the main router
func (s *HTTPd) Router() *mux.Router {
return s.Handler.(*mux.Router)
}
| [
"\"TZ\""
] | [] | [
"TZ"
] | [] | ["TZ"] | go | 1 | 0 | |
utils/utils_test.go | package utils
import (
"log"
"os"
"testing"
"github.com/crypto-crawler/bloxroute-go/client"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestSubscribeWalletBalance(t *testing.T) {
certFile := os.Getenv("BLOXROUTE_CERT_FILE")
keyFile := os.Getenv("BLOXROUTE_KEY_FILE")
if certFile == "" || keyFile == "" {
assert.FailNow(t, "Please provide the bloXroute cert and key files path in the environment variable variable")
}
stopCh := make(chan struct{})
bloXrouteClient, err := client.NewBloXrouteClientToCloud("BSC-Mainnet", certFile, keyFile, stopCh)
assert.NoError(t, err)
balancesCh := make(chan *WalletBalance)
addresses := []common.Address{
common.HexToAddress("0x95eA23508ecc3521081E72352C13707F6b179Fc1"),
}
err = SubscribeWalletBalance(bloXrouteClient, addresses, balancesCh)
assert.NoError(t, err)
balance := <-balancesCh
log.Println(balance)
if balance.Bnb != nil {
assert.Equal(t, balance.Bnb.String(), "2618353928878013888")
}
balance = <-balancesCh
log.Println(balance)
if balance.Bnb != nil {
assert.Equal(t, balance.Bnb.String(), "2618353928878013888")
}
balance = <-balancesCh
log.Println(balance)
if balance.Bnb != nil {
assert.Equal(t, balance.Bnb.String(), "2618353928878013888")
}
close(stopCh)
}
| [
"\"BLOXROUTE_CERT_FILE\"",
"\"BLOXROUTE_KEY_FILE\""
] | [] | [
"BLOXROUTE_KEY_FILE",
"BLOXROUTE_CERT_FILE"
] | [] | ["BLOXROUTE_KEY_FILE", "BLOXROUTE_CERT_FILE"] | go | 2 | 0 | |
nas_big_data/covertype/best/evaluate.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import json
import time
from nas_big_data.covertype.problem_ae import Problem
from nas_big_data.covertype.load_data import load_data
_, (X_test, y_test) = load_data(use_test=True)
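# arch_seq is the architecture token sequence passed to Problem.get_keras_model
# below; it is assumed to be the best architecture found by a prior NAS search.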
arch_seq = [
9,
1,
30,
1,
1,
28,
1,
1,
0,
24,
0,
0,
1,
23,
0,
1,
0,
27,
1,
1,
1,
18,
0,
1,
1,
2,
0,
1,
1,
14,
1,
1,
0,
20,
1,
0,
1,
]
model = Problem.get_keras_model(arch_seq)
model.save_weights("myweights")
t1 = time.time()
model.load_weights("myweights")
y_pred = model.predict(X_test)
t2 = time.time()
data_json = {"timing_predict": t2 - t1}
with open("timing_predict.json", "w") as fp:
json.dump(data_json, fp)
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
detector/detector_api.py | from functools import reduce
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from newspaper import Article
import os
from fake_useragent import UserAgent
import pandas as pd
from requests import get
from lxml import etree
from scipy.sparse import hstack
from flask import Flask, request, jsonify
from urllib.parse import urlparse
import numpy as np
import pickle
SHARED_COUNT_API_KEY = os.environ.get('SHARED_COUNT_API_KEY')
PROXY_IP = os.environ.get('PROXY_IP')
PROXY_IP2 = os.environ.get('PROXY_IP2')
PROXY_IP3 = os.environ.get('PROXY_IP3')
AYLIEN_APP_ID4 = os.environ.get('AYLIEN_APP_ID4')
AYLIEN_APP_KEY4 = os.environ.get('AYLIEN_APP_KEY4')
proxies = [PROXY_IP, PROXY_IP2, PROXY_IP3]
categorizer = pickle.load(open('categorizer.pkl', 'rb'))
clf = pickle.load(open('detector_clf.pkl', 'rb'))
body_tfidf = pickle.load(open('detector_body_tfidf.pkl', 'rb'))
title_tfidf = pickle.load(open('detector_title_tfidf.pkl', 'rb'))
def get_reddit_shared_count(url):
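    """Return an aggregate Reddit score for the URL: the number of submissions
    plus the summed scores and comment counts of those submissions."""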
headers = {'User-Agent': UserAgent().random}
infos = get(
'https://www.reddit.com/api/info.json?url=' + url,
headers=headers).json()['data']['children']
sub_shared_count = len(infos)
total_score = reduce((lambda x, info: x + info['data']['score']), infos, 0)
total_num_comments = reduce(
(lambda x, info: x + info['data']['num_comments']), infos, 0)
return total_score + sub_shared_count + total_num_comments
def get_popularity(url):
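    """Aggregate the URL's social popularity from the SharedCount API (Facebook,
    LinkedIn, Pinterest, StumbleUpon) and the Reddit score above."""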
res = get('https://api.sharedcount.com/v1.0/', {
'url': 'http://' + url,
'apikey': SHARED_COUNT_API_KEY
}).json()
reddit_total = get_reddit_shared_count(url)
su_score = res['StumbleUpon'] if res['StumbleUpon'] else 0
pin_score = res['Pinterest'] if res['Pinterest'] else 0
li_score = res['LinkedIn'] if res['LinkedIn'] else 0
fb_score = res['Facebook']['total_count'] if res['Facebook'][
'total_count'] else 0
return fb_score + reddit_total + li_score + pin_score + su_score
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def predict():
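    """Parse the posted article, look up the source's Alexa rank and social
    popularity, build the feature vector and return the credibility prediction."""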
info = request.json
a = Article('')
a.set_html(info['body'])
a.parse()
# if len(a.text.split()) < 20:
# return 'Please enter a valid article url from the source', 400
xml_str = get('http://data.alexa.com/data?cli=10&url=' + info['url'])
tree = etree.fromstring(xml_str.text.encode('utf-8'))
etree.strip_tags(tree, etree.Comment)
world_rank = 0
country_rank = 0
domain = info['url'].replace(urlparse(info['url']).path, '')
for x in tree.xpath('/ALEXA/SD/POPULARITY'):
world_rank = int(x.get('TEXT')) if x.get('TEXT') else 0
domain = x.get('URL') if x.get('URL') else ''
if not domain:
for z in tree.xpath('/ALEXA/SD'):
print(z.get('HOST'))
domain = z.get('HOST') if z.get('HOST') else ''
for x in tree.xpath('/ALEXA/SD/COUNTRY'):
country_rank = int(x.get('RANK')) if x.get('RANK') else 0
body = a.text.replace('ADVERTISEMENT', '').rstrip(r'\n\r')
src_social_score = get_popularity(domain)
print({
# 'src_social_score': src_social_score,
# 'src_has_impt_pages': 1 if info['sourceHasAboutPage'] or info['sourceHasContactPage'] else 0,
# 'src_wot_reputation': info['wotReputation'],
'src_has_about': 1 if info['sourceHasAboutPage'] else 0,
'src_has_contact': 1 if info['sourceHasContactPage'] else 0,
'src_domain_has_number': 1 if info['domainHasNumber'] else 0,
'src_domain_is_blog': 1 if info['isBlogDomain'] else 0,
# 'src_country_rank': 999999999 if country_rank == 0 else country_rank,
'src_world_rank': 999999999 if world_rank == 0 else world_rank,
})
test_df = pd.DataFrame(
{
'body': body,
'title': a.title,
# 'src_social_score': src_social_score,
# 'src_has_impt_pages': 1 if info['sourceHasAboutPage'] or info['sourceHasContactPage'] else 0,
'src_has_about': 1 if info['sourceHasAboutPage'] else 0,
'src_has_contact': 1 if info['sourceHasContactPage'] else 0,
'src_domain_has_number': 1 if info['domainHasNumber'] else 0,
'src_domain_is_blog': 1 if info['isBlogDomain'] else 0,
# 'src_wot_reputation': info['wotReputation'],
# 'src_country_rank': 999999999 if country_rank == 0 else country_rank,
'src_world_rank': 999999999 if world_rank == 0 else world_rank,
},
index=[0])
body = body_tfidf.transform(test_df.body.values)
title = title_tfidf.transform(test_df.title.values)
# print(zip(body_tfidf.get_feature_names(), body_tfidf.idf_))
# print(body)
# print(np.sum(body))
test_df.drop(['body', 'title'], axis=1, inplace=True)
test = hstack([test_df, body, title], format='csr')
pred = clf.predict(test)[0]
proba = clf.predict_proba(test)[0][1]
print(clf.classes_)
print(clf.predict_proba(test)[0])
print(pred)
print(proba)
return jsonify({
'isCredible': bool(pred),
'pct': proba * 100,
'sourceUrl': domain,
'socialScore': src_social_score,
'countryRank': country_rank,
'worldRank': world_rank,
})
@app.route('/categorize', methods=['POST'])
def categorize():
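    """Return the category labels and probabilities for the posted text, sorted
    by descending score."""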
body = request.json
pred_proba = categorizer.predict_proba([body['text']])[0]
res = []
for i in range(len(categorizer.classes_)):
res.append({'label': categorizer.classes_[i], 'score': pred_proba[i]})
res = sorted(res, key=lambda cat: cat['score'], reverse=True)
return jsonify(res)
if __name__ == '__main__':
# clf = joblib.load('model.pkl')
app.run(port=5001, debug=True)
| [] | [] | [
"AYLIEN_APP_ID4",
"SHARED_COUNT_API_KEY",
"PROXY_IP2",
"PROXY_IP3",
"PROXY_IP",
"AYLIEN_APP_KEY4"
] | [] | ["AYLIEN_APP_ID4", "SHARED_COUNT_API_KEY", "PROXY_IP2", "PROXY_IP3", "PROXY_IP", "AYLIEN_APP_KEY4"] | python | 6 | 0 | |
run.py | import os
from flask_migrate import Migrate
from app import create_app, db
from app.models import User, UserInfo, Comment, Article
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
migrate = Migrate(app, db)
@app.shell_context_processor
def make_shell_context():
return dict(db=db, User=User, UserInfo=UserInfo, Article=Article, Comment=Comment)
if __name__ == '__main__':
app.run()
| [] | [] | [
"FLASK_CONFIG"
] | [] | ["FLASK_CONFIG"] | python | 1 | 0 | |
Kai/crab/NANOv7_NoveCampaign/2018/crab_cfg_2018_tt_DL-HDAMPup.py | import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_tt_DL-HDAMPup'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'crab_PSet_2018_tt_DL-HDAMPup.py'
config.JobType.maxMemoryMB = 3000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_tt_DL-HDAMPup.sh'
config.JobType.inputFiles = ['crab_script_2018_tt_DL-HDAMPup.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = [] #['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/TTTo2L2Nu_hdampUP_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18NanoAODv7-Nano02Apr2020_102X_upgrade2018_realistic_v21_ext1-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
# config.Data.outLFNDirBase = '/store/user/{user}/NoveCampaign'.format(user=getUsernameFromCRIC())
config.Data.outLFNDirBase = '/store/group/fourtop/NoveCampaign'
config.Data.publication = True
config.Data.outputDatasetTag = 'NoveCampaign'
config.section_("Site")
config.Site.storageSite = 'T2_BE_IIHE'
| [] | [] | [
"CMSSW_BASE"
] | [] | ["CMSSW_BASE"] | python | 1 | 0 | |
components/automate-deployment/pkg/manifest/client/http.go | package client
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/openpgp"
"github.com/chef/automate/components/automate-deployment/pkg/habpkg"
"github.com/chef/automate/components/automate-deployment/pkg/manifest"
"github.com/chef/automate/components/automate-deployment/pkg/manifest/parser"
)
const (
defaultLatestManifestURLFmt = "https://packages.chef.io/manifests/%s/automate/latest.json"
defaultManifestURLFmt = "https://packages.chef.io/manifests/automate/%s.json"
packagesChefIOSigAsc = `-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.12 (Darwin)
Comment: GPGTools - http://gpgtools.org
mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
zA==
=IxPr
-----END PGP PUBLIC KEY BLOCK-----`
)
var packagesChefIOKeyRing openpgp.EntityList
func init() {
entityList, err := openpgp.ReadArmoredKeyRing(strings.NewReader(packagesChefIOSigAsc))
if err != nil {
		panic(errors.Wrap(err, "Failed to read packages.chef.io public key"))
}
packagesChefIOKeyRing = entityList
}
// HTTP is a client that makes HTTP requests to retrieve and parse a Manifest. The
// default manifests are stored on S3, but the manifest URL formats can be
// overridden for testing.
type HTTP struct {
HTTPClient *http.Client
latestManifestURLFmt string
manifestURLFmt string
noVerify bool
}
// An Opt represents an option that can be passed to NewClient
type Opt func(c *HTTP)
// NewHTTPClient returns a client with the given options applied.
func NewHTTPClient(options ...Opt) *HTTP {
c := &HTTP{
HTTPClient: &http.Client{},
}
for _, option := range options {
option(c)
}
if c.latestManifestURLFmt == "" {
c.latestManifestURLFmt = defaultLatestManifestURLFmt
}
if c.manifestURLFmt == "" {
c.manifestURLFmt = defaultManifestURLFmt
}
// We allow skipping manifest verification if needed by setting this environment
// variable. Set it only if you must
if os.Getenv("CHEF_AUTOMATE_SKIP_MANIFEST_VERIFICATION") == "true" {
c.noVerify = true
}
return c
}
// LatestURLFormat returns an Opt that can be passed to NewClient which sets
// the latestManifestURLFmt to the given string. Used in testing.
func LatestURLFormat(urlFormat string) Opt {
return func(c *HTTP) {
c.latestManifestURLFmt = urlFormat
}
}
// URLFormat returns an Opt that can be passed to NewClient which sets
// the manifestURLFmt to the given string. Used in testing.
func URLFormat(urlFormat string) Opt {
return func(c *HTTP) {
c.manifestURLFmt = urlFormat
}
}
// NoVerify disables signature verification of the manifest
func NoVerify(noVerify bool) Opt {
return func(c *HTTP) {
c.noVerify = noVerify
}
}
// GetCurrentManifest retrieves the current manifest for the given
// channel.
func (c *HTTP) GetCurrentManifest(ctx context.Context, channel string) (*manifest.A2, error) {
url := fmt.Sprintf(c.latestManifestURLFmt, channel)
return c.manifestFromURL(ctx, url)
}
// GetManifest retrieves the manifest for the given release.
func (c *HTTP) GetManifest(ctx context.Context, release string) (*manifest.A2, error) {
url := fmt.Sprintf(c.manifestURLFmt, release)
return c.manifestFromURL(ctx, url)
}
func (c *HTTP) manifestFromURL(ctx context.Context, url string) (*manifest.A2, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
response, err := c.HTTPClient.Do(req)
if err != nil {
return nil, err
}
defer response.Body.Close() // nolint: errcheck
switch response.StatusCode {
case http.StatusOK:
// Yay!
case http.StatusNotFound:
return nil, manifest.NewNoSuchManifestError(errors.Errorf("%s: %s", url, response.Status))
default:
return nil, errors.Errorf("Unexpected HTTP response from %s: %s", url, response.Status)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
if !c.noVerify {
signatureURL := fmt.Sprintf("%s.asc", url)
logrus.WithField("url", signatureURL).Debug("Checking manifest signature")
sigReq, err := http.NewRequest("GET", signatureURL, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to GET manifest signature")
}
sigReq = sigReq.WithContext(ctx)
sigResp, err := c.HTTPClient.Do(sigReq)
if err != nil {
return nil, err
}
defer sigResp.Body.Close() // nolint: errcheck
if sigResp.StatusCode != http.StatusOK {
return nil, errors.Errorf("Failed to GET manifest signature. status=%s", sigResp.Status)
}
sigBody, err := ioutil.ReadAll(sigResp.Body)
if err != nil {
return nil, errors.Wrap(err, "failed to read signature response")
}
_, err = openpgp.CheckArmoredDetachedSignature(packagesChefIOKeyRing,
bytes.NewBuffer(body), bytes.NewBuffer(sigBody))
if err != nil {
return nil, errors.Wrap(err,
"Failed to verify manifest signature. This may indicate that the manifest is corrupt or has been tampered with. If this problem persists, please contact Chef Support")
}
}
m, err := parser.ManifestFromBytes(body)
if err != nil {
return nil, err
}
m.HartOverrides = []habpkg.Hart{}
return m, nil
}
| [
"\"CHEF_AUTOMATE_SKIP_MANIFEST_VERIFICATION\""
] | [] | [
"CHEF_AUTOMATE_SKIP_MANIFEST_VERIFICATION"
] | [] | ["CHEF_AUTOMATE_SKIP_MANIFEST_VERIFICATION"] | go | 1 | 0 | |
fuzzers/005-tilegrid/orphan_int_column/top.py | import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.db import Database
INT_TILE_TYPES = ['INT_L', 'INT_R']
HCLK_TILE_TYPES = ['HCLK_L', 'HCLK_R', 'HCLK_L_BOT_UTURN', 'HCLK_R_BOT_UTURN']
def get_int_column_roots(grid):
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
if gridinfo.tile_type not in INT_TILE_TYPES:
continue
next_gridinfo = grid.gridinfo_at_loc((loc.grid_x, loc.grid_y + 1))
if next_gridinfo.tile_type in INT_TILE_TYPES:
continue
if next_gridinfo.tile_type in HCLK_TILE_TYPES:
continue
assert next_gridinfo.tile_type in [
'B_TERM_INT', 'BRKH_INT', 'BRKH_B_TERM_INT'
], next_gridinfo.tile_type
yield tile_name
def build_int_columns(grid):
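    """ Map each column root tile to the list of INT tiles in its column,
    walking upward (decreasing grid_y) until a non-INT tile is reached. """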
int_columns = {}
for root_tile_name in get_int_column_roots(grid):
assert root_tile_name not in int_columns
int_columns[root_tile_name] = []
tile_name = root_tile_name
gridinfo = grid.gridinfo_at_tilename(tile_name)
# Walk up INT column.
while gridinfo.tile_type in INT_TILE_TYPES:
int_columns[root_tile_name].append(tile_name)
loc = grid.loc_of_tilename(tile_name)
tile_name = grid.tilename_at_loc((loc.grid_x, loc.grid_y - 1))
gridinfo = grid.gridinfo_at_tilename(tile_name)
if gridinfo.tile_type in HCLK_TILE_TYPES:
loc = grid.loc_of_tilename(tile_name)
tile_name = grid.tilename_at_loc((loc.grid_x, loc.grid_y - 1))
gridinfo = grid.gridinfo_at_tilename(tile_name)
assert gridinfo.tile_type in [
'T_TERM_INT', 'BRKH_INT', 'BRKH_TERM_INT'
], gridinfo.tile_type
return int_columns
def pair_int_tiles(grid, int_tiles):
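    """ Pair every INT_L tile with the adjacent INT_R tile (and vice versa) and
    yield each pair as a sorted [INT_L, INT_R] list of tile names. """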
tiles_left = set(int_tiles)
while tiles_left:
tile_name = tiles_left.pop()
gridinfo = grid.gridinfo_at_tilename(tile_name)
loc = grid.loc_of_tilename(tile_name)
assert gridinfo.tile_type in INT_TILE_TYPES
if gridinfo.tile_type == 'INT_L':
other_int_tile = 'INT_R'
other_loc = (loc.grid_x + 1, loc.grid_y)
else:
other_int_tile = 'INT_L'
other_loc = (loc.grid_x - 1, loc.grid_y)
paired_tile_name = grid.tilename_at_loc(other_loc)
paired_gridinfo = grid.gridinfo_at_tilename(paired_tile_name)
assert paired_gridinfo.tile_type == other_int_tile, paired_gridinfo.tile_type
tiles_left.remove(paired_tile_name)
yield sorted([tile_name, paired_tile_name])
def is_orphan_int_row(grid, int_l, int_r):
""" Returns true if given INT pair have no adjcent sites. """
loc = grid.loc_of_tilename(int_l)
if grid.gridinfo_at_loc(
(loc.grid_x - 1, loc.grid_y)).tile_type != 'INT_INTERFACE_L':
return False
if grid.gridinfo_at_loc(
(loc.grid_x - 2, loc.grid_y)).tile_type != 'VFRAME':
return False
loc = grid.loc_of_tilename(int_r)
if grid.gridinfo_at_loc(
(loc.grid_x + 1, loc.grid_y)).tile_type != 'INT_INTERFACE_R':
return False
if grid.gridinfo_at_loc(
(loc.grid_x + 2,
loc.grid_y)).tile_type not in ['CLK_FEED', 'CLK_BUFG_REBUF', 'NULL']:
return False
return True
def gen_orphan_ints(grid):
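    """ Yield (INT_L column, INT_R column) tile lists for columns in which no
    row has adjacent sites, i.e. orphan INT columns. """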
int_columns = build_int_columns(grid)
for int_l_column, int_r_column in sorted(pair_int_tiles(
grid, int_columns.keys())):
found_site = False
for int_l, int_r in zip(int_columns[int_l_column],
int_columns[int_r_column]):
if not is_orphan_int_row(grid, int_l, int_r):
found_site = True
break
if not found_site:
yield int_columns[int_l_column], int_columns[int_r_column]
def write_params(params):
pinstr = 'tile,val\n'
for tile, (val) in sorted(params.items()):
pinstr += '%s,%s\n' % (tile, val)
open('params.csv', 'w').write(pinstr)
def build_cross_int(params, grid, int_l, int_r):
""" Toggles INT_R.ER1BEG1.EE4END0 """
loc = grid.loc_of_tilename(int_r)
origin_tile = grid.tilename_at_loc((loc.grid_x - 4, loc.grid_y))
origin_gridinfo = grid.gridinfo_at_tilename(origin_tile)
assert origin_gridinfo.tile_type == 'CLBLL_R'
origin_site = sorted(origin_gridinfo.sites.keys())[random.randint(0, 1)]
dest_tile = grid.tilename_at_loc((loc.grid_x + 4, loc.grid_y))
dest_gridinfo = grid.gridinfo_at_tilename(dest_tile)
assert dest_gridinfo.tile_type == 'CLBLL_L'
dest_site = sorted(dest_gridinfo.sites.keys())[0]
dest2_tile = grid.tilename_at_loc((loc.grid_x + 4, loc.grid_y + 1))
dest2_gridinfo = grid.gridinfo_at_tilename(dest2_tile)
assert dest2_gridinfo.tile_type == 'CLBLL_L', dest2_gridinfo.tile_type
dest2_site = sorted(dest2_gridinfo.sites.keys())[1]
if random.randint(0, 1):
dest_wire = 'origin_wire_{origin_site}'.format(origin_site=origin_site)
else:
dest_wire = '1'
if random.randint(0, 1):
dest_wire2 = 'origin_wire_{origin_site}'.format(
origin_site=origin_site)
else:
dest_wire2 = '1'
if random.randint(0, 1):
dest_wire3 = 'origin_wire_{origin_site}'.format(
origin_site=origin_site)
else:
dest_wire3 = '1'
if random.randint(0, 1):
dest_wire4 = 'origin_wire_{origin_site}'.format(
origin_site=origin_site)
else:
dest_wire4 = '1'
# origin_site.AQ -> dest_tile.D6 enables INT_R.ER1BEG1.EE4END0
print(
"""
// Force origin FF into A position with MUXF7_L and MUXF8
wire origin_wire_{origin_site};
wire f7_to_lo_{origin_site};
wire lut_to_f7_{origin_site};
(* KEEP, DONT_TOUCH, LOC = "{origin_site}" *)
LUT6_L #(
.INIT(0)
) lut_rom_{origin_site} (
.I0(1),
.I1(origin_wire_{origin_site}),
.I2(0),
.I3(1),
.I4(1),
.I5(1),
.LO(lut_to_f7_{origin_site})
);
(* KEEP, DONT_TOUCH, LOC = "{origin_site}" *)
MUXF7_D f7_{origin_site} (
.I0(lut_to_f7_{origin_site}),
.LO(f7_to_lo_{origin_site}),
.O(origin_wire_{origin_site})
);
(* KEEP, DONT_TOUCH, LOC = "{origin_site}" *)
MUXF8 f8_{origin_site} (
.I1(f7_to_lo_{origin_site})
);
wire dest_wire_{dest_site};
wire dest_wire2_{dest_site};
wire d_lut_to_f7_{dest_site}, f7_to_f8_{dest_site};
// Force destination LUT into D position with MUXF7_L and MUXF8
(* KEEP, DONT_TOUCH, LOC = "{dest_site}" *)
LUT6_L #(
.INIT(0)
) d_lut_rom_{dest_site} (
.I0(1),
.I1(1),
.I2(0),
.I3(1),
.I4(1),
.I5(dest_wire_{dest_site}),
.LO(d_lut_to_f7_{dest_site})
);
wire c_lut_to_f7_{dest_site};
// Force destination LUT into C position with MUXF7_L and MUXF8
(* KEEP, DONT_TOUCH, LOC = "{dest_site}" *)
LUT6_L #(
.INIT(0)
) c_lut_rom_{dest_site} (
.I0(1),
.I1(1),
.I2(0),
.I3(1),
.I4(1),
.I5(dest_wire2_{dest_site}),
.LO(c_lut_to_f7_{dest_site})
);
(* KEEP, DONT_TOUCH, LOC = "{dest_site}" *)
MUXF7_L f7_{dest_site} (
.I0(d_lut_to_f7_{dest_site}),
.I1(c_lut_to_f7_{dest_site}),
.LO(f7_to_f8_{dest_site})
);
(* KEEP, DONT_TOUCH, LOC = "{dest_site}" *)
MUXF8 f8_{dest_site} (
.I0(f7_to_f8_{dest_site})
);
assign dest_wire_{dest_site} = {dest_wire};
assign dest_wire2_{dest_site} = {dest_wire2};
wire dest_wire3_{dest_site2};
wire dest_wire4_{dest_site2};
wire lut_to_f7_{dest_site2}, f7_to_f8_{dest_site2};
// Force destination LUT into D position with MUXF7_L and MUXF8
(* KEEP, DONT_TOUCH, LOC = "{dest_site2}" *)
LUT6_L #(
.INIT(0)
) d_lut_rom_{dest_site2} (
.I0(dest_wire3_{dest_site2}),
.I1(1),
.I2(0),
.I3(1),
.I4(1),
.I5(),
.LO(lut_to_f7_{dest_site2})
);
// Force destination LUT into C position with MUXF7_L and MUXF8
wire c_lut_to_f7_{dest_site2};
(* KEEP, DONT_TOUCH, LOC = "{dest_site2}" *)
LUT6_L #(
.INIT(0)
) c_lut_rom_{dest_site2} (
.I0(dest_wire4_{dest_site2}),
.I1(1),
.I2(0),
.I3(1),
.I4(1),
.I5(1),
.LO(c_lut_to_f7_{dest_site2})
);
(* KEEP, DONT_TOUCH, LOC = "{dest_site2}" *)
MUXF7_L f7_{dest_site2} (
.I0(lut_to_f7_{dest_site2}),
.I1(c_lut_to_f7_{dest_site2}),
.LO(f7_to_f8_{dest_site2})
);
(* KEEP, DONT_TOUCH, LOC = "{dest_site2}" *)
MUXF8 f8_{dest_site2} (
.I0(f7_to_f8_{dest_site2})
);
assign dest_wire3_{dest_site2} = {dest_wire3};
assign dest_wire4_{dest_site2} = {dest_wire4};
""".format(
dest_site=dest_site,
dest_site2=dest2_site,
origin_site=origin_site,
dest_wire=dest_wire,
dest_wire2=dest_wire2,
dest_wire3=dest_wire3,
dest_wire4=dest_wire4,
))
def run():
print('''
module top();
''')
int_tiles = []
db = Database(util.get_db_root())
grid = db.grid()
params = {}
for int_l_column, int_r_column in gen_orphan_ints(grid):
for int_l, int_r in zip(int_l_column[1:6:2], int_r_column[1:6:2]):
build_cross_int(params, grid, int_l, int_r)
int_tiles.append(int_l)
print("endmodule")
with open('top.txt', 'w') as f:
for tile in int_tiles:
print(tile, file=f)
if __name__ == '__main__':
run()
| [] | [] | [
"SEED"
] | [] | ["SEED"] | python | 1 | 0 | |
files/src/server.go | package main
import (
"log"
"os"
"github.com/armon/go-socks5"
"github.com/caarlos0/env"
)
type params struct {
User string `env:"PROXY_USER" envDefault:""`
Password string `env:"PROXY_PASSWORD" envDefault:""`
Port string `env:"PROXY_PORT" envDefault:"1080"`
}
func main() {
// Working with app params
cfg := params{}
err := env.Parse(&cfg)
if err != nil {
log.Printf("%+v\n", err)
}
//Initialize socks5 config
socsk5conf := &socks5.Config{
Logger: log.New(os.Stdout, "", log.LstdFlags),
}
if cfg.User+cfg.Password != "" {
creds := socks5.StaticCredentials{
os.Getenv("PROXY_USER"): os.Getenv("PROXY_PASSWORD"),
}
cator := socks5.UserPassAuthenticator{Credentials: creds}
socsk5conf.AuthMethods = []socks5.Authenticator{cator}
}
server, err := socks5.New(socsk5conf)
if err != nil {
log.Fatal(err)
}
log.Printf("Start listening proxy service on port %s\n", cfg.Port)
if err := server.ListenAndServe("tcp", ":"+cfg.Port); err != nil {
log.Fatal(err)
}
}
| [
"\"PROXY_USER\"",
"\"PROXY_PASSWORD\""
] | [] | [
"PROXY_PASSWORD",
"PROXY_USER"
] | [] | ["PROXY_PASSWORD", "PROXY_USER"] | go | 2 | 0 | |
go/test/endtoend/recovery/unshardedrecovery/recovery.go | /*
Copyright 2020 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unshardedrecovery
import (
"context"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"testing"
"vitess.io/vitess/go/test/endtoend/recovery"
"vitess.io/vitess/go/test/endtoend/sharding/initialsharding"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtgate/vtgateconn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/endtoend/cluster"
)
var (
master *cluster.Vttablet
replica1 *cluster.Vttablet
replica2 *cluster.Vttablet
replica3 *cluster.Vttablet
localCluster *cluster.LocalProcessCluster
newInitDBFile string
cell = cluster.DefaultCell
hostname = "localhost"
keyspaceName = "ks"
dbPassword = "VtDbaPass"
shardKsName = fmt.Sprintf("%s/%s", keyspaceName, shardName)
dbCredentialFile string
shardName = "0"
commonTabletArg = []string{
"-vreplication_healthcheck_topology_refresh", "1s",
"-vreplication_healthcheck_retry_delay", "1s",
"-vreplication_retry_delay", "1s",
"-degraded_threshold", "5s",
"-lock_tables_timeout", "5s",
"-watch_replication_stream",
"-serving_state_grace_period", "1s"}
recoveryKS1 = "recovery_ks1"
recoveryKS2 = "recovery_ks2"
vtInsertTest = `create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB`
vSchema = `{
"tables": {
"vt_insert_test": {}
}
}`
)
// TestMainImpl creates a cluster for unsharded recovery testing.
func TestMainImpl(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
exitCode, err := func() (int, error) {
localCluster = cluster.NewCluster(cell, hostname)
defer localCluster.Teardown()
// Start topo server
err := localCluster.StartTopo()
if err != nil {
return 1, err
}
// Start keyspace
keyspace := &cluster.Keyspace{
Name: keyspaceName,
}
localCluster.Keyspaces = append(localCluster.Keyspaces, *keyspace)
dbCredentialFile = initialsharding.WriteDbCredentialToTmp(localCluster.TmpDirectory)
initDb, _ := ioutil.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"))
sql := string(initDb)
newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql")
sql = sql + initialsharding.GetPasswordUpdateSQL(localCluster)
ioutil.WriteFile(newInitDBFile, []byte(sql), 0666)
extraArgs := []string{"-db-credentials-file", dbCredentialFile}
commonTabletArg = append(commonTabletArg, "-db-credentials-file", dbCredentialFile)
shard := cluster.Shard{
Name: shardName,
}
var mysqlProcs []*exec.Cmd
for i := 0; i < 4; i++ {
tabletType := "replica"
if i == 0 {
tabletType = "master"
}
tablet := localCluster.NewVttabletInstance(tabletType, 0, cell)
tablet.VttabletProcess = localCluster.VtprocessInstanceFromVttablet(tablet, shard.Name, keyspaceName)
tablet.VttabletProcess.DbPassword = dbPassword
tablet.VttabletProcess.ExtraArgs = commonTabletArg
if recovery.UseXb {
tablet.VttabletProcess.ExtraArgs = append(tablet.VttabletProcess.ExtraArgs, recovery.XbArgs...)
}
tablet.VttabletProcess.SupportsBackup = true
tablet.VttabletProcess.EnableSemiSync = true
tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory)
tablet.MysqlctlProcess.InitDBFile = newInitDBFile
tablet.MysqlctlProcess.ExtraArgs = extraArgs
proc, err := tablet.MysqlctlProcess.StartProcess()
if err != nil {
return 1, err
}
mysqlProcs = append(mysqlProcs, proc)
shard.Vttablets = append(shard.Vttablets, tablet)
}
for _, proc := range mysqlProcs {
if err := proc.Wait(); err != nil {
return 1, err
}
}
master = shard.Vttablets[0]
replica1 = shard.Vttablets[1]
replica2 = shard.Vttablets[2]
replica3 = shard.Vttablets[3]
for _, tablet := range []cluster.Vttablet{*master, *replica1} {
if err := tablet.VttabletProcess.Setup(); err != nil {
return 1, err
}
}
if err := localCluster.VtctlclientProcess.InitShardMaster(keyspaceName, shard.Name, cell, master.TabletUID); err != nil {
return 1, err
}
return m.Run(), nil
}()
if err != nil {
log.Error(err.Error())
os.Exit(1)
} else {
os.Exit(exitCode)
}
}
// TestRecoveryImpl does the following:
// - create a shard with master and replica1 only
// - run InitShardMaster
// - insert some data
// - take a backup
// - insert more data on the master
// - take another backup
// - create a recovery keyspace after first backup
// - bring up tablet_replica2 in the new keyspace
// - check that new tablet does not have data created after backup1
// - create second recovery keyspace after second backup
// - bring up tablet_replica3 in second keyspace
// - check that new tablet has data created after backup1 but not data created after backup2
// - check that vtgate queries work correctly
func TestRecoveryImpl(t *testing.T) {
defer cluster.PanicHandler(t)
defer tabletsTeardown()
verifyInitialReplication(t)
err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
backups := listBackups(t)
require.Equal(t, len(backups), 1)
assert.Contains(t, backups[0], replica1.Alias)
_, err = master.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
assert.NoError(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2)
err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
assert.NoError(t, err)
output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell)
assert.NoError(t, err)
assert.Contains(t, output, keyspaceName)
assert.Contains(t, output, recoveryKS1)
err = localCluster.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, keyspaceName)
assert.NoError(t, err)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 1)
cluster.VerifyLocalMetadata(t, replica2, recoveryKS1, shardName, cell)
// update the original row in master
_, err = master.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx1' where id = 1", keyspaceName, true)
assert.NoError(t, err)
//verify that master has new value
qr, err := master.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx1", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
//verify that restored replica has old value
qr, err = replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "test1", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
_, err = master.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true)
assert.NoError(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3)
recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 2)
// update the original row in master
_, err = master.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx2' where id = 1", keyspaceName, true)
assert.NoError(t, err)
//verify that master has new value
qr, err = master.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx2", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
//verify that restored replica has old value
qr, err = replica3.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx1", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
vtgateInstance := localCluster.NewVtgateInstance()
vtgateInstance.TabletTypesToWait = "REPLICA"
err = vtgateInstance.Setup()
localCluster.VtgateGrpcPort = vtgateInstance.GrpcPort
assert.NoError(t, err)
defer vtgateInstance.TearDown()
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", keyspaceName, shardName), 1)
assert.NoError(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1)
assert.NoError(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1)
assert.NoError(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1)
assert.NoError(t, err)
// Build vtgate grpc connection
grpcAddress := fmt.Sprintf("%s:%d", localCluster.Hostname, localCluster.VtgateGrpcPort)
vtgateConn, err := vtgateconn.Dial(context.Background(), grpcAddress)
assert.NoError(t, err)
defer vtgateConn.Close()
session := vtgateConn.Session("@replica", nil)
//check that vtgate doesn't route queries to new tablet
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(3)")
recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx2")`)
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS1), "INT64(1)")
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS1), `VARCHAR("test1")`)
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(2)")
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("msgx1")`)
// check that new keyspace is accessible with 'use ks'
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS1+"@replica")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS2+"@replica")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
// check that new tablet is accessible with use `ks:shard`
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS1+":0@replica`")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS2+":0@replica`")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
}
// verifyInitialReplication creates the schema on the master, inserts some data and verifies that the same data is replicated to the replica.
func verifyInitialReplication(t *testing.T) {
_, err := master.VttabletProcess.QueryTablet(vtInsertTest, keyspaceName, true)
assert.NoError(t, err)
_, err = master.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test1')", keyspaceName, true)
assert.NoError(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 1)
}
func listBackups(t *testing.T) []string {
output, err := localCluster.ListBackups(shardKsName)
assert.NoError(t, err)
return output
}
func tabletsTeardown() {
var mysqlProcs []*exec.Cmd
for _, tablet := range []*cluster.Vttablet{master, replica1, replica2, replica3} {
proc, _ := tablet.MysqlctlProcess.StopProcess()
mysqlProcs = append(mysqlProcs, proc)
tablet.VttabletProcess.TearDown()
}
for _, proc := range mysqlProcs {
proc.Wait()
}
}
| [
"\"VTROOT\""
] | [] | [
"VTROOT"
] | [] | ["VTROOT"] | go | 1 | 0 | |
test.py | from __future__ import print_function, absolute_import
import os
import gc
import sys
import time
import math
import h5py
import scipy
import datetime
import argparse
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import utils.data_manager as data_manager
from utils.video_loader import VideoDataset, ImageDataset
import transforms.spatial_transforms as ST
import transforms.temporal_transforms as TT
import torchvision.transforms as T
import models
from utils.utils import AverageMeter, Logger, save_checkpoint
from utils.eval_metrics import evaluate
parser = argparse.ArgumentParser(description='Testing using all frames')
# Datasets
parser.add_argument('--root', type=str, default='/data/datasets/')
parser.add_argument('-d', '--dataset', type=str, default='mars',
choices=data_manager.get_names())
parser.add_argument('-j', '--workers', default=4, type=int)
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=128)
# Augment
parser.add_argument('--test_frames', default=32, type=int, help='frames/clip for test')
# Optimization options
parser.add_argument('--test_batch', default=1, type=int, help="has to be 1")
parser.add_argument('--img_test_batch', default=128, type=int)
# Architecture
parser.add_argument('--vid_arch', type=str, default='vid_nonlocalresnet50')
parser.add_argument('--img_arch', type=str, default='img_resnet50')
# Miscs
parser.add_argument('--resume', type=str, default='log/best_model.pth.tar', metavar='PATH')
parser.add_argument('--save_dir', type=str, default='log')
parser.add_argument('--gpu_devices', default='0', type=str)
args = parser.parse_args()
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
print(torch.cuda.device_count())
use_gpu = torch.cuda.is_available()
sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
print("==========\nArgs:{}\n==========".format(args))
print("Initializing dataset {}".format(args.dataset))
dataset = data_manager.init_dataset(name=args.dataset, root=args.root)
# Data augmentation
spatial_transform_test = ST.Compose([
ST.Scale((args.height, args.width), interpolation=3),
ST.ToTensor(),
ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
temporal_transform_test = None
transform_test_img = T.Compose([
T.Resize((args.height, args.width), interpolation=3),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
pin_memory = True if use_gpu else False
queryloader = DataLoader(
VideoDataset(dataset.query, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test),
batch_size=args.test_batch, shuffle=False, num_workers=0,
pin_memory=pin_memory, drop_last=False
)
galleryloader = DataLoader(
VideoDataset(dataset.gallery, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test),
batch_size=args.test_batch, shuffle=False, num_workers=0,
pin_memory=pin_memory, drop_last=False
)
queryimgloader = DataLoader(
ImageDataset(dataset.query_img, transform=transform_test_img),
batch_size=args.img_test_batch, shuffle=False, num_workers=args.workers,
pin_memory=pin_memory, drop_last=False
)
galleryimgloader = DataLoader(
ImageDataset(dataset.gallery_img, transform=transform_test_img),
batch_size=args.img_test_batch, shuffle=False, num_workers=args.workers,
pin_memory=pin_memory, drop_last=False
)
print("Initializing model: {} and {}".format(args.vid_arch, args.img_arch))
vid_model = models.init_model(name=args.vid_arch)
img_model = models.init_model(name=args.img_arch)
print("Video model size: {:.5f}M".format(sum(p.numel() for p in vid_model.parameters())/1000000.0))
print("Image model size: {:.5f}M".format(sum(p.numel() for p in img_model.parameters())/1000000.0))
print("Loading checkpoint from '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
vid_model.load_state_dict(checkpoint['vid_model_state_dict'])
img_model.load_state_dict(checkpoint['img_model_state_dict'])
if use_gpu:
vid_model = vid_model.cuda()
img_model = img_model.cuda()
print("Evaluate")
with torch.no_grad():
test(vid_model, img_model, queryloader, galleryloader, queryimgloader, galleryimgloader, use_gpu)
def extract_vid_feature(model, vids, use_gpu):
n, c, f, h, w = vids.size()
assert(n == 1)
feat = torch.FloatTensor()
for i in range(math.ceil(f/args.test_frames)):
clip = vids[:, :, i*args.test_frames:(i+1)*args.test_frames, :, :]
if use_gpu:
clip = clip.cuda()
output = model(clip)
output = output.data.cpu()
feat = torch.cat((feat, output), 1)
feat = feat.mean(1)
return feat
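# Illustrative note (not part of the original script): the loop above slices a long video
# into clips of at most args.test_frames frames. For example, with f = 70 frames and
# --test_frames 32 it yields math.ceil(70 / 32) == 3 clips of 32, 32 and 6 frames, whose
# per-clip outputs are concatenated and then averaged into a single video-level feature.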
def test(vid_model, img_model, queryloader, galleryloader, queryimgloader, galleryimgloader, use_gpu):
since = time.time()
vid_model.eval()
img_model.eval()
print("Extract video features")
vid_qf, vid_q_pids, vid_q_camids = [], [], []
for batch_idx, (vids, pids, camids) in enumerate(queryloader):
if (batch_idx+1)%1000==0 or (batch_idx+1)%len(queryloader)==0:
print("{}/{}".format(batch_idx+1, len(queryloader)))
vid_qf.append(extract_vid_feature(vid_model, vids, use_gpu).squeeze())
vid_q_pids.extend(pids)
vid_q_camids.extend(camids)
vid_qf = torch.stack(vid_qf)
vid_q_pids = np.asarray(vid_q_pids)
vid_q_camids = np.asarray(vid_q_camids)
print("Extracted features for query set, obtained {} matrix".format(vid_qf.shape))
vid_gf, vid_g_pids, vid_g_camids = [], [], []
for batch_idx, (vids, pids, camids) in enumerate(galleryloader):
if (batch_idx + 1) % 1000==0 or (batch_idx+1)%len(galleryloader)==0:
print("{}/{}".format(batch_idx+1, len(galleryloader)))
vid_gf.append(extract_vid_feature(vid_model, vids, use_gpu).squeeze())
vid_g_pids.extend(pids)
vid_g_camids.extend(camids)
vid_gf = torch.stack(vid_gf)
vid_g_pids = np.asarray(vid_g_pids)
vid_g_camids = np.asarray(vid_g_camids)
if args.dataset == 'mars':
# gallery set must contain query set, otherwise 140 query imgs will not have ground truth.
vid_gf = torch.cat((vid_qf, vid_gf), 0)
vid_g_pids = np.append(vid_q_pids, vid_g_pids)
vid_g_camids = np.append(vid_q_camids, vid_g_camids)
print("Extracted features for gallery set, obtained {} matrix".format(vid_gf.shape))
print("Extract image features")
img_qf, img_q_pids, img_q_camids = [], [], []
for batch_idx, (imgs, pids, camids) in enumerate(queryimgloader):
if use_gpu:
imgs = imgs.cuda()
feat = img_model(imgs).data.cpu()
img_qf.append(feat)
img_q_pids.extend(pids)
img_q_camids.extend(camids)
img_qf = torch.cat(img_qf, 0)
img_q_pids = np.asarray(img_q_pids)
img_q_camids = np.asarray(img_q_camids)
print("Extracted features for query set, obtained {} matrix".format(img_qf.shape))
img_gf, img_g_pids, img_g_camids = [], [], []
for batch_idx, (imgs, pids, camids) in enumerate(galleryimgloader):
if use_gpu:
imgs = imgs.cuda()
feat = img_model(imgs).data.cpu()
img_gf.append(feat)
img_g_pids.extend(pids)
img_g_camids.extend(camids)
img_gf = torch.cat(img_gf, 0)
img_g_pids = np.asarray(img_g_pids)
img_g_camids = np.asarray(img_g_camids)
if args.dataset == 'mars':
# gallery set must contain query set, otherwise 140 query imgs will not have ground truth.
img_gf = torch.cat((img_qf, img_gf), 0)
img_g_pids = np.append(img_q_pids, img_g_pids)
img_g_camids = np.append(img_q_camids, img_g_camids)
print("Extracted features for gallery set, obtained {} matrix".format(img_gf.shape))
time_elapsed = time.time() - since
print('Extracting features complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print("Computing distance matrix")
m, n = vid_qf.size(0), vid_gf.size(0)
img_distmat = torch.zeros((m,n))
vid_distmat = torch.zeros((m,n))
i2v_distmat = torch.zeros((m,n))
v2i_distmat = torch.zeros((m,n))
img_q_norm = torch.norm(img_qf, p=2, dim=1, keepdim=True)
img_g_norm = torch.norm(img_gf, p=2, dim=1, keepdim=True)
vid_q_norm = torch.norm(vid_qf, p=2, dim=1, keepdim=True)
vid_g_norm = torch.norm(vid_gf, p=2, dim=1, keepdim=True)
img_qf = img_qf.div(img_q_norm.expand_as(img_qf))
img_gf = img_gf.div(img_g_norm.expand_as(img_gf))
vid_qf = vid_qf.div(vid_q_norm.expand_as(vid_qf))
vid_gf = vid_gf.div(vid_g_norm.expand_as(vid_gf))
for i in range(m):
img_distmat[i] = - torch.mm(img_qf[i:i+1], img_gf.t())
vid_distmat[i] = - torch.mm(vid_qf[i:i+1], vid_gf.t())
i2v_distmat[i] = - torch.mm(img_qf[i:i+1], vid_gf.t())
v2i_distmat[i] = - torch.mm(vid_qf[i:i+1], img_gf.t())
img_distmat = img_distmat.numpy()
vid_distmat = vid_distmat.numpy()
i2v_distmat = i2v_distmat.numpy()
v2i_distmat = v2i_distmat.numpy()
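    # Explanatory comment added for clarity (not part of the original script): because the
    # query/gallery features were L2-normalized above, each entry of these matrices is the
    # negative cosine similarity (-q . g), so smaller values mean more similar pairs and the
    # matrices can be ranked directly by evaluate() below.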
print('image to image')
cmc, mAP = evaluate(img_distmat, img_q_pids, img_g_pids, img_q_camids, img_g_camids)
print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(cmc[0],cmc[4],cmc[9],mAP))
print('video to video')
cmc, mAP = evaluate(vid_distmat, vid_q_pids, vid_g_pids, vid_q_camids, vid_g_camids)
print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(cmc[0],cmc[4],cmc[9],mAP))
print('video to image')
cmc, mAP = evaluate(v2i_distmat, vid_q_pids, img_g_pids, vid_q_camids, img_g_camids)
print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(cmc[0],cmc[4],cmc[9],mAP))
print('image to video')
cmc, mAP = evaluate(i2v_distmat, img_q_pids, vid_g_pids, img_q_camids, vid_g_camids)
print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(cmc[0],cmc[4],cmc[9],mAP))
return cmc[0]
if __name__ == '__main__':
main()
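# Example invocation (illustrative; paths are placeholders for your own data and checkpoint):
#   python test.py --dataset mars --root /data/datasets/ \
#       --resume log/best_model.pth.tar --gpu_devices 0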
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
src/test/java/net/sourceforge/myvd/test/acl/TestValidateSubjects.java | /*
* Copyright 2008 Marc Boorshtein
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sourceforge.myvd.test.acl;
import java.util.ArrayList;
import java.util.HashMap;
import net.sourceforge.myvd.chain.SearchInterceptorChain;
import net.sourceforge.myvd.core.InsertChain;
import net.sourceforge.myvd.inserts.Insert;
import net.sourceforge.myvd.inserts.accessControl.AccessControlItem;
import net.sourceforge.myvd.router.Router;
import net.sourceforge.myvd.server.Server;
import net.sourceforge.myvd.test.util.OpenLDAPUtils;
import net.sourceforge.myvd.test.util.StartOpenLDAP;
import net.sourceforge.myvd.types.DistinguishedName;
import net.sourceforge.myvd.types.Password;
import net.sourceforge.myvd.types.SessionVariables;
import com.novell.ldap.util.DN;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import static org.junit.Assert.*;
public class TestValidateSubjects {
private static StartOpenLDAP openldapServer;
private static Server server;
private static InsertChain globalChain;
private static Router router;
@BeforeClass
public static void setUp() throws Exception {
OpenLDAPUtils.killAllOpenLDAPS();
openldapServer = new StartOpenLDAP();
openldapServer.startServer(System.getenv("PROJ_DIR") + "/test/ACITest",10983,"cn=admin,dc=domain,dc=com","manager");
server = new Server(System.getenv("PROJ_DIR") + "/test/TestServer/testACI.props");
server.startServer();
globalChain = server.getGlobalChain();
router = server.getRouter();
}
@Test
public void testPublic() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#public:");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testSubtreePass() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#subtree:ou=users,dc=domain,dc=com");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testSubtreeFail() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#subtree:ou=apps,dc=domain,dc=com");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testThisPass() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#this:");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,new DN("uid=testuser,ou=users,dc=domain,dc=com"))) {
fail("subject check failed");
}
}
@Test
public void testThisFail() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#this:");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (aci.checkSubject(chain,new DN("uid=testuser1,ou=users,dc=domain,dc=com"))) {
fail("subject check failed");
}
}
@Test
public void testDNPass() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#dn:uid=testuser,ou=users,dc=domain,dc=com");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testDNFail() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#dn:uid=testuser1,ou=users,dc=domain,dc=com");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testStaticGroupPass() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#group:cn=staticgroup1,ou=groups,dc=domain,dc=com");
HashMap<Object,Object> session = new HashMap<Object,Object>();
session.put(SessionVariables.BOUND_INTERCEPTORS,new ArrayList<String>());
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,session,new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,null)) {
fail("subject check failed");
}
chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser2,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,session,new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testStaticGroupFail() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#group:cn=staticgroup1,ou=groups,dc=domain,dc=com");
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser1,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,new HashMap<Object,Object>(),new HashMap<Object,Object>(),this.router);
if (aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testDynGroupPass() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#dynamic-group:cn=dynamicgroup1,ou=groups,dc=domain,dc=com");
HashMap<Object,Object> session = new HashMap<Object,Object>();
session.put(SessionVariables.BOUND_INTERCEPTORS,new ArrayList<String>());
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,session,new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,null)) {
fail("subject check failed");
}
chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser1,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,session,new HashMap<Object,Object>(),this.router);
if (! aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@Test
public void testDynGroupFail() throws Exception {
AccessControlItem aci = new AccessControlItem(0,"cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#dynamic-group:cn=dynamicgroup1,ou=groups,dc=domain,dc=com");
HashMap<Object,Object> session = new HashMap<Object,Object>();
session.put(SessionVariables.BOUND_INTERCEPTORS,new ArrayList<String>());
SearchInterceptorChain chain = new SearchInterceptorChain(new DistinguishedName("uid=testuser2,ou=users,dc=domain,dc=com"),new Password(""),0,this.globalChain,session,new HashMap<Object,Object>(),this.router);
if (aci.checkSubject(chain,null)) {
fail("subject check failed");
}
}
@AfterClass
public static void tearDown() throws Exception {
openldapServer.stopServer();
server.stopServer();
}
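    // Illustrative sketch (not part of the original suite): as far as these tests show, the ACI
    // strings share the shape <scope DN>#entry#grant:<rights>#<attribute list>#<subject type>:<subject value>,
    // so a rule granting the same rights to a single user can be built like this.
    @SuppressWarnings("unused")
    private AccessControlItem exampleAci() throws Exception {
        return new AccessControlItem(0,
                "cn=test,ou=myorg,dc=domain,dc=com#entry#grant:r,w,o#[all]#dn:uid=testuser,ou=users,dc=domain,dc=com");
    }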
}
| [
"\"PROJ_DIR\"",
"\"PROJ_DIR\""
] | [] | [
"PROJ_DIR"
] | [] | ["PROJ_DIR"] | java | 1 | 0 | |
rest_react/wsgi.py | """
WSGI config for rest_react project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rest_react.settings')
application = get_wsgi_application()
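# Example (illustrative): any WSGI server can serve the `application` object above, e.g.
#   gunicorn rest_react.wsgi:application --bind 0.0.0.0:8000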
| [] | [] | [] | [] | [] | python | 0 | 0 | |
visualization/model_wrapper.py | from typing import Tuple, List
import os
import logging
import torch
from transformers import BertForQuestionAnswering, BertTokenizer, BertConfig
# from transformers import AutoModelForQuestionAnswering, AutoTokenizer
from data_utils import QASample, SquadExample, QAInputFeatures, RawResult, read_squad_example, \
convert_qa_example_to_features, parse_prediction
logging.basicConfig(format="%(asctime)-15s %(message)s", level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
class BertQAModel:
def __init__(self, model_path: str, model_type: str, lower_case: bool, cache_dir: str, device: str = "cpu"):
self.model_path = model_path
self.model_type = model_type
self.lower_case = lower_case
self.cache_dir = cache_dir
self.device = device
self.model = self.load_model()
self.tokenizer = self.load_tokenizer()
def load_model(self):
# Load a pretrained model that has been fine-tuned
config = BertConfig.from_pretrained(self.model_type, output_hidden_states=True, cache_dir=self.cache_dir)
# config = AutoConfig.from_pretrained(r"C:\REPO\DL-project\explain-BERT-QA\bert-base-uncased-squad-v1\config.json",
# output_hidden_states=True, cache_dir=self.cache_dir)
# config = BertConfig.from_pretrained(r"C:\REPO\DL-project\explain-BERT-QA\bert-base-uncased-squad-v1\config.json",
# output_hidden_states=True, cache_dir=self.cache_dir)
# config = BertConfig.from_pretrained("bert-base-uncased",
# output_hidden_states=True, cache_dir=self.cache_dir)
pretrained_weights = torch.load(self.model_path, map_location=torch.device(self.device))
# pretrained_weights = BertForQuestionAnswering.from_pretrained(r"C:\REPO\DL-project\explain-BERT-QA\bert-base-uncased-squad-v1\pytorch_model.bin")
# pretrained_weights = AutoModelForQuestionAnswering.from_pretrained(r"C:\REPO\DL-project\explain-BERT-QA\bert-base-uncased-squad-v1\pytorch_model.bin")
model = BertForQuestionAnswering.from_pretrained(self.model_type,
state_dict=pretrained_weights,
config=config,
cache_dir=self.cache_dir)
return model
def load_tokenizer(self):
return BertTokenizer.from_pretrained(self.model_type, cache_dir=self.cache_dir, do_lower_case=self.lower_case)
def tokenize_and_predict(self, input_sample: QASample) -> Tuple:
squad_formatted_sample: SquadExample = read_squad_example(input_sample)
input_features: QAInputFeatures = self.tokenize(squad_formatted_sample)
with torch.no_grad():
inputs = {'input_ids': input_features.input_ids,
'attention_mask': input_features.input_mask,
'token_type_ids': input_features.segment_ids
}
# Make Prediction
output: Tuple = self.model(**inputs) # output format: start_logits, end_logits, hidden_states
# Parse Prediction
prediction, hidden_states = self.parse_model_output(output, squad_formatted_sample, input_features)
logger.info("Predicted Answer: {}".format(prediction["text"]))
logger.info("Start token: {}, End token: {}".format(prediction["start_index"], prediction["end_index"]))
return prediction, hidden_states, input_features
def tokenize(self, input_sample: SquadExample) -> QAInputFeatures:
features = convert_qa_example_to_features(example=input_sample,
tokenizer=self.tokenizer,
max_seq_length=384,
doc_stride=128,
max_query_length=64,
is_training=False)
features.input_ids = torch.tensor([features.input_ids], dtype=torch.long)
features.input_mask = torch.tensor([features.input_mask], dtype=torch.long)
features.segment_ids = torch.tensor([features.segment_ids], dtype=torch.long)
features.cls_index = torch.tensor([features.cls_index], dtype=torch.long)
features.p_mask = torch.tensor([features.p_mask], dtype=torch.float)
return features
@staticmethod
def parse_model_output(output: Tuple, sample: SquadExample, features: QAInputFeatures) -> Tuple:
def to_list(tensor):
return tensor.detach().cpu().tolist()
result: RawResult = RawResult(unique_id=1,
start_logits=to_list(output[0][0]),
end_logits=to_list(output[1][0]))
nbest_predictions: List = parse_prediction(sample, features, result)
return nbest_predictions[0], output[2] # top prediction, hidden states
| [] | [] | [
"LOGLEVEL"
] | [] | ["LOGLEVEL"] | python | 1 | 0 | |
apps/server/database/db.go | package database
import (
"context"
"fmt"
"os"
"github.com/jackc/pgx/v4"
)
func Connect() *pgx.Conn {
conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
os.Exit(1)
}
var res int
err = conn.QueryRow(context.Background(), "select 1").Scan(&res)
if err != nil {
fmt.Fprintf(os.Stderr, "Database connection not working: %v\n", err)
os.Exit(1)
}
return conn
}
| [
"\"DATABASE_URL\""
] | [] | [
"DATABASE_URL"
] | [] | ["DATABASE_URL"] | go | 1 | 0 | |
nbdev/test.py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_test.ipynb (unless otherwise specified).
__all__ = ['get_all_flags', 'get_cell_flags', 'NoExportPreprocessor', 'test_nb']
# Cell
from .imports import *
from .sync import *
from .export import *
from .export import _mk_flag_re
from .export2html import _re_notebook2script
from nbconvert.preprocessors import ExecutePreprocessor
# Cell
_re_all_flag = ReTstFlags(True)
# Cell
def get_all_flags(cells):
"Check for all test flags in `cells`"
if len(Config().get('tst_flags',''))==0: return []
result = []
for cell in cells:
if cell['cell_type'] == 'code': result.extend(_re_all_flag.findall(cell['source']))
return set(result)
# Cell
_re_flags = ReTstFlags(False)
# Cell
def get_cell_flags(cell):
"Check for any special test flag in `cell`"
if cell['cell_type'] != 'code' or len(Config().get('tst_flags',''))==0: return []
return _re_flags.findall(cell['source'])
# Cell
class NoExportPreprocessor(ExecutePreprocessor):
"An `ExecutePreprocessor` that executes cells that don't have a flag in `flags`"
def __init__(self, flags, **kwargs):
self.flags = flags
super().__init__(**kwargs)
def preprocess_cell(self, cell, resources, index):
if 'source' not in cell or cell['cell_type'] != "code": return cell, resources
for f in get_cell_flags(cell):
if f not in self.flags: return cell, resources
if check_re(cell, _re_notebook2script): return cell, resources
return super().preprocess_cell(cell, resources, index)
# Cell
def test_nb(fn, flags=None):
"Execute tests in notebook in `fn` with `flags`"
os.environ["IN_TEST"] = '1'
if flags is None: flags = []
try:
nb = read_nb(fn)
nb = call_cb('begin_test_nb', nb, fn, flags)
for f in get_all_flags(nb['cells']):
if f not in flags: return
ep = NoExportPreprocessor(flags, timeout=600, kernel_name='python3')
pnb = nbformat.from_dict(nb)
ep.preprocess(pnb)
nb = call_cb('after_test_nb', fn)
finally: os.environ.pop("IN_TEST") | [] | [] | [
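# Illustrative usage (assumption: flag names must match the project's `tst_flags` setting):
#   test_nb("nbs/00_core.ipynb")                  # run cells with no special test flag
#   test_nb("nbs/01_slow.ipynb", flags=["slow"])  # additionally run cells flagged as slow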
"IN_TEST"
] | [] | ["IN_TEST"] | python | 1 | 0 | |
subprojects/internal-integ-testing/src/main/groovy/org/gradle/integtests/fixtures/executer/InProcessGradleExecuter.java | /*
* Copyright 2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.integtests.fixtures.executer;
import junit.framework.AssertionFailedError;
import org.apache.commons.io.output.TeeOutputStream;
import org.gradle.BuildResult;
import org.gradle.StartParameter;
import org.gradle.api.Task;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.execution.TaskExecutionGraph;
import org.gradle.api.execution.TaskExecutionGraphListener;
import org.gradle.api.execution.TaskExecutionListener;
import org.gradle.api.internal.StartParameterInternal;
import org.gradle.api.internal.TaskInternal;
import org.gradle.api.internal.classpath.ModuleRegistry;
import org.gradle.api.internal.file.FileCollectionFactory;
import org.gradle.api.internal.file.TestFiles;
import org.gradle.api.logging.configuration.ConsoleOutput;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.TaskState;
import org.gradle.cli.CommandLineParser;
import org.gradle.configuration.GradleLauncherMetaData;
import org.gradle.execution.MultipleBuildFailures;
import org.gradle.initialization.BuildRequestContext;
import org.gradle.initialization.DefaultBuildCancellationToken;
import org.gradle.initialization.DefaultBuildRequestContext;
import org.gradle.initialization.DefaultBuildRequestMetaData;
import org.gradle.initialization.NoOpBuildEventConsumer;
import org.gradle.initialization.layout.BuildLayoutFactory;
import org.gradle.integtests.fixtures.logging.GroupedOutputFixture;
import org.gradle.internal.Factory;
import org.gradle.internal.InternalListener;
import org.gradle.internal.IoActions;
import org.gradle.internal.SystemProperties;
import org.gradle.internal.classpath.ClassPath;
import org.gradle.internal.event.ListenerManager;
import org.gradle.internal.exceptions.LocationAwareException;
import org.gradle.internal.hash.HashUtil;
import org.gradle.internal.invocation.BuildAction;
import org.gradle.internal.jvm.Jvm;
import org.gradle.internal.logging.LoggingManagerInternal;
import org.gradle.internal.nativeintegration.ProcessEnvironment;
import org.gradle.internal.os.OperatingSystem;
import org.gradle.internal.time.Time;
import org.gradle.launcher.Main;
import org.gradle.launcher.cli.Parameters;
import org.gradle.launcher.cli.ParametersConverter;
import org.gradle.launcher.cli.action.ExecuteBuildAction;
import org.gradle.launcher.exec.BuildActionExecuter;
import org.gradle.launcher.exec.BuildActionParameters;
import org.gradle.launcher.exec.BuildActionResult;
import org.gradle.launcher.exec.DefaultBuildActionParameters;
import org.gradle.process.internal.JavaExecHandleBuilder;
import org.gradle.test.fixtures.file.TestDirectoryProvider;
import org.gradle.test.fixtures.file.TestFile;
import org.gradle.testfixtures.internal.NativeServicesTestFixture;
import org.gradle.tooling.internal.provider.serialization.DeserializeMap;
import org.gradle.tooling.internal.provider.serialization.PayloadClassLoaderRegistry;
import org.gradle.tooling.internal.provider.serialization.PayloadSerializer;
import org.gradle.tooling.internal.provider.serialization.SerializeMap;
import org.gradle.util.CollectionUtils;
import org.gradle.util.DeprecationLogger;
import org.gradle.util.GUtil;
import org.gradle.util.GradleVersion;
import org.hamcrest.Matcher;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.jar.Attributes;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.gradle.integtests.fixtures.executer.OutputScrapingExecutionResult.flattenTaskPaths;
import static org.gradle.util.Matchers.hasMessage;
import static org.gradle.util.Matchers.isEmpty;
import static org.gradle.util.Matchers.normalizedLineSeparators;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.hasItem;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.CoreMatchers.startsWith;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
public class InProcessGradleExecuter extends DaemonGradleExecuter {
private final ProcessEnvironment processEnvironment = GLOBAL_SERVICES.get(ProcessEnvironment.class);
public static final TestFile COMMON_TMP = new TestFile(new File("build/tmp"));
static {
LoggingManagerInternal loggingManager = GLOBAL_SERVICES.getFactory(LoggingManagerInternal.class).create();
loggingManager.start();
}
public InProcessGradleExecuter(GradleDistribution distribution, TestDirectoryProvider testDirectoryProvider) {
super(distribution, testDirectoryProvider);
}
public InProcessGradleExecuter(GradleDistribution distribution, TestDirectoryProvider testDirectoryProvider, GradleVersion gradleVersion, IntegrationTestBuildContext buildContext) {
super(distribution, testDirectoryProvider, gradleVersion, buildContext);
}
@Override
public GradleExecuter reset() {
DeprecationLogger.reset();
return super.reset();
}
@Override
protected ExecutionResult doRun() {
if (isForkRequired()) {
return createGradleHandle().waitForFinish();
}
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
BuildListenerImpl buildListener = new BuildListenerImpl();
BuildResult result = doRun(outputStream, errorStream, buildListener);
if (result.getFailure() != null) {
throw new UnexpectedBuildFailure(result.getFailure());
}
return assertResult(new InProcessExecutionResult(buildListener.executedTasks, buildListener.skippedTasks,
OutputScrapingExecutionResult.from(outputStream.toString(), errorStream.toString())));
}
@Override
protected ExecutionFailure doRunWithFailure() {
if (isForkRequired()) {
return createGradleHandle().waitForFailure();
}
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
BuildListenerImpl buildListener = new BuildListenerImpl();
BuildResult result = doRun(outputStream, errorStream, buildListener);
if (result.getFailure() == null) {
throw new AssertionError("expected build to fail but it did not.");
}
return assertResult(new InProcessExecutionFailure(buildListener.executedTasks, buildListener.skippedTasks,
OutputScrapingExecutionFailure.from(outputStream.toString(), errorStream.toString()), result.getFailure()));
}
private boolean isForkRequired() {
if (isDaemonExplicitlyRequired() || !getJavaHome().equals(Jvm.current().getJavaHome())) {
return true;
}
File gradleProperties = new File(getWorkingDir(), "gradle.properties");
if (gradleProperties.isFile()) {
Properties properties = GUtil.loadProperties(gradleProperties);
if (properties.getProperty("org.gradle.java.home") != null || properties.getProperty("org.gradle.jvmargs") != null) {
return true;
}
}
return false;
}
private <T extends ExecutionResult> T assertResult(T result) {
getResultAssertion().execute(result);
return result;
}
@Override
protected GradleHandle createGradleHandle() {
configureConsoleCommandLineArgs();
return super.createGradleHandle();
}
@Override
protected Factory<JavaExecHandleBuilder> getExecHandleFactory() {
return () -> {
NativeServicesTestFixture.initialize();
GradleInvocation invocation = buildInvocation();
JavaExecHandleBuilder builder = TestFiles.execFactory().newJavaExec();
builder.workingDir(getWorkingDir());
builder.setExecutable(new File(getJavaHome(), "bin/java"));
builder.classpath(getExecHandleFactoryClasspath());
builder.jvmArgs(invocation.launcherJvmArgs);
builder.environment(invocation.environmentVars);
builder.setMain(Main.class.getName());
builder.args(invocation.args);
builder.setStandardInput(connectStdIn());
return builder;
};
}
private Collection<File> getExecHandleFactoryClasspath() {
Collection<File> classpath = cleanup(GLOBAL_SERVICES.get(ModuleRegistry.class).getAdditionalClassPath().getAsFiles());
if (!OperatingSystem.current().isWindows()) {
return classpath;
}
// Use a Class-Path manifest JAR to circumvent too long command line issues on Windows (cap 8191)
// Classpath is huge here because it's the test runtime classpath
return Collections.singleton(getClasspathManifestJarFor(classpath));
}
private Collection<File> cleanup(List<File> files) {
List<File> result = new LinkedList<>();
String prefix = Jvm.current().getJavaHome().getPath() + File.separator;
for (File file : files) {
if (file.getPath().startsWith(prefix)) {
// IDEA adds the JDK's bootstrap classpath to the classpath it uses to run test - remove this
continue;
}
result.add(file);
}
return result;
}
private File getClasspathManifestJarFor(Collection<File> classpath) {
String cpString = classpath.stream()
.map(File::toURI)
.map(Object::toString)
.collect(Collectors.joining(" "));
File cpJar = new File(getDefaultTmpDir(), "daemon-classpath-manifest-" + HashUtil.createCompactMD5(cpString) + ".jar");
if (!cpJar.isFile()) {
Manifest manifest = new Manifest();
manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0");
manifest.getMainAttributes().put(Attributes.Name.CLASS_PATH, cpString);
JarOutputStream output = null;
try {
output = new JarOutputStream(new FileOutputStream(cpJar), manifest);
output.putNextEntry(new JarEntry("META-INF/"));
output.closeEntry();
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
IoActions.closeQuietly(output);
}
}
return cpJar;
}
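    // Illustrative note (not part of the original class): the JAR built above is empty apart
    // from its manifest; the real classpath lives in the manifest's Class-Path attribute, so
    // the forked JVM can be launched with a single short "-cp <manifest jar>" argument instead
    // of the full list that would exceed the Windows command-line limit mentioned above.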
private BuildResult doRun(OutputStream outputStream, OutputStream errorStream, BuildListenerImpl listener) {
// Capture the current state of things that we will change during execution
InputStream originalStdIn = System.in;
Properties originalSysProperties = new Properties();
originalSysProperties.putAll(System.getProperties());
File originalUserDir = new File(originalSysProperties.getProperty("user.dir")).getAbsoluteFile();
Map<String, String> originalEnv = new HashMap<>(System.getenv());
GradleInvocation invocation = buildInvocation();
Set<String> changedEnvVars = new HashSet<>(invocation.environmentVars.keySet());
try {
return executeBuild(invocation, outputStream, errorStream, listener);
} finally {
// Restore the environment
System.setProperties(originalSysProperties);
processEnvironment.maybeSetProcessDir(originalUserDir);
for (String envVar : changedEnvVars) {
String oldValue = originalEnv.get(envVar);
if (oldValue != null) {
processEnvironment.maybeSetEnvironmentVariable(envVar, oldValue);
} else {
processEnvironment.maybeRemoveEnvironmentVariable(envVar);
}
}
System.setProperty("user.dir", originalSysProperties.getProperty("user.dir"));
System.setIn(originalStdIn);
}
}
private LoggingManagerInternal createLoggingManager(StartParameter startParameter, OutputStream outputStream, OutputStream errorStream) {
LoggingManagerInternal loggingManager = GLOBAL_SERVICES.getFactory(LoggingManagerInternal.class).create();
loggingManager.captureSystemSources();
ConsoleOutput consoleOutput = startParameter.getConsoleOutput();
loggingManager.attachConsole(new TeeOutputStream(System.out, outputStream), new TeeOutputStream(System.err, errorStream), consoleOutput, consoleAttachment.getConsoleMetaData());
return loggingManager;
}
private BuildResult executeBuild(GradleInvocation invocation, OutputStream outputStream, OutputStream errorStream, BuildListenerImpl listener) {
// Augment the environment for the execution
System.setIn(connectStdIn());
processEnvironment.maybeSetProcessDir(getWorkingDir());
for (Map.Entry<String, String> entry : invocation.environmentVars.entrySet()) {
processEnvironment.maybeSetEnvironmentVariable(entry.getKey(), entry.getValue());
}
Map<String, String> implicitJvmSystemProperties = getImplicitJvmSystemProperties();
System.getProperties().putAll(implicitJvmSystemProperties);
// TODO: Reuse more of CommandlineActionFactory
CommandLineParser parser = new CommandLineParser();
BuildLayoutFactory buildLayoutFactory = new BuildLayoutFactory();
FileCollectionFactory fileCollectionFactory = TestFiles.fileCollectionFactory();
ParametersConverter parametersConverter = new ParametersConverter(buildLayoutFactory, fileCollectionFactory);
parametersConverter.configure(parser);
final Parameters parameters = new Parameters(fileCollectionFactory);
parameters.getStartParameter().setCurrentDir(getWorkingDir());
parameters.getLayout().setCurrentDir(getWorkingDir());
parametersConverter.convert(parser.parse(getAllArgs()), parameters);
BuildActionExecuter<BuildActionParameters> actionExecuter = GLOBAL_SERVICES.get(BuildActionExecuter.class);
ListenerManager listenerManager = GLOBAL_SERVICES.get(ListenerManager.class);
listenerManager.addListener(listener);
try {
// TODO: Reuse more of BuildActionsFactory
StartParameterInternal startParameter = parameters.getStartParameter();
BuildAction action = new ExecuteBuildAction(startParameter);
BuildActionParameters buildActionParameters = createBuildActionParameters(startParameter);
BuildRequestContext buildRequestContext = createBuildRequestContext();
LoggingManagerInternal loggingManager = createLoggingManager(startParameter, outputStream, errorStream);
loggingManager.start();
try {
startMeasurement();
try {
BuildActionResult result = actionExecuter.execute(action, buildRequestContext, buildActionParameters, GLOBAL_SERVICES);
if (result.getException() != null) {
return new BuildResult(null, result.getException());
}
if (result.getFailure() != null) {
PayloadSerializer payloadSerializer = new PayloadSerializer(new TestClassLoaderRegistry());
return new BuildResult(null, (RuntimeException) payloadSerializer.deserialize(result.getFailure()));
}
return new BuildResult(null, null);
} finally {
stopMeasurement();
}
} finally {
loggingManager.stop();
}
} finally {
listenerManager.removeListener(listener);
}
}
private BuildActionParameters createBuildActionParameters(StartParameter startParameter) {
return new DefaultBuildActionParameters(
System.getProperties(),
System.getenv(),
SystemProperties.getInstance().getCurrentDir(),
startParameter.getLogLevel(),
false,
startParameter.isContinuous(),
ClassPath.EMPTY
);
}
private BuildRequestContext createBuildRequestContext() {
return new DefaultBuildRequestContext(
new DefaultBuildRequestMetaData(new GradleLauncherMetaData(), Time.currentTimeMillis(), interactive),
new DefaultBuildCancellationToken(),
new NoOpBuildEventConsumer());
}
@Override
public void assertCanExecute() {
assertNull(getExecutable());
String defaultEncoding = getImplicitJvmSystemProperties().get("file.encoding");
if (defaultEncoding != null) {
assertEquals(Charset.forName(defaultEncoding), Charset.defaultCharset());
}
Locale defaultLocale = getDefaultLocale();
if (defaultLocale != null) {
assertEquals(defaultLocale, Locale.getDefault());
}
assertFalse(isRequiresGradleDistribution());
}
@Override
protected TestFile getDefaultTmpDir() {
// File.createTempFile sets the location of the temp directory to a static variable on the first call. This prevents future
// changes to java.io.tmpdir from having any effect in the same process. We set this to use a common tmp directory for all
// tests running in the same process so that we don't have a situation where one process initializes with a tmp directory
// that it then removes, causing an IOException for any future tests that run in the same process and call File.createTempFile.
return COMMON_TMP;
}
@Override
public GradleExecuter withTestConsoleAttached() {
return withTestConsoleAttached(ConsoleAttachment.ATTACHED);
}
@Override
public GradleExecuter withTestConsoleAttached(ConsoleAttachment consoleAttachment) {
this.consoleAttachment = consoleAttachment;
return this;
}
private static class BuildListenerImpl implements TaskExecutionGraphListener, InternalListener {
private final List<String> executedTasks = new CopyOnWriteArrayList<>();
private final Set<String> skippedTasks = new CopyOnWriteArraySet<>();
@Override
public void graphPopulated(TaskExecutionGraph graph) {
List<Task> planned = new ArrayList<>(graph.getAllTasks());
graph.addTaskExecutionListener(new TaskListenerImpl(planned, executedTasks, skippedTasks));
}
}
private static class TaskListenerImpl implements TaskExecutionListener {
private final List<Task> planned;
private final List<String> executedTasks;
private final Set<String> skippedTasks;
TaskListenerImpl(List<Task> planned, List<String> executedTasks, Set<String> skippedTasks) {
this.planned = planned;
this.executedTasks = executedTasks;
this.skippedTasks = skippedTasks;
}
@Override
public void beforeExecute(Task task) {
if (!planned.contains(task)) {
System.out.println("Warning: " + task + " was executed even though it is not part of the task plan!");
}
String taskPath = path(task);
executedTasks.add(taskPath);
}
@Override
public void afterExecute(Task task, TaskState state) {
String taskPath = path(task);
if (state.getSkipped()) {
skippedTasks.add(taskPath);
}
}
private String path(Task task) {
            return ((TaskInternal) task).getIdentityPath().getPath();
}
}
public static class InProcessExecutionResult implements ExecutionResult {
protected static final Spec<String> NOT_BUILD_SRC_TASK = t -> !t.startsWith(":buildSrc:");
protected final List<String> executedTasks;
protected final Set<String> skippedTasks;
private final ExecutionResult outputResult;
InProcessExecutionResult(List<String> executedTasks, Set<String> skippedTasks, ExecutionResult outputResult) {
this.executedTasks = executedTasks;
this.skippedTasks = skippedTasks;
this.outputResult = outputResult;
}
@Override
public ExecutionResult getIgnoreBuildSrc() {
List<String> executedTasks = CollectionUtils.filter(this.executedTasks, NOT_BUILD_SRC_TASK);
Set<String> skippedTasks = CollectionUtils.filter(this.skippedTasks, NOT_BUILD_SRC_TASK);
return new InProcessExecutionResult(executedTasks, skippedTasks, outputResult.getIgnoreBuildSrc());
}
@Override
public String getOutput() {
return outputResult.getOutput();
}
@Override
public String getNormalizedOutput() {
return outputResult.getNormalizedOutput();
}
@Override
public String getFormattedOutput() {
return outputResult.getFormattedOutput();
}
@Override
public String getPlainTextOutput() {
return outputResult.getPlainTextOutput();
}
@Override
public GroupedOutputFixture getGroupedOutput() {
return outputResult.getGroupedOutput();
}
@Override
public ExecutionResult assertOutputEquals(String expectedOutput, boolean ignoreExtraLines, boolean ignoreLineOrder) {
outputResult.assertOutputEquals(expectedOutput, ignoreExtraLines, ignoreLineOrder);
return this;
}
@Override
public ExecutionResult assertNotOutput(String expectedOutput) {
outputResult.assertNotOutput(expectedOutput);
return this;
}
@Override
public ExecutionResult assertOutputContains(String expectedOutput) {
outputResult.assertOutputContains(expectedOutput);
return this;
}
@Override
public ExecutionResult assertContentContains(String content, String expectedOutput, String label) {
outputResult.assertContentContains(content, expectedOutput, label);
            return this;
}
@Override
public ExecutionResult assertHasPostBuildOutput(String expectedOutput) {
outputResult.assertHasPostBuildOutput(expectedOutput);
return this;
}
@Override
public boolean hasErrorOutput(String expectedOutput) {
return outputResult.hasErrorOutput(expectedOutput);
}
@Override
public ExecutionResult assertHasErrorOutput(String expectedOutput) {
outputResult.assertHasErrorOutput(expectedOutput);
return this;
}
@Override
public String getError() {
return outputResult.getError();
}
@Override
public ExecutionResult assertTasksExecutedInOrder(Object... taskPaths) {
Set<String> expected = TaskOrderSpecs.exact(taskPaths).getTasks();
assertTasksExecuted(expected);
assertTaskOrder(taskPaths);
outputResult.assertTasksExecutedInOrder(taskPaths);
return this;
}
@Override
public ExecutionResult assertTasksExecuted(Object... taskPaths) {
Set<String> flattenedTasks = new TreeSet<>(flattenTaskPaths(taskPaths));
assertEquals(new TreeSet<>(executedTasks), new TreeSet<>(flattenedTasks));
outputResult.assertTasksExecuted(flattenedTasks);
return this;
}
@Override
public ExecutionResult assertTasksExecutedAndNotSkipped(Object... taskPaths) {
assertTasksExecuted(taskPaths);
assertTasksNotSkipped(taskPaths);
return this;
}
@Override
public ExecutionResult assertTaskExecuted(String taskPath) {
assertThat(executedTasks, hasItem(taskPath));
outputResult.assertTaskExecuted(taskPath);
return this;
}
@Override
public ExecutionResult assertTaskNotExecuted(String taskPath) {
assertThat(executedTasks, not(hasItem(taskPath)));
outputResult.assertTaskNotExecuted(taskPath);
return this;
}
@Override
public ExecutionResult assertTaskOrder(Object... taskPaths) {
TaskOrderSpecs.exact(taskPaths).assertMatches(-1, executedTasks);
outputResult.assertTaskOrder(taskPaths);
return this;
}
@Override
public ExecutionResult assertTasksSkipped(Object... taskPaths) {
Set<String> expected = new TreeSet<>(flattenTaskPaths(taskPaths));
assertThat(skippedTasks, equalTo(expected));
outputResult.assertTasksSkipped(expected);
return this;
}
@Override
public ExecutionResult assertTaskSkipped(String taskPath) {
assertThat(skippedTasks, hasItem(taskPath));
outputResult.assertTaskSkipped(taskPath);
return this;
}
@Override
public ExecutionResult assertTasksNotSkipped(Object... taskPaths) {
Set<String> expected = new TreeSet<>(flattenTaskPaths(taskPaths));
Set<String> notSkipped = getNotSkippedTasks();
assertThat(notSkipped, equalTo(expected));
outputResult.assertTasksNotSkipped(expected);
return this;
}
@Override
public ExecutionResult assertTaskNotSkipped(String taskPath) {
assertThat(getNotSkippedTasks(), hasItem(taskPath));
outputResult.assertTaskNotSkipped(taskPath);
return this;
}
private Set<String> getNotSkippedTasks() {
Set<String> notSkipped = new TreeSet<>(executedTasks);
notSkipped.removeAll(skippedTasks);
return notSkipped;
}
}
private static class InProcessExecutionFailure extends InProcessExecutionResult implements ExecutionFailure {
private static final Pattern LOCATION_PATTERN = Pattern.compile("(?m)^((\\w+ )+'.+') line: (\\d+)$");
private final ExecutionFailure outputFailure;
private final Throwable failure;
private final List<String> fileNames = new ArrayList<>();
private final List<String> lineNumbers = new ArrayList<>();
private final List<String> descriptions = new ArrayList<>();
InProcessExecutionFailure(List<String> tasks, Set<String> skippedTasks, ExecutionFailure outputFailure, Throwable failure) {
super(tasks, skippedTasks, outputFailure);
this.outputFailure = outputFailure;
this.failure = failure;
if (failure instanceof MultipleBuildFailures) {
for (Throwable cause : ((MultipleBuildFailures) failure).getCauses()) {
extractDetails(cause);
}
} else {
extractDetails(failure);
}
}
private void extractDetails(Throwable failure) {
String failureMessage = failure.getMessage() == null ? "" : failure.getMessage();
java.util.regex.Matcher matcher = LOCATION_PATTERN.matcher(failureMessage);
if (matcher.find()) {
fileNames.add(matcher.group(1));
lineNumbers.add(matcher.group(3));
descriptions.add(failureMessage.substring(matcher.end()).trim());
} else {
descriptions.add(failureMessage.trim());
}
}
@Override
public InProcessExecutionFailure getIgnoreBuildSrc() {
List<String> executedTasks = CollectionUtils.filter(this.executedTasks, NOT_BUILD_SRC_TASK);
Set<String> skippedTasks = CollectionUtils.filter(this.skippedTasks, NOT_BUILD_SRC_TASK);
return new InProcessExecutionFailure(executedTasks, skippedTasks, outputFailure.getIgnoreBuildSrc(), failure);
}
@Override
public ExecutionFailure assertHasLineNumber(int lineNumber) {
outputFailure.assertHasLineNumber(lineNumber);
assertThat(this.lineNumbers, hasItem(equalTo(String.valueOf(lineNumber))));
return this;
}
@Override
public ExecutionFailure assertHasFileName(String filename) {
outputFailure.assertHasFileName(filename);
assertThat(this.fileNames, hasItem(equalTo(filename)));
return this;
}
@Override
public ExecutionFailure assertHasResolution(String resolution) {
outputFailure.assertHasResolution(resolution);
return this;
}
@Override
public ExecutionFailure assertHasFailures(int count) {
outputFailure.assertHasFailures(count);
if (count == 1) {
assertFalse(failure instanceof MultipleBuildFailures);
} else {
assertEquals(((MultipleBuildFailures) failure).getCauses().size(), count);
}
return this;
}
@Override
public ExecutionFailure assertHasCause(String description) {
assertThatCause(startsWith(description));
return this;
}
@Override
public ExecutionFailure assertThatCause(Matcher<? super String> matcher) {
outputFailure.assertThatCause(matcher);
List<Throwable> causes = new ArrayList<>();
extractCauses(failure, causes);
Matcher<Throwable> messageMatcher = hasMessage(normalizedLineSeparators(matcher));
for (Throwable cause : causes) {
if (messageMatcher.matches(cause)) {
return this;
}
}
fail(String.format("Could not find matching cause in: %s%nFailure is: %s", causes, failure));
return this;
}
private void extractCauses(Throwable failure, List<Throwable> causes) {
if (failure instanceof MultipleBuildFailures) {
MultipleBuildFailures exception = (MultipleBuildFailures) failure;
for (Throwable componentFailure : exception.getCauses()) {
extractCauses(componentFailure, causes);
}
} else if (failure instanceof LocationAwareException) {
causes.addAll(((LocationAwareException) failure).getReportableCauses());
} else {
causes.add(failure);
}
}
@Override
public ExecutionFailure assertHasNoCause(String description) {
outputFailure.assertHasNoCause(description);
Matcher<Throwable> matcher = hasMessage(containsString(description));
List<Throwable> causes = new ArrayList<>();
extractCauses(failure, causes);
for (Throwable cause : causes) {
if (matcher.matches(cause)) {
throw new AssertionFailedError(String.format("Expected no failure with description '%s', found: %s", description, cause));
}
}
return this;
}
@Override
public ExecutionFailure assertHasNoCause() {
outputFailure.assertHasNoCause();
if (failure instanceof LocationAwareException) {
LocationAwareException exception = (LocationAwareException) failure;
assertThat(exception.getReportableCauses(), isEmpty());
} else {
assertThat(failure.getCause(), nullValue());
}
return this;
}
@Override
public ExecutionFailure assertHasDescription(String context) {
assertThatDescription(startsWith(context));
return this;
}
@Override
public ExecutionFailure assertThatDescription(Matcher<? super String> matcher) {
outputFailure.assertThatDescription(matcher);
assertThat(descriptions, hasItem(normalizedLineSeparators(matcher)));
return this;
}
@Override
public ExecutionFailure assertTestsFailed() {
new DetailedExecutionFailure(this).assertTestsFailed();
return this;
}
@Override
public DependencyResolutionFailure assertResolutionFailure(String configurationPath) {
return new DependencyResolutionFailure(this, configurationPath);
}
}
private static class TestClassLoaderRegistry implements PayloadClassLoaderRegistry {
@Override
public SerializeMap newSerializeSession() {
throw new UnsupportedOperationException();
}
@Override
public DeserializeMap newDeserializeSession() {
return (classLoaderDetails, className) -> {
// Assume everything is loaded into the current classloader
return Class.forName(className);
};
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
tool/kratos/logger/logger.go | // Copyright 2013 bee authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package yeeLogger
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sync"
"sync/atomic"
"text/template"
"time"
"github.com/zzpu/kratos/tool/kratos/logger/colors"
)
var errInvalidLogLevel = errors.New("logger: invalid log level")
const (
levelDebug = iota
levelError
levelFatal
levelCritical
levelSuccess
levelWarn
levelInfo
levelHint
)
var (
sequenceNo uint64
instance *BeeLogger
once sync.Once
)
var debugMode = os.Getenv("DEBUG_ENABLED") == "1"
var logLevel = levelInfo
// BeeLogger logs logging records to the specified io.Writer
type BeeLogger struct {
mu sync.Mutex
output io.Writer
}
// LogRecord represents a log record and contains the timestamp when the record
// was created, an increasing id, level and the actual formatted log line.
type LogRecord struct {
ID string
Level string
Message string
Filename string
LineNo int
}
var Log = GetBeeLogger(os.Stdout)
var (
logRecordTemplate *template.Template
debugLogRecordTemplate *template.Template
)
// GetBeeLogger initializes the logger instance with a NewColorWriter output
// and returns a singleton
func GetBeeLogger(w io.Writer) *BeeLogger {
once.Do(func() {
var (
err error
simpleLogFormat = `{{Now "2006/01/02 15:04:05"}} {{.Level}} ▶ {{.ID}} {{.Message}}{{EndLine}}`
debugLogFormat = `{{Now "2006/01/02 15:04:05"}} {{.Level}} ▶ {{.ID}} {{.Filename}}:{{.LineNo}} {{.Message}}{{EndLine}}`
)
// Initialize and parse logging templates
funcs := template.FuncMap{
"Now": Now,
"EndLine": EndLine,
}
logRecordTemplate, err = template.New("simpleLogFormat").Funcs(funcs).Parse(simpleLogFormat)
if err != nil {
panic(err)
}
debugLogRecordTemplate, err = template.New("debugLogFormat").Funcs(funcs).Parse(debugLogFormat)
if err != nil {
panic(err)
}
instance = &BeeLogger{output: colors.NewColorWriter(w)}
})
return instance
}
// SetOutput sets the logger output destination
func (l *BeeLogger) SetOutput(w io.Writer) {
l.mu.Lock()
defer l.mu.Unlock()
l.output = colors.NewColorWriter(w)
}
// Now returns the current local time in the specified layout
func Now(layout string) string {
return time.Now().Format(layout)
}
// EndLine returns a newline character
func EndLine() string {
return "\n"
}
func (l *BeeLogger) getLevelTag(level int) string {
switch level {
case levelFatal:
return "FATAL "
case levelSuccess:
return "SUCCESS "
case levelHint:
return "HINT "
case levelDebug:
return "DEBUG "
case levelInfo:
return "INFO "
case levelWarn:
return "WARN "
case levelError:
return "ERROR "
case levelCritical:
return "CRITICAL"
default:
panic(errInvalidLogLevel)
}
}
func (l *BeeLogger) getColorLevel(level int) string {
switch level {
case levelCritical:
return colors.RedBold(l.getLevelTag(level))
case levelFatal:
return colors.RedBold(l.getLevelTag(level))
case levelInfo:
return colors.BlueBold(l.getLevelTag(level))
case levelHint:
return colors.CyanBold(l.getLevelTag(level))
case levelDebug:
return colors.YellowBold(l.getLevelTag(level))
case levelError:
return colors.RedBold(l.getLevelTag(level))
case levelWarn:
return colors.YellowBold(l.getLevelTag(level))
case levelSuccess:
return colors.GreenBold(l.getLevelTag(level))
default:
panic(errInvalidLogLevel)
}
}
// mustLog logs the message according to the specified level and arguments.
// It panics in case of an error.
func (l *BeeLogger) mustLog(level int, message string, args ...interface{}) {
if level > logLevel {
return
}
// Acquire the lock
l.mu.Lock()
defer l.mu.Unlock()
// Create the logging record and pass into the output
record := LogRecord{
ID: fmt.Sprintf("%04d", atomic.AddUint64(&sequenceNo, 1)),
Level: l.getColorLevel(level),
Message: fmt.Sprintf(message, args...),
}
err := logRecordTemplate.Execute(l.output, record)
if err != nil {
panic(err)
}
}
// mustLogDebug logs a debug message only if debug mode
// is enabled. i.e. DEBUG_ENABLED="1"
func (l *BeeLogger) mustLogDebug(message string, file string, line int, args ...interface{}) {
if !debugMode {
return
}
// Change the output to Stderr
l.SetOutput(os.Stderr)
// Create the log record
record := LogRecord{
ID: fmt.Sprintf("%04d", atomic.AddUint64(&sequenceNo, 1)),
Level: l.getColorLevel(levelDebug),
Message: fmt.Sprintf(message, args...),
LineNo: line,
Filename: filepath.Base(file),
}
err := debugLogRecordTemplate.Execute(l.output, record)
if err != nil {
panic(err)
}
}
// Debug outputs a debug log message
func (l *BeeLogger) Debug(message string, file string, line int) {
l.mustLogDebug(message, file, line)
}
// Debugf outputs a formatted debug log message
func (l *BeeLogger) Debugf(message string, file string, line int, vars ...interface{}) {
l.mustLogDebug(message, file, line, vars...)
}
// Info outputs an information log message
func (l *BeeLogger) Info(message string) {
l.mustLog(levelInfo, message)
}
// Infof outputs a formatted information log message
func (l *BeeLogger) Infof(message string, vars ...interface{}) {
l.mustLog(levelInfo, message, vars...)
}
// Warn outputs a warning log message
func (l *BeeLogger) Warn(message string) {
l.mustLog(levelWarn, message)
}
// Warnf outputs a formatted warning log message
func (l *BeeLogger) Warnf(message string, vars ...interface{}) {
l.mustLog(levelWarn, message, vars...)
}
// Error outputs an error log message
func (l *BeeLogger) Error(message string) {
l.mustLog(levelError, message)
}
// Errorf outputs a formatted error log message
func (l *BeeLogger) Errorf(message string, vars ...interface{}) {
l.mustLog(levelError, message, vars...)
}
// Fatal outputs a fatal log message and exits
func (l *BeeLogger) Fatal(message string) {
l.mustLog(levelFatal, message)
os.Exit(255)
}
// Fatalf outputs a formatted fatal log message and exits
func (l *BeeLogger) Fatalf(message string, vars ...interface{}) {
l.mustLog(levelFatal, message, vars...)
os.Exit(255)
}
// Success outputs a success log message
func (l *BeeLogger) Success(message string) {
l.mustLog(levelSuccess, message)
}
// Successf outputs a formatted success log message
func (l *BeeLogger) Successf(message string, vars ...interface{}) {
l.mustLog(levelSuccess, message, vars...)
}
// Hint outputs a hint log message
func (l *BeeLogger) Hint(message string) {
l.mustLog(levelHint, message)
}
// Hintf outputs a formatted hint log message
func (l *BeeLogger) Hintf(message string, vars ...interface{}) {
l.mustLog(levelHint, message, vars...)
}
// Critical outputs a critical log message
func (l *BeeLogger) Critical(message string) {
l.mustLog(levelCritical, message)
}
// Criticalf outputs a formatted critical log message
func (l *BeeLogger) Criticalf(message string, vars ...interface{}) {
l.mustLog(levelCritical, message, vars...)
}
| [
"\"DEBUG_ENABLED\""
] | [] | [
"DEBUG_ENABLED"
] | [] | ["DEBUG_ENABLED"] | go | 1 | 0 | |
evaluation/old_compare/compare_fth300_fb.py | import os
import numpy as np
import pickle
import shutil
import datetime
import cv2
import copyreg
from tqdm.std import tqdm
from . import compare_deepfeatures
from . import compare_compoelem_new
from . import compare_combined_vgg19
from . import compare_combined_sift
from . import compare_linkingArt
from . import compare_sift
from . import compare_orb
from . import compare_brief
# fix cv2 keypoint pickling error
def _pickle_keypoint(keypoint): # : cv2.KeyPoint
return cv2.KeyPoint, (
keypoint.pt[0],
keypoint.pt[1],
keypoint.size,
keypoint.angle,
keypoint.response,
keypoint.octave,
keypoint.class_id,
)
# Apply the bundling to pickle
copyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)
#dataset_cleaned_extended_balanced = ceb_dataset -> combination of clean_data (all with _art classes nativity and virgin) dataset and files from prathmesn & ronak from 18.03.
osuname = os.uname().nodename
print("osuname", osuname)
if osuname == 'MBP-von-Tilman' or osuname == 'MacBook-Pro-von-Tilman.local':
COMPOELEM_ROOT = "/Users/tilman/Documents/Programme/Python/new_bachelor_thesis/compoelem"
elif osuname == 'lme117':
COMPOELEM_ROOT = "/home/zi14teho/compositional_elements"
else:
COMPOELEM_ROOT = os.getenv('COMPOELEM_ROOT')
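# Illustrative: on hosts other than the two hard-coded machines above, COMPOELEM_ROOT must be
# exported before running, e.g. `export COMPOELEM_ROOT=$HOME/compositional_elements` (example path).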
DATASTORE_NAME = "combined_datastore_ceb_dataset"
DATASTORE_FILE = COMPOELEM_ROOT+"/final_evaluation/"+DATASTORE_NAME+".pkl"
EVAL_RESULTS_FILE = COMPOELEM_ROOT+"/final_evaluation/evaluation_log.pkl"
datastore = pickle.load(open(DATASTORE_FILE, "rb"))
try:
evaluation_log = pickle.load(open(EVAL_RESULTS_FILE, "rb"))
# for log_entry in evaluation_log:
# log_entry["new"] = False
shutil.copyfile(EVAL_RESULTS_FILE, EVAL_RESULTS_FILE+"_"+str(datetime.date.today())+"_backup")
except FileNotFoundError as e:
evaluation_log = []
# [evaluation_log.append(experiment) for experiment in compare_deepfeatures.eval_all_combinations(datastore, DATASTORE_NAME, "imageNet_vgg19_bn_features")]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_deepfeatures.eval_all_combinations(datastore, DATASTORE_NAME, "places365_resnet50_feature_noFC")]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_compoelem.eval_all_combinations(datastore, DATASTORE_NAME)]
#fallback: yes, no
#filter_threshold: 150, 200, 250, 300
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 150, True)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 200, True)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 250, True)]
[evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 300, True)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 150, False)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 200, False)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 250, False)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 300, False)]
# def eval_all_combinations(datastore, datastore_name, filter_threshold, with_fallback):
try:
evaluation_log = pickle.load(open(EVAL_RESULTS_FILE, "rb"))
pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
except Exception as e:
print("open err",e)
# [evaluation_log.append(experiment) for experiment in compare_combined_vgg19.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_sift.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_orb.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_brief.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_combined_sift.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_linkingArt.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
def get_new_evaluation_log():
evaluation_log = pickle.load(open(EVAL_RESULTS_FILE, "rb"))
new_log_entries = list(filter(lambda log_entry: log_entry["new"], evaluation_log))
return new_log_entries
print("new_log_entries: {}, evaluation_log_size:{}".format(len(get_new_evaluation_log()), len(evaluation_log)))
| [] | [] | [
"COMPOELEM_ROOT"
] | [] | ["COMPOELEM_ROOT"] | python | 1 | 0 | |
vendor/github.com/lunny/tango/tan.go | // Copyright 2015 The Tango Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tango
import (
"net/http"
"os"
"strconv"
"strings"
"sync"
)
// Version returns tango's version
func Version() string {
return "0.5.4.0517"
}
// Tango describes tango object
type Tango struct {
Router
handlers []Handler
logger Logger
ErrHandler Handler
ctxPool sync.Pool
respPool sync.Pool
}
var (
// ClassicHandlers the default handlers
ClassicHandlers = []Handler{
Logging(),
Recovery(false),
Compresses([]string{}),
Static(StaticOptions{Prefix: "public"}),
Return(),
Param(),
Contexts(),
}
)
// Logger returns logger interface
func (t *Tango) Logger() Logger {
return t.logger
}
// Get sets a route with GET method
func (t *Tango) Get(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"GET", "HEAD:Get"}, url, c, middlewares...)
}
// Post sets a route with POST method
func (t *Tango) Post(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"POST"}, url, c, middlewares...)
}
// Head sets a route with HEAD method
func (t *Tango) Head(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"HEAD"}, url, c, middlewares...)
}
// Options sets a route with OPTIONS method
func (t *Tango) Options(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"OPTIONS"}, url, c, middlewares...)
}
// Trace sets a route with TRACE method
func (t *Tango) Trace(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"TRACE"}, url, c, middlewares...)
}
// Patch sets a route with PATCH method
func (t *Tango) Patch(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"PATCH"}, url, c, middlewares...)
}
// Delete sets a route with DELETE method
func (t *Tango) Delete(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"DELETE"}, url, c, middlewares...)
}
// Put sets a route with PUT method
func (t *Tango) Put(url string, c interface{}, middlewares ...Handler) {
t.Route([]string{"PUT"}, url, c, middlewares...)
}
// Any sets a route every support method is OK.
func (t *Tango) Any(url string, c interface{}, middlewares ...Handler) {
t.Route(SupportMethods, url, c, middlewares...)
t.Route([]string{"HEAD:Get"}, url, c, middlewares...)
}
// Use adds some global handlers
func (t *Tango) Use(handlers ...Handler) {
t.handlers = append(t.handlers, handlers...)
}
// getAddress parses the listen address from the given arguments
func getAddress(args ...interface{}) string {
var host string
var port int
if len(args) == 1 {
switch arg := args[0].(type) {
case string:
addrs := strings.Split(args[0].(string), ":")
if len(addrs) == 1 {
host = addrs[0]
} else if len(addrs) >= 2 {
host = addrs[0]
_port, _ := strconv.ParseInt(addrs[1], 10, 0)
port = int(_port)
}
case int:
port = arg
}
} else if len(args) >= 2 {
if arg, ok := args[0].(string); ok {
host = arg
}
if arg, ok := args[1].(int); ok {
port = arg
}
}
if envHost := os.Getenv("HOST"); len(envHost) != 0 {
host = envHost
} else if len(host) == 0 {
host = "0.0.0.0"
}
if envPort, _ := strconv.ParseInt(os.Getenv("PORT"), 10, 32); envPort != 0 {
port = int(envPort)
} else if port == 0 {
port = 8000
}
addr := host + ":" + strconv.FormatInt(int64(port), 10)
return addr
}
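// Illustrative examples (the HOST and PORT environment variables take precedence over arguments):
//   getAddress()                 -> "0.0.0.0:8000"
//   getAddress("127.0.0.1:9000") -> "127.0.0.1:9000"
//   getAddress(9000) with HOST=example.com and PORT=80 set -> "example.com:80"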
// Run runs the http server, listening on os.Getenv("PORT") or 8000 by default.
func (t *Tango) Run(args ...interface{}) {
addr := getAddress(args...)
t.logger.Info("Listening on http://" + addr)
err := http.ListenAndServe(addr, t)
if err != nil {
t.logger.Error(err)
}
}
// RunTLS runs the https server with special cert and key files
func (t *Tango) RunTLS(certFile, keyFile string, args ...interface{}) {
addr := getAddress(args...)
t.logger.Info("Listening on https://" + addr)
err := http.ListenAndServeTLS(addr, certFile, keyFile, t)
if err != nil {
t.logger.Error(err)
}
}
// HandlerFunc describes the handle function
type HandlerFunc func(ctx *Context)
// Handle executes the handler
func (h HandlerFunc) Handle(ctx *Context) {
h(ctx)
}
// WrapBefore wraps a standard http handler so it executes before tango's remaining handlers
func WrapBefore(handler http.Handler) HandlerFunc {
return func(ctx *Context) {
handler.ServeHTTP(ctx.ResponseWriter, ctx.Req())
ctx.Next()
}
}
// WrapAfter wraps a standard http handler so it executes after tango's remaining handlers
func WrapAfter(handler http.Handler) HandlerFunc {
return func(ctx *Context) {
ctx.Next()
handler.ServeHTTP(ctx.ResponseWriter, ctx.Req())
}
}
// UseHandler adds a standard http handler to tango's handler chain
func (t *Tango) UseHandler(handler http.Handler) {
t.Use(WrapBefore(handler))
}
// ServeHTTP implements the net/http Handler interface so that tango can run with net/http
func (t *Tango) ServeHTTP(w http.ResponseWriter, req *http.Request) {
resp := t.respPool.Get().(*responseWriter)
resp.reset(w)
ctx := t.ctxPool.Get().(*Context)
ctx.tan = t
ctx.reset(req, resp)
ctx.invoke()
// If there is no logging or error handler, fall back to this final check on whether a response was written.
if !ctx.Written() {
p := req.URL.Path
if len(req.URL.RawQuery) > 0 {
p = p + "?" + req.URL.RawQuery
}
if ctx.Route() != nil {
if ctx.Result == nil {
ctx.WriteString("")
t.logger.Info(req.Method, ctx.Status(), p)
t.ctxPool.Put(ctx)
t.respPool.Put(resp)
return
}
panic("result should be handler before")
}
if ctx.Result == nil {
ctx.Result = NotFound()
}
ctx.HandleError()
t.logger.Error(req.Method, ctx.Status(), p)
}
t.ctxPool.Put(ctx)
t.respPool.Put(resp)
}
// NewWithLog creates tango with the special logger and handlers
func NewWithLog(logger Logger, handlers ...Handler) *Tango {
tan := &Tango{
Router: newRouter(),
logger: logger,
handlers: make([]Handler, 0),
ErrHandler: Errors(),
}
tan.ctxPool.New = func() interface{} {
return &Context{
tan: tan,
Logger: tan.logger,
}
}
tan.respPool.New = func() interface{} {
return &responseWriter{}
}
tan.Use(handlers...)
return tan
}
// New creates tango with the default logger and handlers
func New(handlers ...Handler) *Tango {
return NewWithLog(NewLogger(os.Stdout), handlers...)
}
// Classic returns the tango with default handlers and logger
func Classic(l ...Logger) *Tango {
var logger Logger
if len(l) == 0 {
logger = NewLogger(os.Stdout)
} else {
logger = l[0]
}
return NewWithLog(
logger,
ClassicHandlers...,
)
}
| [
"\"HOST\"",
"\"PORT\""
] | [] | [
"PORT",
"HOST"
] | [] | ["PORT", "HOST"] | go | 2 | 0 | |
cmd/api-server/main.go | package main
import (
"flag"
"fmt"
"net"
"os"
"github.com/kelseyhightower/envconfig"
kubeclient "github.com/kubeshop/testkube-operator/client"
executorsclientv1 "github.com/kubeshop/testkube-operator/client/executors/v1"
scriptsclient "github.com/kubeshop/testkube-operator/client/scripts/v2"
testsclientv1 "github.com/kubeshop/testkube-operator/client/tests"
testsclientv2 "github.com/kubeshop/testkube-operator/client/tests/v2"
testsuitesclientv1 "github.com/kubeshop/testkube-operator/client/testsuites/v1"
apiv1 "github.com/kubeshop/testkube/internal/app/api/v1"
"github.com/kubeshop/testkube/internal/migrations"
"github.com/kubeshop/testkube/internal/pkg/api"
"github.com/kubeshop/testkube/internal/pkg/api/repository/result"
"github.com/kubeshop/testkube/internal/pkg/api/repository/storage"
"github.com/kubeshop/testkube/internal/pkg/api/repository/testresult"
"github.com/kubeshop/testkube/pkg/analytics"
"github.com/kubeshop/testkube/pkg/migrator"
"github.com/kubeshop/testkube/pkg/secret"
"github.com/kubeshop/testkube/pkg/ui"
)
type MongoConfig struct {
DSN string `envconfig:"API_MONGO_DSN" default:"mongodb://localhost:27017"`
DB string `envconfig:"API_MONGO_DB" default:"testkube"`
}
var Config MongoConfig
var verbose = flag.Bool("v", false, "enable verbosity level")
func init() {
flag.Parse()
ui.Verbose = *verbose
envconfig.Process("mongo", &Config)
}
func runMigrations() (err error) {
ui.Info("Available migrations for", api.Version)
results := migrations.Migrator.GetValidMigrations(api.Version, migrator.MigrationTypeServer)
if len(results) == 0 {
ui.Warn("No migrations available for", api.Version)
return nil
}
for _, migration := range results {
fmt.Printf("- %+v - %s\n", migration.Version(), migration.Info())
}
return migrations.Migrator.Run(api.Version, migrator.MigrationTypeServer)
}
func main() {
analytics.SendAnonymousInfo()
port := os.Getenv("APISERVER_PORT")
namespace := "testkube"
if ns, ok := os.LookupEnv("TESTKUBE_NAMESPACE"); ok {
namespace = ns
}
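// Illustrative start-up (example values; the binary name is hypothetical):
//   APISERVER_PORT=8088 TESTKUBE_NAMESPACE=testkube API_MONGO_DSN=mongodb://localhost:27017 ./api-server -v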
ln, err := net.Listen("tcp", ":"+port)
ui.ExitOnError("Checking if port "+port+" is free", err)
ln.Close()
ui.Debug("TCP Port is available", port)
// DI
db, err := storage.GetMongoDataBase(Config.DSN, Config.DB)
ui.ExitOnError("Getting mongo database", err)
kubeClient, err := kubeclient.GetClient()
ui.ExitOnError("Getting kubernetes client", err)
secretClient, err := secret.NewClient(namespace)
ui.ExitOnError("Getting secret client", err)
scriptsClient := scriptsclient.NewClient(kubeClient, namespace)
testsClientV1 := testsclientv1.NewClient(kubeClient, namespace)
testsClientV2 := testsclientv2.NewClient(kubeClient, namespace)
executorsClient := executorsclientv1.NewClient(kubeClient, namespace)
webhooksClient := executorsclientv1.NewWebhooksClient(kubeClient, namespace)
testsuitesClient := testsuitesclientv1.NewClient(kubeClient, namespace)
resultsRepository := result.NewMongoRespository(db)
testResultsRepository := testresult.NewMongoRespository(db)
migrations.Migrator.Add(migrations.NewVersion_0_9_2(scriptsClient, testsClientV1, testsClientV2, testsuitesClient))
if err := runMigrations(); err != nil {
ui.ExitOnError("Running server migrations", err)
}
err = apiv1.NewTestkubeAPI(
namespace,
resultsRepository,
testResultsRepository,
testsClientV2,
executorsClient,
testsuitesClient,
secretClient,
webhooksClient,
).Run()
ui.ExitOnError("Running API Server", err)
}
| [
"\"APISERVER_PORT\""
] | [] | [
"APISERVER_PORT"
] | [] | ["APISERVER_PORT"] | go | 1 | 0 | |
reminders/reminders/celery.py | import os
from celery import Celery
from celery.schedules import crontab
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reminders.settings')
app = Celery('reminders')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
app.conf.beat_schedule = {
'add-every-60-seconds': {
'task': 'schedule.tasks.remind_by_mail',
'schedule': 10.0
},
'test_task_every_10_seconds': {
'task': 'schedule.tasks.print_test',
'schedule': 10.0
},
}
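# Illustrative alternative: the imported crontab schedule could replace the float intervals,
# e.g. 'schedule': crontab(minute='*/1') to run once per minute (example only).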
app.conf.timezone = 'UTC'
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}')
| [] | [] | [] | [] | [] | python | 0 | 0 | |
testbot/src/main/java/com/sedmelluq/discord/lavaplayer/demo/BotApplicationManager.java | package com.sedmelluq.discord.lavaplayer.demo;
import com.neovisionaries.i18n.CountryCode;
import com.sedmelluq.discord.lavaplayer.demo.controller.BotCommandMappingHandler;
import com.sedmelluq.discord.lavaplayer.demo.controller.BotController;
import com.sedmelluq.discord.lavaplayer.demo.controller.BotControllerManager;
import com.sedmelluq.discord.lavaplayer.demo.music.MusicController;
import com.sedmelluq.discord.lavaplayer.player.AudioConfiguration;
import com.sedmelluq.discord.lavaplayer.player.AudioPlayerManager;
import com.sedmelluq.discord.lavaplayer.player.DefaultAudioPlayerManager;
import com.sedmelluq.discord.lavaplayer.source.http.HttpAudioSourceManager;
import com.sedmelluq.discord.lavaplayer.source.local.LocalAudioSourceManager;
import com.sedmelluq.discord.lavaplayer.source.soundcloud.SoundCloudAudioSourceManager;
import com.sedmelluq.discord.lavaplayer.source.spotify.SpotifyAudioSourceManager;
import com.sedmelluq.discord.lavaplayer.source.twitch.TwitchStreamAudioSourceManager;
import com.sedmelluq.discord.lavaplayer.source.youtube.YoutubeAudioSourceManager;
import com.sedmelluq.discord.lavaplayer.track.lyrics.LyricsInfo;
import com.sedmelluq.discord.lavaplayer.track.lyrics.LyricsManager;
import com.sedmelluq.lava.common.tools.DaemonThreadFactory;
import net.dv8tion.jda.api.entities.ChannelType;
import net.dv8tion.jda.api.entities.Guild;
import net.dv8tion.jda.api.entities.Member;
import net.dv8tion.jda.api.entities.Message;
import net.dv8tion.jda.api.events.guild.GuildLeaveEvent;
import net.dv8tion.jda.api.events.message.MessageReceivedEvent;
import net.dv8tion.jda.api.hooks.ListenerAdapter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
public class BotApplicationManager extends ListenerAdapter {
private static final Logger log = LoggerFactory.getLogger(BotApplicationManager.class);
private final Map<Long, BotGuildContext> guildContexts;
private final BotControllerManager controllerManager;
private final AudioPlayerManager playerManager;
private final ScheduledExecutorService executorService;
public BotApplicationManager() {
guildContexts = new HashMap<>();
controllerManager = new BotControllerManager();
controllerManager.registerController(new MusicController.Factory());
playerManager = new DefaultAudioPlayerManager();
//playerManager.useRemoteNodes("localhost:8080");
playerManager.getConfiguration().setResamplingQuality(AudioConfiguration.ResamplingQuality.LOW);
playerManager.registerSourceManager(new YoutubeAudioSourceManager());
String spToken = System.getenv("spotifyToken");
playerManager.registerSourceManager(new SpotifyAudioSourceManager(spToken.split(":")[0], spToken.split(":")[1], CountryCode.EU));
playerManager.registerSourceManager(SoundCloudAudioSourceManager.createDefault());
playerManager.registerSourceManager(new TwitchStreamAudioSourceManager());
playerManager.registerSourceManager(new HttpAudioSourceManager());
playerManager.registerSourceManager(new LocalAudioSourceManager());
executorService = Executors.newScheduledThreadPool(1, new DaemonThreadFactory("bot"));
try {
LyricsInfo l = LyricsManager.getLyrics("bohemian rhapsody");
System.out.println(l.getLyrics());
} catch (Exception e) {
e.printStackTrace();
}
}
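// Illustrative: the "spotifyToken" environment variable is expected in "clientId:clientSecret" form,
// e.g. export spotifyToken=exampleClientId:exampleClientSecret (example values only).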
public ScheduledExecutorService getExecutorService() {
return executorService;
}
public AudioPlayerManager getPlayerManager() {
return playerManager;
}
private BotGuildContext createGuildState(long guildId, Guild guild) {
BotGuildContext context = new BotGuildContext(guildId);
for (BotController controller : controllerManager.createControllers(this, context, guild)) {
context.controllers.put(controller.getClass(), controller);
}
return context;
}
private synchronized BotGuildContext getContext(Guild guild) {
long guildId = Long.parseLong(guild.getId());
BotGuildContext context = guildContexts.get(guildId);
if (context == null) {
context = createGuildState(guildId, guild);
guildContexts.put(guildId, context);
}
return context;
}
@Override
public void onMessageReceived(final MessageReceivedEvent event) {
Member member = event.getMember();
if (!event.isFromType(ChannelType.TEXT) || member == null || member.getUser().isBot()) {
return;
}
BotGuildContext guildContext = getContext(event.getGuild());
controllerManager.dispatchMessage(guildContext.controllers, "r!", event.getMessage(), new BotCommandMappingHandler() {
@Override
public void commandNotFound(Message message, String name) {
}
@Override
public void commandWrongParameterCount(Message message, String name, String usage, int given, int required) {
event.getTextChannel().sendMessage("Wrong argument count for command").queue();
}
@Override
public void commandWrongParameterType(Message message, String name, String usage, int index, String value, Class<?> expectedType) {
event.getTextChannel().sendMessage("Wrong argument type for command").queue();
}
@Override
public void commandRestricted(Message message, String name) {
event.getTextChannel().sendMessage("Command not permitted").queue();
}
@Override
public void commandException(Message message, String name, Throwable throwable) {
event.getTextChannel().sendMessage("Command threw an exception").queue();
log.error("Command with content {} threw an exception.", message.getContentDisplay(), throwable);
}
});
}
@Override
public void onGuildLeave(GuildLeaveEvent event) {
// do stuff
}
}
| [
"\"spotifyToken\""
] | [] | [
"spotifyToken"
] | [] | ["spotifyToken"] | java | 1 | 0 | |
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/crashrecovery/test_suspendcheckpoint_crashrecovery_21_to_30.py | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from tinctest.lib import local_path
from tinctest.models.scenario import ScenarioTestCase
from mpp.lib.PSQL import PSQL
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.filerep_util import Filerepe2e_Util
class SuspendcheckpointCrashrecoveryTestCase(ScenarioTestCase):
'''
Testing state of prepared transactions upon crash-recovery
@gucs gp_create_table_random_default_distribution=off
'''
def __init__(self, methodName):
self.gpfile = Gpfilespace()
self.filereputil = Filerepe2e_Util()
super(SuspendcheckpointCrashrecoveryTestCase,self).__init__(methodName)
def setUp(self):
super(SuspendcheckpointCrashrecoveryTestCase, self).setUp()
'''Create filespace '''
self.gpfile.create_filespace('filespace_test_a')
def tearDown(self):
''' Cleanup up the filespace created , reset skip chekpoint fault'''
self.gpfile.drop_filespace('filespace_test_a')
port = os.getenv('PGPORT')
self.filereputil.inject_fault(f='checkpoint', y='reset', r='primary', o='0', p=port)
super(SuspendcheckpointCrashrecoveryTestCase, self).tearDown()
def test_crash_recovery_21_to_30(self):
'''
@note : Steps are same as Cdbfast and Previous tinc schedule
@param skip_state : skip checkpoint
@param cluster_state : sync/change_tracking/resync
@param ddl_type : create/drop
@fault_type : commit/abort .
@crash_type : gpstop_i/gpstop_a/failover_to_primary
@description: Test the state of prepared transactions upon crash-recovery.
Faults are used to suspend the transactions before segments flush commit/abort to xlog.
Crash followed by recovery are performed to evaluate the transaction state
Steps:
0. Check the state of the cluster before proceeding the test execution
1. Run any fault 'skip checkpoint' before pre_sqls
2. Run pre_sqls if any
3. Run any faults required before the trigger_sqls based on the fault_type as well as cluster_state
4. Run trigger_sqls - these are the transactions which will be suspended
5. Crash and recover.
6. Run post_sqls to validate whether the transactions at step 4 are committed/aborted as expected
7. Recover and Validate using gpcheckcat and gpcheckmirrorseg
@data_provider data_types_provider
'''
test_num = self.test_data[0][0]+self.test_data[0][1]
tinctest.logger.info("\n ===============================================")
tinctest.logger.info("\n Starting New Test: %s " % test_num )
tinctest.logger.info("\n ===============================================")
pass_num = self.test_data[1][0]
cluster_state = self.test_data[1][1]
ddl_type = self.test_data[1][2]
test_type = self.test_data[1][3]
aborting_create_needed = self.test_data[1][4]
if test_type == 'abort':
test_dir = '%s_%s_tests' % ('abort', ddl_type)
elif aborting_create_needed == 'True':
test_dir = '%s_%s_%s_tests' % ('abort', ddl_type, 'needed')
else:
test_dir = '%s_%s_tests' % (test_type, ddl_type)
if aborting_create_needed == True and test_type == 'commit':
test_dir = 'abort_create_needed_tests'
elif aborting_create_needed == True and test_type == 'abort' :
test_dir = 'abort_abort_create_needed_tests'
tinctest.logger.info("TestDir == %s " % test_dir )
test_case_list0 = []
test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system')
self.test_case_scenario.append(test_case_list0)
test_case_list1 = []
test_case_list1.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.set_faults_before_executing_pre_sqls', [cluster_state]))
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append('mpp.gpdb.tests.storage.crashrecovery.%s.pre_sql.test_pre_sqls.TestPreSQLClass' % test_dir)
self.test_case_scenario.append(test_case_list2)
test_case_list3 = []
test_case_list3.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.set_faults_before_executing_trigger_sqls', [pass_num, cluster_state, test_type, ddl_type, aborting_create_needed]))
self.test_case_scenario.append(test_case_list3)
test_case_list4 = []
test_case_list4.append('mpp.gpdb.tests.storage.crashrecovery.%s.trigger_sql.test_triggersqls.TestTriggerSQLClass' % test_dir)
test_case_list4.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.run_crash_and_recovery_fast', [test_dir, pass_num, cluster_state, test_type, ddl_type, aborting_create_needed]))
self.test_case_scenario.append(test_case_list4)
test_case_list5 = []
test_case_list5.append('mpp.gpdb.tests.storage.crashrecovery.%s.post_sql.test_postsqls.TestPostSQLClass' % test_dir)
self.test_case_scenario.append(test_case_list5)
test_case_list6 = []
test_case_list6.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.validate_system',[cluster_state]))
self.test_case_scenario.append(test_case_list6)
test_case_list7 = []
test_case_list7.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.backup_output_dir',[test_dir, test_num]))
self.test_case_scenario.append(test_case_list7)
@tinctest.dataProvider('data_types_provider')
def test_data_provider():
data = {
'21_abort_phase2_pass1_create_inchangetracking':[1,'change_tracking','create','abort',False]
,'22_abort_phase2_pass2_create_inchangetracking':[2,'change_tracking','create','abort',False]
,'23_abort_phase2_pass1_create_inresync':[1,'resync','create','abort',False]
,'24_abort_phase2_pass2_create_inresync':[2,'resync','create','abort',False]
,'25_commit_phase1_aborting_create_needed_insync':[0,'sync','create','commit',True]
,'26_commit_phase1_aborting_create_needed_inchangetracking':[0,'change_tracking','create','commit',True]
,'27_commit_phase1_aborting_create_needed_inresync':[0,'resync','create','commit',True]
,'28_commit_phase2_pass1_aborting_create_needed_insync':[1,'sync','create','commit',True]
,'29_commit_phase2_pass2_aborting_create_needed_insync':[2,'sync','create','commit',True]
,'30_commit_phase2_pass1_aborting_create_needed_inchangetracking':[1,'change_tracking','create','commit',True]
}
return data
| [] | [] | [
"PGPORT"
] | [] | ["PGPORT"] | python | 1 | 0 | |
example/commitpr/main.go | // Copyright 2018 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The commitpr command utilizes go-github as a CLI tool for
// pushing files to a branch and creating a pull request from it.
// It takes an auth token as an environment variable and creates
// the commit and the PR under the account affiliated with that token.
//
// The purpose of this example is to show how to use refs, trees and commits to
// create commits and pull requests.
//
// Note, if you want to push a single file, you probably prefer to use the
// content API. An example is available here:
// https://godoc.org/github.com/google/go-github/github#example-RepositoriesService-CreateFile
//
// Note, for this to work at least 1 commit is needed, so if you use this
// after creating a repository you might want to make sure you set `AutoInit` to
// `true`.
package main
import (
"context"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"time"
"github.com/google/go-github/v37/github"
"golang.org/x/oauth2"
)
var (
sourceOwner = flag.String("source-owner", "", "Name of the owner (user or org) of the repo to create the commit in.")
sourceRepo = flag.String("source-repo", "", "Name of repo to create the commit in.")
commitMessage = flag.String("commit-message", "", "Content of the commit message.")
commitBranch = flag.String("commit-branch", "", "Name of branch to create the commit in. If it does not already exist, it will be created using the `base-branch` parameter")
baseBranch = flag.String("base-branch", "master", "Name of branch to create the `commit-branch` from.")
prRepoOwner = flag.String("merge-repo-owner", "", "Name of the owner (user or org) of the repo to create the PR against. If not specified, the value of the `-source-owner` flag will be used.")
prRepo = flag.String("merge-repo", "", "Name of repo to create the PR against. If not specified, the value of the `-source-repo` flag will be used.")
prBranch = flag.String("merge-branch", "master", "Name of branch to create the PR against (the one you want to merge your branch in via the PR).")
prSubject = flag.String("pr-title", "", "Title of the pull request. If not specified, no pull request will be created.")
prDescription = flag.String("pr-text", "", "Text to put in the description of the pull request.")
sourceFiles = flag.String("files", "", `Comma-separated list of files to commit and their location.
The local file is separated by its target location by a semi-colon.
If the file should be in the same location with the same name, you can just put the file name and omit the repetition.
Example: README.md,main.go:github/examples/commitpr/main.go`)
authorName = flag.String("author-name", "", "Name of the author of the commit.")
authorEmail = flag.String("author-email", "", "Email of the author of the commit.")
)
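// Illustrative invocation (example values only; the token comes from the GITHUB_AUTH_TOKEN env var):
//   GITHUB_AUTH_TOKEN=<token> commitpr -source-owner=someuser -source-repo=somerepo \
//     -commit-branch=example-branch -commit-message="Example commit" -files=README.md \
//     -author-name="Jane Doe" -author-email=jane@example.com -pr-title="Example PR"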
var client *github.Client
var ctx = context.Background()
// getRef returns the commit branch reference object if it exists or creates it
// from the base branch before returning it.
func getRef() (ref *github.Reference, err error) {
if ref, _, err = client.Git.GetRef(ctx, *sourceOwner, *sourceRepo, "refs/heads/"+*commitBranch); err == nil {
return ref, nil
}
// We consider that an error means the branch has not been found and needs to
// be created.
if *commitBranch == *baseBranch {
return nil, errors.New("The commit branch does not exist but `-base-branch` is the same as `-commit-branch`")
}
if *baseBranch == "" {
return nil, errors.New("The `-base-branch` should not be set to an empty string when the branch specified by `-commit-branch` does not exist")
}
var baseRef *github.Reference
if baseRef, _, err = client.Git.GetRef(ctx, *sourceOwner, *sourceRepo, "refs/heads/"+*baseBranch); err != nil {
return nil, err
}
newRef := &github.Reference{Ref: github.String("refs/heads/" + *commitBranch), Object: &github.GitObject{SHA: baseRef.Object.SHA}}
ref, _, err = client.Git.CreateRef(ctx, *sourceOwner, *sourceRepo, newRef)
return ref, err
}
// getTree generates the tree to commit based on the given files and the commit
// of the ref you got in getRef.
func getTree(ref *github.Reference) (tree *github.Tree, err error) {
// Create a tree with what to commit.
entries := []*github.TreeEntry{}
// Load each file into the tree.
for _, fileArg := range strings.Split(*sourceFiles, ",") {
file, content, err := getFileContent(fileArg)
if err != nil {
return nil, err
}
entries = append(entries, &github.TreeEntry{Path: github.String(file), Type: github.String("blob"), Content: github.String(string(content)), Mode: github.String("100644")})
}
tree, _, err = client.Git.CreateTree(ctx, *sourceOwner, *sourceRepo, *ref.Object.SHA, entries)
return tree, err
}
// getFileContent loads the local content of a file and return the target name
// of the file in the target repository and its contents.
func getFileContent(fileArg string) (targetName string, b []byte, err error) {
var localFile string
files := strings.Split(fileArg, ":")
switch {
case len(files) < 1:
return "", nil, errors.New("empty `-files` parameter")
case len(files) == 1:
localFile = files[0]
targetName = files[0]
default:
localFile = files[0]
targetName = files[1]
}
b, err = ioutil.ReadFile(localFile)
return targetName, b, err
}
// pushCommit creates the commit in the given reference using the given tree.
func pushCommit(ref *github.Reference, tree *github.Tree) (err error) {
// Get the parent commit to attach the commit to.
parent, _, err := client.Repositories.GetCommit(ctx, *sourceOwner, *sourceRepo, *ref.Object.SHA)
if err != nil {
return err
}
// This is not always populated, but is needed.
parent.Commit.SHA = parent.SHA
// Create the commit using the tree.
date := time.Now()
author := &github.CommitAuthor{Date: &date, Name: authorName, Email: authorEmail}
commit := &github.Commit{Author: author, Message: commitMessage, Tree: tree, Parents: []*github.Commit{parent.Commit}}
newCommit, _, err := client.Git.CreateCommit(ctx, *sourceOwner, *sourceRepo, commit)
if err != nil {
return err
}
// Attach the commit to the commit branch by updating its reference.
ref.Object.SHA = newCommit.SHA
_, _, err = client.Git.UpdateRef(ctx, *sourceOwner, *sourceRepo, ref, false)
return err
}
// createPR creates a pull request. Based on: https://godoc.org/github.com/google/go-github/github#example-PullRequestsService-Create
func createPR() (err error) {
if *prSubject == "" {
return errors.New("missing `-pr-title` flag; skipping PR creation")
}
if *prRepoOwner != "" && *prRepoOwner != *sourceOwner {
*commitBranch = fmt.Sprintf("%s:%s", *sourceOwner, *commitBranch)
} else {
prRepoOwner = sourceOwner
}
if *prRepo == "" {
prRepo = sourceRepo
}
newPR := &github.NewPullRequest{
Title: prSubject,
Head: commitBranch,
Base: prBranch,
Body: prDescription,
MaintainerCanModify: github.Bool(true),
}
pr, _, err := client.PullRequests.Create(ctx, *prRepoOwner, *prRepo, newPR)
if err != nil {
return err
}
fmt.Printf("PR created: %s\n", pr.GetHTMLURL())
return nil
}
func main() {
flag.Parse()
token := os.Getenv("GITHUB_AUTH_TOKEN")
if token == "" {
log.Fatal("Unauthorized: No token present")
}
if *sourceOwner == "" || *sourceRepo == "" || *commitBranch == "" || *sourceFiles == "" || *authorName == "" || *authorEmail == "" {
log.Fatal("You need to specify a non-empty value for the flags `-source-owner`, `-source-repo`, `-commit-branch`, `-files`, `-author-name` and `-author-email`")
}
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
tc := oauth2.NewClient(ctx, ts)
client = github.NewClient(tc)
ref, err := getRef()
if err != nil {
log.Fatalf("Unable to get/create the commit reference: %s\n", err)
}
if ref == nil {
log.Fatalf("No error where returned but the reference is nil")
}
tree, err := getTree(ref)
if err != nil {
log.Fatalf("Unable to create the tree based on the provided files: %s\n", err)
}
if err := pushCommit(ref, tree); err != nil {
log.Fatalf("Unable to create the commit: %s\n", err)
}
if err := createPR(); err != nil {
log.Fatalf("Error while creating the pull request: %s", err)
}
}
| [
"\"GITHUB_AUTH_TOKEN\""
] | [] | [
"GITHUB_AUTH_TOKEN"
] | [] | ["GITHUB_AUTH_TOKEN"] | go | 1 | 0 | |
main.go | package main
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/awoo-detat/moon/bot"
)
func main() {
log.Println("starting")
token := os.Getenv("DISCORD_TOKEN")
if token == "" {
log.Fatal("DISCORD_TOKEN not found in environment variables")
}
auth := "Bot " + token
moon := bot.New(auth)
log.Println("created")
moon.Start()
defer moon.Close()
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
}
| [
"\"DISCORD_TOKEN\""
] | [] | [
"DISCORD_TOKEN"
] | [] | ["DISCORD_TOKEN"] | go | 1 | 0 | |
dev-tools/mage/settings.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package mage
import (
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/magefile/mage/sh"
"github.com/pkg/errors"
"golang.org/x/tools/go/vcs"
"github.com/elastic/beats/v7/dev-tools/mage/gotool"
)
const (
fpmVersion = "1.11.0"
// Docker images. See https://github.com/elastic/golang-crossbuild.
beatsFPMImage = "docker.elastic.co/beats-dev/fpm"
// BeatsCrossBuildImage is the image used for crossbuilding Beats.
BeatsCrossBuildImage = "docker.elastic.co/beats-dev/golang-crossbuild"
elasticBeatsImportPath = "github.com/elastic/beats"
elasticBeatsModulePath = "github.com/elastic/beats/v7"
)
// Common settings with defaults derived from files, CWD, and environment.
var (
GOOS = build.Default.GOOS
GOARCH = build.Default.GOARCH
GOARM = EnvOr("GOARM", "")
Platform = MakePlatformAttributes(GOOS, GOARCH, GOARM)
BinaryExt = ""
XPackDir = "../x-pack"
RaceDetector = false
TestCoverage = false
// CrossBuildMountModcache, if true, mounts $GOPATH/pkg/mod into
// the crossbuild images at /go/pkg/mod, read-only.
CrossBuildMountModcache = true
BeatName = EnvOr("BEAT_NAME", filepath.Base(CWD()))
BeatServiceName = EnvOr("BEAT_SERVICE_NAME", BeatName)
BeatIndexPrefix = EnvOr("BEAT_INDEX_PREFIX", BeatName)
BeatDescription = EnvOr("BEAT_DESCRIPTION", "")
BeatVendor = EnvOr("BEAT_VENDOR", "Elastic")
BeatLicense = EnvOr("BEAT_LICENSE", "ASL 2.0")
BeatURL = EnvOr("BEAT_URL", "https://www.elastic.co/products/beats/"+BeatName)
BeatUser = EnvOr("BEAT_USER", "root")
BeatProjectType ProjectType
Snapshot bool
versionQualified bool
versionQualifier string
FuncMap = map[string]interface{}{
"beat_doc_branch": BeatDocBranch,
"beat_version": BeatQualifiedVersion,
"commit": CommitHash,
"commit_short": CommitHashShort,
"date": BuildDate,
"elastic_beats_dir": ElasticBeatsDir,
"go_version": GoVersion,
"repo": GetProjectRepoInfo,
"title": strings.Title,
"tolower": strings.ToLower,
"contains": strings.Contains,
}
)
func init() {
if GOOS == "windows" {
BinaryExt = ".exe"
}
var err error
RaceDetector, err = strconv.ParseBool(EnvOr("RACE_DETECTOR", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse RACE_DETECTOR env value"))
}
TestCoverage, err = strconv.ParseBool(EnvOr("TEST_COVERAGE", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse TEST_COVERAGE env value"))
}
Snapshot, err = strconv.ParseBool(EnvOr("SNAPSHOT", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse SNAPSHOT env value"))
}
versionQualifier, versionQualified = os.LookupEnv("VERSION_QUALIFIER")
}
// ProjectType specifies the type of project (OSS vs X-Pack).
type ProjectType uint8
// Project types.
const (
OSSProject ProjectType = iota
XPackProject
CommunityProject
)
// ErrUnknownProjectType is returned if an unknown ProjectType value is used.
var ErrUnknownProjectType = fmt.Errorf("unknown ProjectType")
// EnvMap returns a map containing the common settings variables and all variables
// from the environment. args are appended to the output prior to adding the
// environment variables (so env vars have the highest precedence).
func EnvMap(args ...map[string]interface{}) map[string]interface{} {
envMap := varMap(args...)
// Add the environment (highest precedence).
for _, e := range os.Environ() {
env := strings.SplitN(e, "=", 2)
envMap[env[0]] = env[1]
}
return envMap
}
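// Illustrative precedence: with GOOS=darwin set in the environment,
// EnvMap(map[string]interface{}{"GOOS": "linux"})["GOOS"] yields "darwin" (the environment wins).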
func varMap(args ...map[string]interface{}) map[string]interface{} {
data := map[string]interface{}{
"GOOS": GOOS,
"GOARCH": GOARCH,
"GOARM": GOARM,
"Platform": Platform,
"BinaryExt": BinaryExt,
"XPackDir": XPackDir,
"BeatName": BeatName,
"BeatServiceName": BeatServiceName,
"BeatIndexPrefix": BeatIndexPrefix,
"BeatDescription": BeatDescription,
"BeatVendor": BeatVendor,
"BeatLicense": BeatLicense,
"BeatURL": BeatURL,
"BeatUser": BeatUser,
"Snapshot": Snapshot,
"Qualifier": versionQualifier,
}
// Add the extra args to the map.
for _, m := range args {
for k, v := range m {
data[k] = v
}
}
return data
}
func dumpVariables() (string, error) {
var dumpTemplate = `## Variables
GOOS = {{.GOOS}}
GOARCH = {{.GOARCH}}
GOARM = {{.GOARM}}
Platform = {{.Platform}}
BinaryExt = {{.BinaryExt}}
XPackDir = {{.XPackDir}}
BeatName = {{.BeatName}}
BeatServiceName = {{.BeatServiceName}}
BeatIndexPrefix = {{.BeatIndexPrefix}}
BeatDescription = {{.BeatDescription}}
BeatVendor = {{.BeatVendor}}
BeatLicense = {{.BeatLicense}}
BeatURL = {{.BeatURL}}
BeatUser = {{.BeatUser}}
VersionQualifier = {{.Qualifier}}
## Functions
beat_doc_branch = {{ beat_doc_branch }}
beat_version = {{ beat_version }}
commit = {{ commit }}
date = {{ date }}
elastic_beats_dir = {{ elastic_beats_dir }}
go_version = {{ go_version }}
repo.RootImportPath = {{ repo.RootImportPath }}
repo.CanonicalRootImportPath = {{ repo.CanonicalRootImportPath }}
repo.RootDir = {{ repo.RootDir }}
repo.ImportPath = {{ repo.ImportPath }}
repo.SubDir = {{ repo.SubDir }}
`
return Expand(dumpTemplate)
}
// DumpVariables writes the template variables and values to stdout.
func DumpVariables() error {
out, err := dumpVariables()
if err != nil {
return err
}
fmt.Println(out)
return nil
}
var (
commitHash string
commitHashOnce sync.Once
)
// CommitHash returns the full length git commit hash.
func CommitHash() (string, error) {
var err error
commitHashOnce.Do(func() {
commitHash, err = sh.Output("git", "rev-parse", "HEAD")
})
return commitHash, err
}
// CommitHashShort returns the short length git commit hash.
func CommitHashShort() (string, error) {
shortHash, err := CommitHash()
if len(shortHash) > 6 {
shortHash = shortHash[:6]
}
return shortHash, err
}
var (
elasticBeatsDirValue string
elasticBeatsDirErr error
elasticBeatsDirLock sync.Mutex
)
// SetElasticBeatsDir sets the internal elastic beats dir to a preassigned value
func SetElasticBeatsDir(path string) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
elasticBeatsDirValue = path
}
// ElasticBeatsDir returns the path to Elastic beats dir.
func ElasticBeatsDir() (string, error) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
if elasticBeatsDirValue != "" || elasticBeatsDirErr != nil {
return elasticBeatsDirValue, elasticBeatsDirErr
}
elasticBeatsDirValue, elasticBeatsDirErr = findElasticBeatsDir()
if elasticBeatsDirErr == nil {
log.Println("Found Elastic Beats dir at", elasticBeatsDirValue)
}
return elasticBeatsDirValue, elasticBeatsDirErr
}
// findElasticBeatsDir returns the root directory of the Elastic Beats module, using "go list".
//
// When running within the Elastic Beats repo, this will return the repo root. Otherwise,
// it will return the root directory of the module from within the module cache or vendor
// directory.
func findElasticBeatsDir() (string, error) {
repo, err := GetProjectRepoInfo()
if err != nil {
return "", err
}
if repo.IsElasticBeats() {
return repo.RootDir, nil
}
return gotool.ListModuleCacheDir(elasticBeatsModulePath)
}
var (
buildDate = time.Now().UTC().Format(time.RFC3339)
)
// BuildDate returns the time that the build started.
func BuildDate() string {
return buildDate
}
var (
goVersionValue string
goVersionErr error
goVersionOnce sync.Once
)
// GoVersion returns the version of Go defined in the project's .go-version
// file.
func GoVersion() (string, error) {
goVersionOnce.Do(func() {
goVersionValue = os.Getenv("BEAT_GO_VERSION")
if goVersionValue != "" {
return
}
goVersionValue, goVersionErr = getBuildVariableSources().GetGoVersion()
})
return goVersionValue, goVersionErr
}
var (
beatVersionRegex = regexp.MustCompile(`(?m)^const defaultBeatVersion = "(.+)"\r?$`)
beatVersionValue string
beatVersionErr error
beatVersionOnce sync.Once
)
// BeatQualifiedVersion returns the Beat's qualified version. The value can be overwritten by
// setting VERSION_QUALIFIER in the environment.
func BeatQualifiedVersion() (string, error) {
version, err := beatVersion()
if err != nil {
return "", err
}
// version qualifier can intentionally be set to "" to override build time var
if !versionQualified || versionQualifier == "" {
return version, nil
}
return version + "-" + versionQualifier, nil
}
// BeatVersion returns the Beat's version. The value can be overridden by
// setting BEAT_VERSION in the environment.
func beatVersion() (string, error) {
beatVersionOnce.Do(func() {
beatVersionValue = os.Getenv("BEAT_VERSION")
if beatVersionValue != "" {
return
}
beatVersionValue, beatVersionErr = getBuildVariableSources().GetBeatVersion()
})
return beatVersionValue, beatVersionErr
}
var (
beatDocBranchRegex = regexp.MustCompile(`(?m)doc-branch:\s*([^\s]+)\r?$`)
beatDocBranchValue string
beatDocBranchErr error
beatDocBranchOnce sync.Once
)
// BeatDocBranch returns the documentation branch name associated with the
// Beat branch.
func BeatDocBranch() (string, error) {
beatDocBranchOnce.Do(func() {
beatDocBranchValue = os.Getenv("BEAT_DOC_BRANCH")
if beatDocBranchValue != "" {
return
}
beatDocBranchValue, beatDocBranchErr = getBuildVariableSources().GetDocBranch()
})
return beatDocBranchValue, beatDocBranchErr
}
// --- BuildVariableSources
var (
// DefaultBeatBuildVariableSources contains the default locations build
// variables are read from by Elastic Beats.
DefaultBeatBuildVariableSources = &BuildVariableSources{
BeatVersion: "{{ elastic_beats_dir }}/libbeat/version/version.go",
GoVersion: "{{ elastic_beats_dir }}/.go-version",
DocBranch: "{{ elastic_beats_dir }}/libbeat/docs/version.asciidoc",
}
buildVariableSources *BuildVariableSources
buildVariableSourcesLock sync.Mutex
)
// SetBuildVariableSources sets the BuildVariableSources that defines where
// certain build data should be sourced from. Community Beats must call this.
func SetBuildVariableSources(s *BuildVariableSources) {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
buildVariableSources = s
}
func getBuildVariableSources() *BuildVariableSources {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
if buildVariableSources != nil {
return buildVariableSources
}
repo, err := GetProjectRepoInfo()
if err != nil {
panic(err)
}
if repo.IsElasticBeats() {
buildVariableSources = DefaultBeatBuildVariableSources
return buildVariableSources
}
panic(errors.Errorf("magefile must call devtools.SetBuildVariableSources() "+
"because it is not an elastic beat (repo=%+v)", repo.RootImportPath))
}
// BuildVariableSources is used to explicitly define what files contain build
// variables and how to parse the values from that file. This removes ambiguity
// about where the data is sources and allows a degree of customization for
// community Beats.
//
// Default parsers are used if one is not defined.
type BuildVariableSources struct {
// File containing the Beat version.
BeatVersion string
// Parses the Beat version from the BeatVersion file.
BeatVersionParser func(data []byte) (string, error)
// File containing the Go version to be used in cross-builds.
GoVersion string
// Parses the Go version from the GoVersion file.
GoVersionParser func(data []byte) (string, error)
// File containing the documentation branch.
DocBranch string
// Parses the documentation branch from the DocBranch file.
DocBranchParser func(data []byte) (string, error)
}
func (s *BuildVariableSources) expandVar(in string) (string, error) {
return expandTemplate("inline", in, map[string]interface{}{
"elastic_beats_dir": ElasticBeatsDir,
})
}
// GetBeatVersion reads the BeatVersion file and parses the version from it.
func (s *BuildVariableSources) GetBeatVersion() (string, error) {
file, err := s.expandVar(s.BeatVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read beat version file=%v", file)
}
if s.BeatVersionParser == nil {
s.BeatVersionParser = parseBeatVersion
}
return s.BeatVersionParser(data)
}
// GetGoVersion reads the GoVersion file and parses the version from it.
func (s *BuildVariableSources) GetGoVersion() (string, error) {
file, err := s.expandVar(s.GoVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read go version file=%v", file)
}
if s.GoVersionParser == nil {
s.GoVersionParser = parseGoVersion
}
return s.GoVersionParser(data)
}
// GetDocBranch reads the DocBranch file and parses the branch from it.
func (s *BuildVariableSources) GetDocBranch() (string, error) {
file, err := s.expandVar(s.DocBranch)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read doc branch file=%v", file)
}
if s.DocBranchParser == nil {
s.DocBranchParser = parseDocBranch
}
return s.DocBranchParser(data)
}
func parseBeatVersion(data []byte) (string, error) {
matches := beatVersionRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat version file")
}
func parseGoVersion(data []byte) (string, error) {
return strings.TrimSpace(string(data)), nil
}
func parseDocBranch(data []byte) (string, error) {
matches := beatDocBranchRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat doc branch")
}
// --- ProjectRepoInfo
// ProjectRepoInfo contains information about the project's repo.
type ProjectRepoInfo struct {
RootImportPath string // Import path at the project root.
CanonicalRootImportPath string // Pre-modules root import path (does not contain semantic import version identifier).
RootDir string // Root directory of the project.
ImportPath string // Import path of the current directory.
SubDir string // Relative path from the root dir to the current dir.
}
// IsElasticBeats returns true if the current project is
// github.com/elastic/beats.
func (r *ProjectRepoInfo) IsElasticBeats() bool {
return r.CanonicalRootImportPath == elasticBeatsImportPath
}
var (
repoInfoValue *ProjectRepoInfo
repoInfoErr error
repoInfoOnce sync.Once
)
// GetProjectRepoInfo returns information about the repo including the root
// import path and the current directory's import path.
func GetProjectRepoInfo() (*ProjectRepoInfo, error) {
repoInfoOnce.Do(func() {
if isUnderGOPATH() {
repoInfoValue, repoInfoErr = getProjectRepoInfoUnderGopath()
} else {
repoInfoValue, repoInfoErr = getProjectRepoInfoWithModules()
}
})
return repoInfoValue, repoInfoErr
}
func isUnderGOPATH() bool {
underGOPATH := false
srcDirs, err := listSrcGOPATHs()
if err != nil {
return false
}
for _, srcDir := range srcDirs {
rel, err := filepath.Rel(srcDir, CWD())
if err != nil {
continue
}
if !strings.Contains(rel, "..") {
underGOPATH = true
}
}
return underGOPATH
}
func getProjectRepoInfoWithModules() (*ProjectRepoInfo, error) {
var (
cwd = CWD()
rootDir string
subDir string
)
possibleRoot := cwd
var errs []string
for {
isRoot, err := isGoModRoot(possibleRoot)
if err != nil {
errs = append(errs, err.Error())
}
if isRoot {
rootDir = possibleRoot
subDir, err = filepath.Rel(rootDir, cwd)
if err != nil {
errs = append(errs, err.Error())
}
break
}
possibleRoot = filepath.Dir(possibleRoot)
}
if rootDir == "" {
return nil, errors.Errorf("failed to find root dir of module file: %v", errs)
}
rootImportPath, err := gotool.GetModuleName()
if err != nil {
return nil, err
}
return &ProjectRepoInfo{
RootImportPath: rootImportPath,
CanonicalRootImportPath: filepath.ToSlash(extractCanonicalRootImportPath(rootImportPath)),
RootDir: rootDir,
SubDir: subDir,
ImportPath: filepath.ToSlash(filepath.Join(rootImportPath, subDir)),
}, nil
}
func isGoModRoot(path string) (bool, error) {
gomodPath := filepath.Join(path, "go.mod")
_, err := os.Stat(gomodPath)
if os.IsNotExist(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
func getProjectRepoInfoUnderGopath() (*ProjectRepoInfo, error) {
var (
cwd = CWD()
errs []string
rootDir string
)
srcDirs, err := listSrcGOPATHs()
if err != nil {
return nil, err
}
for _, srcDir := range srcDirs {
_, root, err := vcs.FromDir(cwd, srcDir)
if err != nil {
// Try the next gopath.
errs = append(errs, err.Error())
continue
}
rootDir = filepath.Join(srcDir, root)
break
}
if rootDir == "" {
return nil, errors.Errorf("error while determining root directory: %v", errs)
}
subDir, err := filepath.Rel(rootDir, cwd)
if err != nil {
return nil, errors.Wrap(err, "failed to get relative path to repo root")
}
rootImportPath, err := gotool.GetModuleName()
if err != nil {
return nil, err
}
return &ProjectRepoInfo{
RootImportPath: rootImportPath,
CanonicalRootImportPath: filepath.ToSlash(extractCanonicalRootImportPath(rootImportPath)),
RootDir: rootDir,
SubDir: subDir,
ImportPath: filepath.ToSlash(filepath.Join(rootImportPath, subDir)),
}, nil
}
func extractCanonicalRootImportPath(rootImportPath string) string {
// In order to be compatible with go modules, the root import
// path of any module at major version v2 or higher must include
// the major version.
// Ref: https://github.com/golang/go/wiki/Modules#semantic-import-versioning
//
// Thus, Beats has to include the major version as well.
// This regex removes the major version from the import path.
re := regexp.MustCompile(`(/v[1-9][0-9]*)$`)
return re.ReplaceAllString(rootImportPath, "")
}
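// Illustrative: "github.com/elastic/beats/v7" -> "github.com/elastic/beats",
// while "github.com/example/project" (example path) is returned unchanged.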
func listSrcGOPATHs() ([]string, error) {
var (
cwd = CWD()
errs []string
srcDirs []string
)
for _, gopath := range filepath.SplitList(build.Default.GOPATH) {
gopath = filepath.Clean(gopath)
if !strings.HasPrefix(cwd, gopath) {
// Fixes an issue on macOS when /var is actually /private/var.
var err error
gopath, err = filepath.EvalSymlinks(gopath)
if err != nil {
errs = append(errs, err.Error())
continue
}
}
srcDirs = append(srcDirs, filepath.Join(gopath, "src"))
}
if len(srcDirs) == 0 {
return srcDirs, errors.Errorf("failed to find any GOPATH %v", errs)
}
return srcDirs, nil
}
| [
"\"BEAT_GO_VERSION\"",
"\"BEAT_VERSION\"",
"\"BEAT_DOC_BRANCH\""
] | [] | [
"BEAT_VERSION",
"BEAT_GO_VERSION",
"BEAT_DOC_BRANCH"
] | [] | ["BEAT_VERSION", "BEAT_GO_VERSION", "BEAT_DOC_BRANCH"] | go | 3 | 0 | |
api/auth/token.go | package auth
import (
"fmt"
"net/http"
"os"
"strings"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/gofrs/uuid"
)
// CreateToken returns a signed JWT for the given user ID that expires after one hour.
func CreateToken(userID uuid.UUID) (string, error) {
claims := jwt.MapClaims{}
claims["authorized"] = true
claims["user_id"] = userID.String()
claims["exp"] = time.Now().Add(time.Hour * 1).Unix() //Token expires after 1 hour
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
return token.SignedString([]byte(os.Getenv("API_SECRET")))
}
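// extractToken returns the JWT from the "token" query parameter or, failing that, from the Authorization Bearer header.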
func extractToken(r *http.Request) string {
keys := r.URL.Query()
token := keys.Get("token")
if token != "" {
return token
}
bearerToken := r.Header.Get("Authorization")
if len(strings.Split(bearerToken, " ")) == 2 {
return strings.Split(bearerToken, " ")[1]
}
return ""
}
// ExtractJWTToken extracts and returns a jwt token from request
func ExtractJWTToken(r *http.Request) (*jwt.Token, error) {
tokenString := extractToken(r)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("API_SECRET")), nil
})
if err != nil {
return nil, err
}
return token, nil
}
// ValidateToken validates the token in request
func ValidateToken(token *jwt.Token) error {
if !token.Valid {
return fmt.Errorf("token is not valid")
}
return nil
}
// ExtractUserID extract the user from token in request
func ExtractUserID(token *jwt.Token) (uuid.UUID, error) {
claims, ok := token.Claims.(jwt.MapClaims)
if ok {
uid, err := uuid.FromString(fmt.Sprintf("%s", claims["user_id"]))
if err != nil {
return uuid.Nil, fmt.Errorf("failed to parse UUID %v: %v", claims["user_id"], err)
}
return uid, nil
}
return uuid.Nil, fmt.Errorf("failed to extract UUID")
}
| [
"\"API_SECRET\"",
"\"API_SECRET\""
] | [] | [
"API_SECRET"
] | [] | ["API_SECRET"] | go | 1 | 0 | |
mongo/integration/mtest/setup.go | // Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package mtest
import (
"context"
"errors"
"fmt"
"math"
"os"
"strconv"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/description"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"go.mongodb.org/mongo-driver/mongo/writeconcern"
"go.mongodb.org/mongo-driver/x/mongo/driver"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
"go.mongodb.org/mongo-driver/x/mongo/driver/ocsp"
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
)
const (
	// TestDb specifies the name of the default test database.
	TestDb = "test"
)
// testContext holds the global context for the integration tests. The testContext members should only be initialized
// once during the global setup in TestMain. These variables should only be accessed indirectly through MongoTest
// instances.
var testContext struct {
connString connstring.ConnString
topo *topology.Topology
topoKind TopologyKind
// shardedReplicaSet will be true if we're connected to a sharded cluster and each shard is backed by a replica set.
// We track this as a separate boolean rather than setting topoKind to ShardedReplicaSet because a general
// "Sharded" constraint in a test should match both Sharded and ShardedReplicaSet.
shardedReplicaSet bool
client *mongo.Client // client used for setup and teardown
serverVersion string
authEnabled bool
sslEnabled bool
enterpriseServer bool
dataLake bool
requireAPIVersion bool
serverParameters bson.Raw
singleMongosLoadBalancerURI string
multiMongosLoadBalancerURI string
serverless bool
}
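// setupClient creates the client used for setup and teardown, pinned to a single host and using a majority write concern.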
func setupClient(cs connstring.ConnString, opts *options.ClientOptions) (*mongo.Client, error) {
wcMajority := writeconcern.New(writeconcern.WMajority())
// set ServerAPIOptions to latest version if required
if opts.ServerAPIOptions == nil && testContext.requireAPIVersion {
opts.SetServerAPIOptions(options.ServerAPI(driver.TestServerAPIVersion))
}
// for sharded clusters, pin to one host. Due to how the cache is implemented on 4.0 and 4.2, behavior
// can be inconsistent when multiple mongoses are used
return mongo.Connect(Background, opts.ApplyURI(cs.Original).SetWriteConcern(wcMajority).SetHosts(cs.Hosts[:1]))
}
// Setup initializes the current testing context.
// This function must only be called one time and must be called before any tests run.
func Setup(setupOpts ...*SetupOptions) error {
opts := MergeSetupOptions(setupOpts...)
var err error
switch {
case opts.URI != nil:
testContext.connString, err = connstring.ParseAndValidate(*opts.URI)
default:
testContext.connString, err = getClusterConnString()
}
if err != nil {
return fmt.Errorf("error getting connection string: %v", err)
}
testContext.dataLake = os.Getenv("ATLAS_DATA_LAKE_INTEGRATION_TEST") == "true"
testContext.requireAPIVersion = os.Getenv("REQUIRE_API_VERSION") == "true"
connectionOpts := []topology.ConnectionOption{
topology.WithOCSPCache(func(ocsp.Cache) ocsp.Cache {
return ocsp.NewCache()
}),
}
serverOpts := []topology.ServerOption{
topology.WithConnectionOptions(func(opts ...topology.ConnectionOption) []topology.ConnectionOption {
return append(opts, connectionOpts...)
}),
}
if testContext.requireAPIVersion {
serverOpts = append(serverOpts,
topology.WithServerAPI(func(*driver.ServerAPIOptions) *driver.ServerAPIOptions {
return driver.NewServerAPIOptions(driver.TestServerAPIVersion)
}),
)
}
testContext.topo, err = topology.New(
topology.WithConnString(func(connstring.ConnString) connstring.ConnString {
return testContext.connString
}),
topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
return append(opts, serverOpts...)
}),
)
if err != nil {
return fmt.Errorf("error creating topology: %v", err)
}
if err = testContext.topo.Connect(); err != nil {
return fmt.Errorf("error connecting topology: %v", err)
}
testContext.client, err = setupClient(testContext.connString, options.Client())
if err != nil {
return fmt.Errorf("error connecting test client: %v", err)
}
pingCtx, cancel := context.WithTimeout(Background, 2*time.Second)
defer cancel()
if err := testContext.client.Ping(pingCtx, readpref.Primary()); err != nil {
return fmt.Errorf("ping error: %v; make sure the deployment is running on URI %v", err,
testContext.connString.Original)
}
if testContext.serverVersion, err = getServerVersion(); err != nil {
return fmt.Errorf("error getting server version: %v", err)
}
switch testContext.topo.Kind() {
case description.Single:
testContext.topoKind = Single
case description.ReplicaSet, description.ReplicaSetWithPrimary, description.ReplicaSetNoPrimary:
testContext.topoKind = ReplicaSet
case description.Sharded:
testContext.topoKind = Sharded
case description.LoadBalanced:
testContext.topoKind = LoadBalanced
default:
return fmt.Errorf("could not detect topology kind; current topology: %s", testContext.topo.String())
}
// If we're connected to a sharded cluster, determine if the cluster is backed by replica sets.
if testContext.topoKind == Sharded {
// Run a find against config.shards and get each document in the collection.
cursor, err := testContext.client.Database("config").Collection("shards").Find(Background, bson.D{})
if err != nil {
return fmt.Errorf("error running find against config.shards: %v", err)
}
defer cursor.Close(Background)
var shards []struct {
Host string `bson:"host"`
}
if err := cursor.All(Background, &shards); err != nil {
return fmt.Errorf("error getting results find against config.shards: %v", err)
}
// Each document's host field will contain a single hostname if the shard is a standalone. If it's a replica
// set, the host field will be in the format "replicaSetName/host1,host2,...". Therefore, we can determine that
// the shard is a standalone if the "/" character isn't present.
var foundStandalone bool
for _, shard := range shards {
if !strings.Contains(shard.Host, "/") {
foundStandalone = true
break
}
}
if !foundStandalone {
testContext.shardedReplicaSet = true
}
}
// For load balanced clusters, retrieve the required LB URIs and add additional information (e.g. TLS options) to
// them if necessary.
if testContext.topoKind == LoadBalanced {
singleMongosURI := os.Getenv("SINGLE_MONGOS_LB_URI")
if singleMongosURI == "" {
return errors.New("SINGLE_MONGOS_LB_URI must be set when running against load balanced clusters")
}
testContext.singleMongosLoadBalancerURI, err = addNecessaryParamsToURI(singleMongosURI)
if err != nil {
return fmt.Errorf("error getting single mongos load balancer uri: %v", err)
}
multiMongosURI := os.Getenv("MULTI_MONGOS_LB_URI")
if multiMongosURI == "" {
return errors.New("MULTI_MONGOS_LB_URI must be set when running against load balanced clusters")
}
testContext.multiMongosLoadBalancerURI, err = addNecessaryParamsToURI(multiMongosURI)
if err != nil {
return fmt.Errorf("error getting multi mongos load balancer uri: %v", err)
}
}
testContext.authEnabled = os.Getenv("AUTH") == "auth"
testContext.sslEnabled = os.Getenv("SSL") == "ssl"
testContext.serverless = os.Getenv("SERVERLESS") == "serverless"
biRes, err := testContext.client.Database("admin").RunCommand(Background, bson.D{{"buildInfo", 1}}).DecodeBytes()
if err != nil {
return fmt.Errorf("buildInfo error: %v", err)
}
modulesRaw, err := biRes.LookupErr("modules")
if err == nil {
// older server versions don't report "modules" field in buildInfo result
modules, _ := modulesRaw.Array().Values()
for _, module := range modules {
if module.StringValue() == "enterprise" {
testContext.enterpriseServer = true
break
}
}
}
// Get server parameters if test is not running against ADL; ADL does not have "getParameter" command.
if !testContext.dataLake {
db := testContext.client.Database("admin")
testContext.serverParameters, err = db.RunCommand(Background, bson.D{{"getParameter", "*"}}).DecodeBytes()
if err != nil {
return fmt.Errorf("error getting serverParameters: %v", err)
}
}
return nil
}
// Teardown cleans up resources initialized by Setup.
// This function must be called once after all tests have finished running.
func Teardown() error {
// Dropping the test database causes an error against Atlas Data Lake.
if !testContext.dataLake {
if err := testContext.client.Database(TestDb).Drop(Background); err != nil {
return fmt.Errorf("error dropping test database: %v", err)
}
}
if err := testContext.client.Disconnect(Background); err != nil {
return fmt.Errorf("error disconnecting test client: %v", err)
}
if err := testContext.topo.Disconnect(Background); err != nil {
return fmt.Errorf("error disconnecting test topology: %v", err)
}
return nil
}
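// getServerVersion returns the server version reported by the buildInfo command.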
func getServerVersion() (string, error) {
var serverStatus bson.Raw
err := testContext.client.Database(TestDb).RunCommand(
Background,
bson.D{{"buildInfo", 1}},
).Decode(&serverStatus)
if err != nil {
return "", err
}
version, err := serverStatus.LookupErr("version")
if err != nil {
return "", errors.New("no version string in serverStatus response")
}
return version.StringValue(), nil
}
// addOptions appends connection string options to a URI.
func addOptions(uri string, opts ...string) string {
if !strings.ContainsRune(uri, '?') {
if uri[len(uri)-1] != '/' {
uri += "/"
}
uri += "?"
} else {
uri += "&"
}
for _, opt := range opts {
uri += opt
}
return uri
}
// addTLSConfig checks for the environment variable indicating that the tests are being run
// on an SSL-enabled server, and if so, returns a new URI with the necessary configuration.
func addTLSConfig(uri string) string {
if os.Getenv("SSL") == "ssl" {
uri = addOptions(uri, "ssl=", "true")
}
caFile := os.Getenv("MONGO_GO_DRIVER_CA_FILE")
if len(caFile) == 0 {
return uri
}
return addOptions(uri, "sslCertificateAuthorityFile=", caFile)
}
// addCompressors checks for the environment variable indicating that the tests are being run with compression
// enabled. If so, it returns a new URI with the necessary configuration
func addCompressors(uri string) string {
comp := os.Getenv("MONGO_GO_DRIVER_COMPRESSOR")
if len(comp) == 0 {
return uri
}
return addOptions(uri, "compressors=", comp)
}
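// addServerlessAuthCredentials injects the serverless Atlas username and password into the URI when running against a serverless deployment.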
func addServerlessAuthCredentials(uri string) (string, error) {
if os.Getenv("SERVERLESS") != "serverless" {
return uri, nil
}
user := os.Getenv("SERVERLESS_ATLAS_USER")
if user == "" {
return "", fmt.Errorf("serverless expects SERVERLESS_ATLAS_USER to be set")
}
password := os.Getenv("SERVERLESS_ATLAS_PASSWORD")
if password == "" {
return "", fmt.Errorf("serverless expects SERVERLESS_ATLAS_PASSWORD to be set")
}
var scheme string
// remove the scheme
if strings.HasPrefix(uri, "mongodb+srv://") {
scheme = "mongodb+srv://"
} else if strings.HasPrefix(uri, "mongodb://") {
scheme = "mongodb://"
} else {
return "", fmt.Errorf("scheme must be \"mongodb\" or \"mongodb+srv\"")
}
uri = scheme + user + ":" + password + "@" + uri[len(scheme):]
return uri, nil
}
// getClusterConnString gets the globally configured connection string.
func getClusterConnString() (connstring.ConnString, error) {
uri := os.Getenv("MONGODB_URI")
if uri == "" {
uri = "mongodb://localhost:27017"
}
uri, err := addNecessaryParamsToURI(uri)
if err != nil {
return connstring.ConnString{}, err
}
return connstring.ParseAndValidate(uri)
}
func addNecessaryParamsToURI(uri string) (string, error) {
uri = addTLSConfig(uri)
uri = addCompressors(uri)
return addServerlessAuthCredentials(uri)
}
// CompareServerVersions compares two version number strings (i.e. positive integers separated by
// periods). Comparisons are done to the lesser precision of the two versions. For example, 3.2 is
// considered equal to 3.2.11, whereas 3.2.0 is considered less than 3.2.11.
//
// Returns a positive int if version1 is greater than version2, a negative int if version1 is less
// than version2, and 0 if version1 is equal to version2.
func CompareServerVersions(v1 string, v2 string) int {
n1 := strings.Split(v1, ".")
n2 := strings.Split(v2, ".")
for i := 0; i < int(math.Min(float64(len(n1)), float64(len(n2)))); i++ {
i1, err := strconv.Atoi(n1[i])
if err != nil {
return 1
}
i2, err := strconv.Atoi(n2[i])
if err != nil {
return -1
}
difference := i1 - i2
if difference != 0 {
return difference
}
}
return 0
}
| [
"\"ATLAS_DATA_LAKE_INTEGRATION_TEST\"",
"\"REQUIRE_API_VERSION\"",
"\"SINGLE_MONGOS_LB_URI\"",
"\"MULTI_MONGOS_LB_URI\"",
"\"AUTH\"",
"\"SSL\"",
"\"SERVERLESS\"",
"\"SSL\"",
"\"MONGO_GO_DRIVER_CA_FILE\"",
"\"MONGO_GO_DRIVER_COMPRESSOR\"",
"\"SERVERLESS\"",
"\"SERVERLESS_ATLAS_USER\"",
"\"SERVERLESS_ATLAS_PASSWORD\"",
"\"MONGODB_URI\""
] | [] | [
"REQUIRE_API_VERSION",
"SERVERLESS_ATLAS_PASSWORD",
"AUTH",
"SSL",
"MONGODB_URI",
"ATLAS_DATA_LAKE_INTEGRATION_TEST",
"MONGO_GO_DRIVER_CA_FILE",
"SINGLE_MONGOS_LB_URI",
"MULTI_MONGOS_LB_URI",
"MONGO_GO_DRIVER_COMPRESSOR",
"SERVERLESS",
"SERVERLESS_ATLAS_USER"
] | [] | ["REQUIRE_API_VERSION", "SERVERLESS_ATLAS_PASSWORD", "AUTH", "SSL", "MONGODB_URI", "ATLAS_DATA_LAKE_INTEGRATION_TEST", "MONGO_GO_DRIVER_CA_FILE", "SINGLE_MONGOS_LB_URI", "MULTI_MONGOS_LB_URI", "MONGO_GO_DRIVER_COMPRESSOR", "SERVERLESS", "SERVERLESS_ATLAS_USER"] | go | 12 | 0 | |
agents/mongodb/internal/profiler/collector/collector_test.go | // pmm-agent
// Copyright 2019 Percona LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"fmt"
"strings"
"sync"
"testing"
"time"
"github.com/percona/percona-toolkit/src/go/mongolib/proto"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
const (
MgoTimeoutDialInfo = 5 * time.Second
MgoTimeoutSessionSync = 5 * time.Second
MgoTimeoutSessionSocket = 5 * time.Second
)
type ProfilerStatus struct {
Was int64 `bson:"was"`
SlowMs int64 `bson:"slowms"`
GleStats struct {
ElectionID string `bson:"electionId"`
LastOpTime int64 `bson:"lastOpTime"`
} `bson:"$gleStats"`
}
func BenchmarkCollector(b *testing.B) {
maxLoops := 3
maxDocs := 100
timeout := time.Millisecond*time.Duration(maxDocs*maxLoops) + cursorTimeout*time.Duration(maxLoops*2) + time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
url := "mongodb://root:[email protected]:27017"
// time.Millisecond*time.Duration(maxDocs*maxLoops): time it takes to write all docs for all iterations
// cursorTimeout*time.Duration(maxLoops*2): Wait time between loops to produce iter.TryNext to return a false
client, err := createSession(url, "pmm-agent")
if err != nil {
return
}
cleanUpDBs(client) // Just in case there are old dbs with matching names
defer cleanUpDBs(client)
ps := ProfilerStatus{}
err = client.Database("admin").RunCommand(ctx, primitive.M{"profile": -1}).Decode(&ps)
defer func() { // restore profiler status
client.Database("admin").RunCommand(ctx, primitive.D{{"profile", ps.Was}, {"slowms", ps.SlowMs}})
}()
	// Enable profiling of all queries (2, slowms = 0)
res := client.Database("admin").RunCommand(ctx, primitive.D{{"profile", 2}, {"slowms", 0}})
if res.Err() != nil {
return
}
for n := 0; n < b.N; n++ {
ctr := New(client, "test", logrus.WithField("component", "profiler-test"))
wg := &sync.WaitGroup{}
wg.Add(1)
go genData(ctx, client, maxLoops, maxDocs)
profiles := make([]proto.SystemProfile, 0)
docsChan, err := ctr.Start(ctx)
if err != nil {
return
}
go func() {
i := 0
for profile := range docsChan {
profiles = append(profiles, profile)
i++
if i >= 300 {
wg.Done()
}
}
}()
wg.Wait()
ctr.Stop()
}
cancel()
}
func TestCollector(t *testing.T) {
maxLoops := 3
maxDocs := 100
url := "mongodb://root:[email protected]:27017"
// time.Millisecond*time.Duration(maxDocs*maxLoops): time it takes to write all docs for all iterations
// cursorTimeout*time.Duration(maxLoops*2): Wait time between loops to produce iter.TryNext to return a false
timeout := time.Millisecond*time.Duration(maxDocs*maxLoops) + cursorTimeout*time.Duration(maxLoops*2) + 5*time.Second
logrus.SetLevel(logrus.TraceLevel)
defer logrus.SetLevel(logrus.InfoLevel)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
client, err := createSession(url, "pmm-agent")
require.NoError(t, err)
cleanUpDBs(client) // Just in case there are old dbs with matching names
defer cleanUpDBs(client)
	// Create the test database before the test starts.
doc := bson.M{}
client.Database("test_collector").Collection("test").InsertOne(context.TODO(), doc)
<-time.After(time.Second)
ctr := New(client, "test_collector", logrus.WithField("component", "collector-test"))
// Start the collector
profiles := make([]proto.SystemProfile, 0)
	docsChan, err := ctr.Start(ctx)
	require.NoError(t, err)
wg := &sync.WaitGroup{}
wg.Add(1)
<-time.After(time.Second)
go genData(ctx, client, maxLoops, maxDocs)
go func() {
defer wg.Done()
i := 0
for profile := range docsChan {
select {
case <-ctx.Done():
return
default:
}
profiles = append(profiles, profile)
i++
if i >= 300 {
return
}
}
}()
wg.Wait()
ctr.Stop()
assert.Equal(t, maxDocs*maxLoops, len(profiles))
}
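// genData inserts maxDocs documents per loop into the test_collector.people collection, waiting cursorTimeout between loops.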
func genData(ctx context.Context, client *mongo.Client, maxLoops, maxDocs int) {
interval := time.Millisecond
ticker := time.NewTicker(interval)
defer ticker.Stop()
for j := 0; j < maxLoops; j++ {
select {
case <-ctx.Done():
return
default:
}
for i := 0; i < maxDocs; i++ {
select {
case <-ticker.C:
doc := bson.M{"first_name": "zapp", "last_name": "brannigan"}
client.Database("test_collector").Collection("people").InsertOne(context.TODO(), doc)
case <-ctx.Done():
return
}
}
<-time.After(cursorTimeout)
}
}
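// createSession connects a client to the given DSN using a direct connection, nearest read preference, and a QAN profiler app name.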
func createSession(dsn string, agentID string) (*mongo.Client, error) {
ctx, cancel := context.WithTimeout(context.Background(), MgoTimeoutDialInfo)
defer cancel()
opts := options.Client().
ApplyURI(dsn).
SetDirect(true).
SetReadPreference(readpref.Nearest()).
SetSocketTimeout(MgoTimeoutSessionSocket).
SetAppName(fmt.Sprintf("QAN-mongodb-profiler-%s", agentID))
client, err := mongo.Connect(ctx, opts)
if err != nil {
return nil, err
}
return client, nil
}
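// cleanUpDBs drops all databases whose names start with the test_ prefix.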
func cleanUpDBs(sess *mongo.Client) error {
dbs, err := sess.ListDatabaseNames(context.TODO(), bson.M{})
if err != nil {
return err
}
for _, dbname := range dbs {
if strings.HasPrefix("test_", dbname) {
err = sess.Database(dbname).Drop(context.TODO())
}
}
return nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
integration/tests/cook/test_dynamic_clusters.py | import logging
import os
import subprocess
import time
import unittest
import pytest
from tests.cook import util
@pytest.mark.timeout(util.DEFAULT_TEST_TIMEOUT_SECS) # individual test timeout
class TestDynamicClusters(util.CookTest):
@classmethod
def setUpClass(cls):
cls.cook_url = util.retrieve_cook_url()
util.init_cook_session(cls.cook_url)
def setUp(self):
self.cook_url = type(self).cook_url
self.logger = logging.getLogger(__name__)
self.user_factory = util.UserFactory(self)
@unittest.skipUnless(util.using_kubernetes(), 'Test requires kubernetes')
@unittest.skipUnless(os.getenv('COOK_TEST_DYNAMIC_CLUSTERS') is not None,
'Requires setting the COOK_TEST_DYNAMIC_CLUSTERS environment variable')
@pytest.mark.serial
def test_dynamic_clusters(self):
"""
Test that dynamic cluster configuration functionality is working.
"""
docker_image = util.docker_image()
container = {'type': 'docker',
'docker': {'image': docker_image}}
admin = self.user_factory.admin()
# Force all clusters to have state = deleted via the API
clusters = [cluster for cluster in util.compute_clusters(self.cook_url)['db-configs'] if cluster["state"] == "running"]
with admin:
self.logger.info(f'Clusters {clusters}')
# First state = draining
for cluster in clusters:
cluster["state"] = "draining"
cluster["state-locked?"] = True
self.logger.info(f'Trying to update cluster {cluster}')
data, resp = util.update_compute_cluster(self.cook_url, cluster)
self.assertEqual(201, resp.status_code, resp.content)
# Then state = deleted
for cluster in clusters:
cluster["state"] = "deleted"
util.wait_until(lambda: util.update_compute_cluster(self.cook_url, cluster),
lambda x: 201 == x[1].status_code,
300000, 5000)
# Create at least one new cluster with a unique test name (using one of the existing cluster's IP and cert)
test_cluster_name = f'test_cluster_{round(time.time() * 1000)}'
test_cluster = {
"name": test_cluster_name,
"state": "running",
"base-path": clusters[0]["base-path"],
"ca-cert": clusters[0]["ca-cert"],
"template": clusters[0]["template"]
}
data, resp = util.create_compute_cluster(self.cook_url, test_cluster)
self.assertEqual(201, resp.status_code, resp.content)
# Test create cluster with duplicate name
data, resp = util.create_compute_cluster(self.cook_url, test_cluster)
self.assertEqual(422, resp.status_code, resp.content)
self.assertEqual(f'Compute cluster with name {test_cluster_name} already exists',
data['error']['message'],
resp.content)
# Check that a job schedules successfully
command = "true"
job_uuid, resp = util.submit_job(self.cook_url, command=command, container=container)
self.assertEqual(201, resp.status_code, resp.content)
instance = util.wait_for_instance(self.cook_url, job_uuid)
message = repr(instance)
self.assertIsNotNone(instance['compute-cluster'], message)
instance_compute_cluster_name = instance['compute-cluster']['name']
self.assertEqual(test_cluster["name"], instance_compute_cluster_name, instance['compute-cluster'])
util.wait_for_instance(self.cook_url, job_uuid, status='success')
running_clusters = [cluster for cluster in util.compute_clusters(self.cook_url)['db-configs'] if cluster["state"] == "running"]
self.assertEqual(1, len(running_clusters), running_clusters)
self.assertEqual(test_cluster["name"], running_clusters[0]["name"], running_clusters)
with admin:
# Delete test cluster
# First state = draining
test_cluster["state"] = "draining"
data, resp = util.update_compute_cluster(self.cook_url, test_cluster)
self.assertEqual(201, resp.status_code, resp.content)
# Then state = deleted
test_cluster["state"] = "deleted"
util.wait_until(lambda: util.update_compute_cluster(self.cook_url, test_cluster),
lambda x: 201 == x[1].status_code,
300000, 5000)
# Hard-delete the original non-test clusters
for cluster in clusters:
self.logger.info(f'Trying to delete cluster {cluster}')
resp = util.delete_compute_cluster(self.cook_url, cluster)
self.assertEqual(204, resp.status_code, resp.content)
# Force give up leadership
resp = util.shutdown_leader(self.cook_url, "test_dynamic_clusters")
self.assertEqual(b'Accepted', resp)
# Old clusters should be re-created
# wait for cook to come up
util.wait_until(lambda: [cluster for cluster in util.compute_clusters(self.cook_url)['db-configs'] if cluster["state"] == "running"],
lambda x: len(x) == len(clusters),
420000, 5000)
# Check that a job schedules successfully
command = "true"
job_uuid, resp = util.submit_job(self.cook_url, command=command, container=container)
self.assertEqual(201, resp.status_code, resp.content)
util.wait_for_instance(self.cook_url, job_uuid, status='success')
with admin:
# Hard-delete test cluster
resp = util.delete_compute_cluster(self.cook_url, test_cluster)
self.assertEqual(204, resp.status_code, resp.content)
| [] | [] | [
"COOK_TEST_DYNAMIC_CLUSTERS"
] | [] | ["COOK_TEST_DYNAMIC_CLUSTERS"] | python | 1 | 0 | |
fileshackproject/wsgi.py | """
WSGI config for fileshackproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fileshackproject.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
test/kb_variation_importer_server_test.py | # -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import json # noqa: F401
import time
import requests
import shutil
import uuid
from os import environ
try:
from ConfigParser import ConfigParser # py2
except ImportError:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from DataFileUtil.DataFileUtilClient import DataFileUtil
from mock import patch
from biokbase.workspace.client import Workspace as workspaceService
from kb_variation_importer.kb_variation_importerImpl import kb_variation_importer
from kb_variation_importer.kb_variation_importerServer import MethodContext
from kb_variation_importer.authclient import KBaseAuth as _KBaseAuth
mock_assembly = {
"assembly_id": "Carsonella_ruddii_HT.fna.gz_assembly",
"base_counts": {
"A": 67508,
"C": 11789,
"G": 11134,
"T": 67112
},
"contigs": {
"CP003544.1": {
"contig_id": "CP003544.1",
"description": "Candidatus Carsonella ruddii HT isolate Thao2000, complete genome",
"gc_content": 0.1455,
"length": 157543,
"md5": "2648e704354959e79f5de6fff3b5b9db",
"name": "CP003544.1"
}
},
"dna_size": 157543,
"gc_content": 0.1455,
"num_contigs": 1,
"type": "Unknown"
}
class kb_variation_importerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_variation_importer'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'kb_variation_importer',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL)
cls.serviceImpl = kb_variation_importer(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_kb_variation_importer_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@staticmethod
def fake_staging_download(params):
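        """Mimic DataFileUtil.download_staging_file by copying a test file from /kb/module/data into the scratch directory."""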
scratch = '/kb/module/work/tmp/'
inpath = params['staging_file_subdir_path']
shutil.copy('/kb/module/data/'+ inpath, scratch + inpath)
return {'copy_file_path': scratch + inpath}
    # NOTE: According to Python unittest naming rules, test method names should start with 'test'. # noqa
@patch.object(DataFileUtil, "download_staging_file",
new=fake_staging_download)
def test_your_method(self):
# Prepare test objects in workspace if needed using
# self.getWsClient().save_objects({'workspace': self.getWsName(),
# 'objects': []})
#
# Run your method by
# ret = self.getImpl().your_method(self.getContext(), parameters...)
#
# Check returned data with
# self.assertEqual(ret[...], ...) or other unittest methods
params = {
'workspace_name' : self.getWsName(),
'variation_object_name' : 'Test_variation_object_name',
'genome_ref' : '18590/2/8',
'variation_file_subdir_path' : 'test_with_chr.vcf',
'variation_attributes_subdir_path' : 'population_locality.txt',
}
ret = self.getImpl().import_variation(self.getContext(), params)[0]
self.assertIsNotNone(ret['report_ref'], ret['report_name'])
pass
| [] | [] | [
"SDK_CALLBACK_URL"
] | [] | ["SDK_CALLBACK_URL"] | python | 1 | 0 | |
cmd/handleDomain.go | package cmd
import (
"database/sql"
"fmt"
"log"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/spf13/cobra"
)
var domainDsn string
var domainForce bool
func init() {
domainCmd.PersistentFlags().StringVarP(&domainDsn, "dsn", "d", "", "MySQL/MariaDB Data Source Name as described in https://github.com/go-sql-driver/mysql")
domainRemoveCmd.Flags().BoolVarP(&domainForce, "force", "f", false, "If true, delete domain with all hosts. If false (default), a domain isn't deleted if any host of the domain exists.")
rootCmd.AddCommand(domainCmd)
domainCmd.AddCommand(domainListCmd)
domainCmd.AddCommand(domainAddCmd)
domainCmd.AddCommand(domainRemoveCmd)
}
var domainCmd = &cobra.Command{
Use: "domain",
Short: "Manage domain entries",
Long: `Manage dynpower domain entries in database.`,
Run: func(cmd *cobra.Command,
args []string) {
handleDomain()
},
}
var domainListCmd = &cobra.Command{
Use: "list",
Short: "List domains in database",
Long: `List all domains in the dynpower database. If a DSN is submitted by the flag --dsn, this DSN will be used. If no DSN is provided, dynpower-cli tries to use the environment variables DBHOST, DBUSER, DBNAME and DBPASSWORD.`,
Run: func(cmd *cobra.Command,
args []string) {
listDomain(domainDsn)
},
}
var domainAddCmd = &cobra.Command{
Use: "add [domain] [access key]",
Short: "Add domain with access key to database",
Long: `Add domain with access key to dynpower database. If a DSN is submitted by the flag --dsn, this DSN will be used. If no DSN is provided, dynpower-cli tries to use the environment variables DBHOST, DBUSER, DBNAME and DBPASSWORD.`,
Args: cobra.ExactArgs(2),
Run: func(cmd *cobra.Command,
args []string) {
addDomain(domainDsn, args[0], args[1])
},
}
var domainRemoveCmd = &cobra.Command{
Use: "remove [domain]",
Short: "Remove domain from database",
Long: `Remove domain from dynpower database. If a DSN is submitted by the flag --dsn, this DSN will be used. If no DSN is provided, dynpower-cli tries to use the environment variables DBHOST, DBUSER, DBNAME and DBPASSWORD.`,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command,
args []string) {
removeDomain(domainDsn, args[0])
},
}
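// dbConn opens a MySQL/MariaDB connection from the given DSN or, when the DSN is empty, from the DBHOST, DBUSER, DBNAME, DBPASSWORD and DBPORT environment variables.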
func dbConn(dsn string) (db *sql.DB) {
if len(dsn) < 1 {
dbname := os.Getenv("DBNAME") // maybe use LookupEnv to detect if env variable exists
dbhost := os.Getenv("DBHOST")
dbuser := os.Getenv("DBUSER")
dbpassword := os.Getenv("DBPASSWORD")
dbport := os.Getenv("DBPORT")
if len(dbport) < 1 {
dbport = "3306"
}
if len(dbname) >= 1 && len(dbhost) >= 1 && len(dbuser) >= 1 && len(dbpassword) >= 1 {
dsn = dbuser + ":" + dbpassword + "@tcp(" + dbhost + ":" + dbport + ")/" + dbname
} else {
fmt.Println("No database connect parameter found, exiting. Please use --dsn or environment variables to define database connection.")
os.Exit(1)
}
}
db, err := sql.Open("mysql", dsn)
if err != nil {
fmt.Println("Error by connecting database.")
fmt.Println(err.Error())
os.Exit(1)
}
return db
}
/*
* Check database connection by performing a query, exit in error case
*/
func checkDb(dsn string) {
db := dbConn(dsn)
_, err := db.Query("SELECT r.hostname, d.domainname, d.access_key FROM dynrecords r, domains d WHERE d.id=r.domain_id")
if err != nil {
log.Println("Database problem: " + err.Error())
os.Exit(1)
//panic(err.Error()) // proper error handling instead of panic in your app
}
return
}
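// listDomain prints every domain in the database together with its creation and update timestamps.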
func listDomain(dsn string) {
db := dbConn(dsn)
//log.Println(dsn)
var maxStrlenSQL sql.NullInt64
var maxStrlen int
errLen := db.QueryRow("SELECT max(length(domainname)) as maxstrlen from domains").Scan(&maxStrlenSQL)
if errLen != nil {
fmt.Println(errLen)
os.Exit(1)
}
if maxStrlenSQL.Valid {
maxStrlen = int(maxStrlenSQL.Int64)
} else {
maxStrlen = 0
}
if maxStrlen == 0 {
fmt.Println("No domain in database, use add command to create domain entry")
os.Exit(0)
}
results, err := db.Query("SELECT domainname, dt_created, dt_updated FROM domains ORDER by domainname")
if err != nil {
log.Println("Database problem: " + err.Error())
os.Exit(1)
//panic(err.Error()) // proper error handling instead of panic in your app
}
defer db.Close()
maxStrlen = maxStrlen + 2
fmt.Println("Domains in database:")
fmt.Printf("%-"+fmt.Sprintf("%d", maxStrlen)+"s%-21s%-21s\n", "Domain", "Created", "Updated")
var domainname, dtCreated, dtUpdated string
for results.Next() {
// for each row, scan the result into our tag composite object
err = results.Scan(&domainname, &dtCreated, &dtUpdated)
if err != nil {
panic(err.Error()) // proper error handling instead of panic in your app
}
// and then print out the tag's Name attribute
fmt.Printf("%-"+fmt.Sprintf("%d", maxStrlen)+"s%-21s%-21s\n", domainname, dtCreated, dtUpdated)
}
}
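// addDomain stores a new domain together with a hash of its access key.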
func addDomain(dsn string, domain string, accessKey string) {
db := dbConn(dsn)
//log.Println(dsn)
//log.Println("in add...")
//log.Println(domain)
//log.Println(accessKey)
hashedAccessKey, err := HashPassword(accessKey)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
//log.Println(hashedAccessKey)
result, err := db.Prepare("INSERT INTO domains(domainname, access_key, dt_created) VALUES(?,?, now())")
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
_, insertErr := result.Exec(domain, hashedAccessKey)
if insertErr != nil {
fmt.Println(insertErr.Error())
os.Exit(1)
}
fmt.Println("Domain " + domain + " added successfully")
defer db.Close()
}
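// removeHosts deletes all dynrecords entries that belong to the given domain.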
func removeHosts(dsn string, domain string) {
db := dbConn(dsn)
defer db.Close()
//log.Println(dsn)
//log.Println("in add...")
//log.Println(domain)
//log.Println(accessKey)
var domainID int
// todo: check existence of domain
// remove host
// get domain id
errLen := db.QueryRow("SELECT id from domains d WHERE d.domainname=?", domain).Scan(&domainID)
if errLen != nil {
fmt.Println(errLen)
os.Exit(1)
}
result, err := db.Prepare("DELETE FROM dynrecords WHERE domain_id=?")
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
deleteResult, removeErr := result.Exec(domainID)
if removeErr != nil {
fmt.Println(removeErr.Error())
os.Exit(1)
}
rowsAffected, removeErr := deleteResult.RowsAffected()
if removeErr != nil {
fmt.Println(removeErr.Error())
os.Exit(1)
}
if rowsAffected == 0 {
fmt.Println("No host entry deleted")
} else {
fmt.Println("Hosts of domain " + domain + " removed successfully")
}
}
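// removeDomain deletes a domain entry; when --force is set its host entries are removed first.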
func removeDomain(dsn string, domain string) {
db := dbConn(dsn)
var count int
err := db.QueryRow("SELECT count(*) as cnt FROM dynrecords r, domains d WHERE d.id=r.domain_id AND d.domainname=?", domain).Scan(&count)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
defer db.Close()
	if count > 0 && domainForce {
fmt.Println("Delete host entries...")
removeHosts(dsn, domain)
	} else if count > 0 && !domainForce {
fmt.Println("Could not delete domain, because there are host entries of domain " + domain + ".\nPlease delete host entries first or use --force flag.")
os.Exit(0)
}
// count = 0
// delete domain entry
result, err := db.Prepare("DELETE FROM domains WHERE domainname=?")
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
deleteResult, removeErr := result.Exec(domain)
if removeErr != nil {
fmt.Println(removeErr.Error())
os.Exit(1)
}
rowsAffected, removeErr := deleteResult.RowsAffected()
if removeErr != nil {
fmt.Println(removeErr.Error())
os.Exit(1)
}
if rowsAffected == 0 {
fmt.Println("Nothing deleted, does the domain " + domain + " exist?")
} else {
fmt.Println("Domain " + domain + " removed successfully")
}
}
/*
* Handle domain-related commands
*/
func handleDomain() {
fmt.Println("\nUnknown or missing command.\nRun dynpower-cli domain --help to show available commands.")
}
| [
"\"DBNAME\"",
"\"DBHOST\"",
"\"DBUSER\"",
"\"DBPASSWORD\"",
"\"DBPORT\""
] | [] | [
"DBHOST",
"DBUSER",
"DBNAME",
"DBPASSWORD",
"DBPORT"
] | [] | ["DBHOST", "DBUSER", "DBNAME", "DBPASSWORD", "DBPORT"] | go | 5 | 0 |