filename
stringlengths 4
198
| content
stringlengths 25
939k
| environment
list | variablearg
list | constarg
list | variableargjson
stringclasses 1
value | constargjson
stringlengths 2
3.9k
| lang
stringclasses 3
values | constargcount
float64 0
129
⌀ | variableargcount
float64 0
0
⌀ | sentence
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|
manage.py
|
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jdma_site.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Hand off to Django's management CLI (runserver, migrate, shell, ...).
    execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
db/db.go
|
package db
import (
"os"
"sync"
"github.com/msanvarov/gin-rest-prisma-boilerplate/prisma-client"
)
var (
	// client is the lazily-created singleton Prisma client; see DB().
	client *prisma.Client
	// once guards the one-time initialization of client.
	once sync.Once
)
// DB returns the process-wide Prisma client, creating it on first use.
// If the PRISMA_ENDPOINT environment variable is set, the client targets
// that endpoint; otherwise the library defaults are used.
func DB() *prisma.Client {
	once.Do(func() {
		var opts *prisma.Options
		if endpoint := os.Getenv("PRISMA_ENDPOINT"); endpoint != "" {
			opts = &prisma.Options{Endpoint: endpoint}
		}
		client = prisma.New(opts)
	})
	return client
}
|
[
"\"PRISMA_ENDPOINT\""
] |
[] |
[
"PRISMA_ENDPOINT"
] |
[]
|
["PRISMA_ENDPOINT"]
|
go
| 1 | 0 | |
scripts/janalyze.py
|
#!/usr/bin/env nix-shell
#!nix-shell -p python3Packages.requests python3Packages.tabulate -i python3
"""
Jormungandr Analysis Tools
"""
__version__ = "0.1.0"
import argparse, requests, os, json, sys
from argparse import RawTextHelpFormatter
from requests.exceptions import HTTPError
from tabulate import tabulate
# Results cached by aggregate() / distribution() so crossref() can reuse them.
globalAggregate = None
globalEpochBlocks = None  # NOTE(review): declared global in aggregate() but never assigned; looks unused — confirm.
globalPools = None
# REST endpoint base; resolved in main() from --restapi or JORMUNGANDR_RESTAPI_URL.
api_url_base = None
api_url = None
def get_api(path):
    """Return the response body text for GET {api_url}/{path}."""
    response = endpoint(f'{api_url}/{path}')
    return response.text
def get_tip():
    """Return the block id (hex string) of the current chain tip."""
    return get_api("tip")
def get_block(block_id):
    """Fetch block `block_id` and return its raw bytes hex-encoded."""
    response = endpoint(f'{api_url}/block/{block_id}')
    return response.content.hex()
def parse_block(block):
    """Decode the fields of interest from a hex-encoded block header.

    Offsets are hex-character positions (two characters per byte):
    epoch and slot are 4-byte big-endian integers; parent and pool are
    32-byte identifiers returned as 64-char hex strings.
    """
    epoch = int(block[16:24], 16)
    slot = int(block[24:32], 16)
    parent_id = block[104:168]
    pool_id = block[168:232]
    return {"epoch": epoch, "slot": slot, "parent": parent_id, "pool": pool_id}
def aggregateall():
    """Walk the chain backwards from the tip and total blocks minted per pool.

    Honors args.aggregateall: a positive value limits the walk to that many
    epochs ending at the tip epoch; 0 walks all the way to genesis. Prints a
    table of per-pool block counts and percentages, sorted by pool id.
    """
    tip = get_tip()
    block = parse_block(get_block(tip))
    currentEpoch = block['epoch']
    poolTotal = {}
    blockTotal = 0
    # An all-zero parent hash marks the genesis block.
    while block["parent"] != ("0" * 64):
        if args.aggregateall > 0:
            # Stop once we step past the requested epoch window.
            if (currentEpoch - args.aggregateall + 1) > block['epoch']:
                break
        pool = block["pool"]
        if pool not in poolTotal:
            poolTotal[pool] = {}
            poolTotal[pool]['blocks'] = 1
        else:
            poolTotal[pool]['blocks'] = poolTotal[pool]['blocks'] + 1
        block = parse_block(get_block(block['parent']))
        blockTotal += 1
    lowestEpoch = block['epoch']
    if args.aggregateall > 0:
        print(f'\nJormungandr Block Aggregate for epochs {lowestEpoch + 1} - {currentEpoch}:\n')
    else:
        print('\nJormungandr Overall Block Aggregate:\n')
    headers = [f'Pool (Node ID)', "Blocks (#)", "Block Percent (%)"]
    table = []
    for pool, data in poolTotal.items():
        record = [ pool, data['blocks'], data['blocks'] / blockTotal * 100 ]
        table.append(record)
    print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql")}')
    print(f'TotalBlocks: {blockTotal} \n')
def aggregate(silent=False):
    """Aggregate per-epoch block production per pool, walking back from the tip.

    Covers args.aggregate epochs (or the whole chain with --full). The result
    is stored in globalAggregate for crossref(); unless silent, it is printed
    as JSON (--json) or as one table per epoch.
    """
    global globalAggregate
    global globalEpochBlocks
    tip = get_tip()
    block = parse_block(get_block(tip))
    epochBlockTotal = {}
    currentEpoch = block['epoch']
    epochs = {}
    pools = {}
    # An all-zero parent hash marks the genesis block.
    while block["parent"] != ("0" * 64):
        if args.full == False:
            # Stop once we step past the requested epoch window.
            if (currentEpoch - args.aggregate + 1) > block['epoch']:
                break
        epoch = block['epoch']
        parent = block['parent']
        pool = block['pool']
        if epoch not in epochs:
            epochs[epoch] = {}
            epochBlockTotal[epoch] = 0
        if pool not in epochs[epoch]:
            epochs[epoch][pool] = {}
            epochs[epoch][pool]['blocks'] = 1
            epochBlockTotal[epoch] = epochBlockTotal[epoch] + 1
        else:
            epochs[epoch][pool]['blocks'] = epochs[epoch][pool]['blocks'] + 1
            epochBlockTotal[epoch] = epochBlockTotal[epoch] + 1
        block = parse_block(get_block(block['parent']))
    # Attach a per-epoch 'stats' entry and compute per-pool percentages.
    for epoch, epochData in epochs.items():
        epochs[epoch]['stats'] = {}
        epochs[epoch]['stats']['blocksum'] = epochBlockTotal[epoch]
        for pool, poolData in epochData.items():
            if pool != 'stats':
                epochs[epoch][pool]['percent'] = poolData['blocks'] / epochBlockTotal[epoch] * 100
    if silent == False:
        if args.json == True:
            print(json.dumps(epochs, sort_keys=True))
        else:
            print('\nJormungandr Epoch Block Aggregate:\n')
            for epoch, epochData in epochs.items():
                headers = [f'EPOCH {epoch}, Pool (Node ID)', "Blocks (#)", "Block Percent (%)"]
                table = []
                for pool, data in epochData.items():
                    if pool != 'stats':
                        record = [ pool, data['blocks'], data['percent'] ]
                        table.append(record)
                if args.bigvaluesort == True:
                    print(f'{tabulate(sorted(table, key=lambda x: x[1], reverse=True), headers, tablefmt="psql")}')
                else:
                    print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql")}')
                print(f'{"Totalblocks:":<21}{epochData["stats"]["blocksum"]}\n\n')
    globalAggregate = epochs
def distribution(silent=False):
    """Fetch the current-epoch stake distribution and per-pool percentages.

    Queries the /stake endpoint, computes each pool's share of total staked
    value plus aggregate 'stats', and stores the result in globalPools for
    crossref(). Unless silent, prints JSON (--json) or a summary plus table.
    """
    global globalPools
    epoch = 0
    unassigned = 0
    dangling = 0
    stakeSum = 0
    totalPercentStaked = 0
    total = 0
    pools = {}
    r = endpoint(f'{api_url}/stake')
    raw = r.json()
    epoch = raw['epoch']
    dangling = raw['stake']['dangling']
    unassigned = raw['stake']['unassigned']
    # raw['stake']['pools'] is a list of [pool_id, stake] pairs.
    if args.bigvaluesort == True:
        sortedRaw = sorted(raw['stake']['pools'], key = lambda x: x[1], reverse=True)
    else:
        sortedRaw = sorted(raw['stake']['pools'])
    for [pool, stake] in sortedRaw:
        pools[pool] = {}
        pools[pool]['stake'] = stake
        pools[pool]['percent'] = 0
        stakeSum = stakeSum + stake
    total = stakeSum + unassigned + dangling
    totalPercentStaked = stakeSum / total
    # Calculate percentage stake delegation of total staked ADA
    for pool in pools.keys():
        pools[pool]['percent'] = pools[pool]['stake'] / stakeSum * 100
    pools['stats'] = {}
    pools['stats']['epoch'] = epoch
    pools['stats']['dangling'] = dangling
    pools['stats']['unassigned'] = unassigned
    pools['stats']['total'] = total
    pools['stats']['stakesum'] = stakeSum
    pools['stats']['totalpercentstaked'] = totalPercentStaked
    if silent == False:
        if args.json == True:
            print(json.dumps(pools, sort_keys=True))
        else:
            print('\nJormungandr Stake Pool Distribution:\n')
            print(f'{"Epoch:":<21}{epoch}')
            # Raw values appear to be lovelace; divided by 1e6 for ADA display.
            print(f'{"Dangling:":<21}{dangling / 1e6:,.6f} ADA')
            print(f'{"Unassigned:":<21}{unassigned / 1e6:,.6f} ADA')
            print(f'{"Total:":<21}{total / 1e6:,.6f} ADA')
            print(f'{"TotalStaked:":<21}{stakeSum / 1e6:,.6f} ADA')
            print(f'{"TotalPercentStaked:":<21}{totalPercentStaked * 100:.2f}%\n')
            headers = [f'EPOCH {epoch}, Pool (Node ID)', "Stake (ADA)", "Percent (%)"]
            table = []
            for pool, poolData in pools.items():
                if pool != 'stats':
                    if args.nozero == False or poolData['stake'] != 0:
                        record = [ pool, poolData['stake'] / 1e6, poolData['percent'] ]
                        table.append(record)
            if args.bigvaluesort == True:
                print(f'{tabulate(sorted(table, key=lambda x: x[1], reverse=True), headers, tablefmt="psql", floatfmt=("%s", "0.6f"))}\n\n')
            else:
                print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql", floatfmt=("%s", "0.6f"))}\n\n')
    globalPools = pools
def crossref():
    """Cross-reference stake distribution with block production for the tip epoch.

    Reuses globalAggregate/globalPools when an earlier run populated them;
    otherwise computes them silently first. Note: mutates globalPools in
    place (blocks/percentBlocks keys are added to each pool entry).
    """
    if globalAggregate == None:
        args.aggregate = 1
        aggregate(silent=True)
    if globalPools == None:
        distribution(silent=True)
    # Local name shadows the function; safe here since crossref() never recurses.
    crossref = globalPools
    epoch = crossref['stats']['epoch']
    for pool, poolData in crossref.items():
        if pool != 'stats':
            if pool in globalAggregate[epoch]:
                crossref[pool]['blocks'] = globalAggregate[epoch][pool]['blocks']
                crossref[pool]['percentBlocks'] = globalAggregate[epoch][pool]['percent']
            else:
                # Pool has stake but minted no blocks this epoch.
                crossref[pool]['blocks'] = None
                crossref[pool]['percentBlocks'] = None
    if args.json == True:
        print(json.dumps(crossref, sort_keys=True))
    else:
        print('\nJormungandr Stake and Block Distribution Cross Reference:\n')
        print(f'{"Epoch:":<21}{epoch}')
        print(f'{"Dangling:":<21}{crossref["stats"]["dangling"] / 1e6:,.6f} ADA')
        print(f'{"Unassigned:":<21}{crossref["stats"]["unassigned"] / 1e6:,.6f} ADA')
        print(f'{"TotalADA:":<21}{crossref["stats"]["total"] / 1e6:,.6f} ADA')
        print(f'{"TotalBlocks:":<21}{globalAggregate[epoch]["stats"]["blocksum"]}')
        print(f'{"TotalStaked:":<21}{crossref["stats"]["stakesum"] / 1e6:,.6f} ADA')
        print(f'{"TotalPercentStaked:":<21}{crossref["stats"]["totalpercentstaked"] * 100:.2f}%\n')
        headers = [f'EPOCH {epoch}, Pool (Node ID)', "Stake (ADA)", "Blocks (#)", "PercentStaked (%)", "PercentBlocks (%)"]
        table = []
        for pool, poolData in crossref.items():
            if pool != 'stats':
                # --nozero hides pools with neither stake nor blocks.
                if args.nozero == False or (not (poolData['stake'] == 0 and poolData['blocks'] == None)):
                    record = [ pool, poolData['stake'] / 1e6, poolData['blocks'], poolData['percent'], poolData['percentBlocks'] ]
                    table.append(record)
        if args.bigvaluesort == True:
            print(f'{tabulate(sorted(table, key=lambda x: x[1], reverse=True), headers, tablefmt="psql", floatfmt=("%s", "0.6f", "g", "g", "g"))}\n\n')
        else:
            print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql", floatfmt=("%s", "0.6f", "g", "g", "g"))}\n\n')
def stats():
    """Print the node's /node/stats response, raw (--json) or pretty-printed."""
    r = endpoint(f'{api_url}/node/stats')
    if args.json == True:
        print(json.dumps(r.json(), sort_keys=True))
    else:
        print('Current node stats:\n')
        print(json.dumps(r.json(), sort_keys=True, indent=2))
def endpoint(url):
    """GET `url` and return the Response; exit the program on any failure.

    HTTP status errors (4xx/5xx) and transport-level errors are both fatal,
    so callers never receive a bad response object.
    """
    try:
        response = requests.get(url)
        response.raise_for_status()
    except HTTPError as http_err:
        print("\nWeb API unavailable.\nError Details:\n")
        print(f"HTTP error occurred: {http_err}")
        exit(1)
    except Exception as err:
        print("\nWeb API unavailable.\nError Details:\n")
        print(f"Other error occurred: {err}")
        exit(1)
    return response
def check_int(value):
    """argparse `type=` callable: parse `value` as a strictly positive int.

    Raises argparse.ArgumentTypeError for non-numeric input as well as for
    zero/negative values, so argparse prints a clean usage error instead of
    leaking a ValueError traceback.
    """
    try:
        ivalue = int(value)
    except ValueError:
        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
    if ivalue <= 0:
        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
    return ivalue
def main():
    """Resolve the REST API base URL, then run each analysis the CLI requested."""
    global api_url_base
    global api_url
    if args.restapi is not None:
        api_url_base = args.restapi
    else:
        # Fall back to the env var, then the local default.
        api_url_base = os.environ.get("JORMUNGANDR_RESTAPI_URL", "http://localhost:3001/api")
    api_url = f"{api_url_base}/v0"
    # Flags are independent; several analyses may run in one invocation.
    if args.stats == True:
        stats()
    if args.aggregateall is not None:
        aggregateall()
    if args.aggregate is not None:
        aggregate()
    if args.distribution == True:
        distribution()
    if args.crossref == True:
        crossref()
    exit(0)
if __name__ == "__main__":
    # With no arguments at all, point the user at the help text.
    if len(sys.argv) == 1:
        print(f'\nRun `{sys.argv[0]} -h` for help and usage information\n')
        exit(0)
    parser = argparse.ArgumentParser(description=(
        "Jormungandr analysis tools\n\n"),
        formatter_class=RawTextHelpFormatter)
    # nargs="?" with const: a bare flag uses const; an omitted flag is None.
    parser.add_argument("-aa", "--aggregateall", nargs="?", metavar="X", type=check_int, const=0,
        help="Calculate total block creation per pool for X epochs time starting with the tip or leave blank for all")
    parser.add_argument("-a", "--aggregate", nargs="?", metavar="X", type=check_int, const=1,
        help="Calculate aggregate block creation per pool for X epochs starting with the tip epoch (default = 1)")
    parser.add_argument("-b", "--bigvaluesort", action="store_true",
        help="Show non <-j|--json> output sorted by big to small value rather than keys where possible")
    parser.add_argument("-d", "--distribution", action="store_true",
        help="Calculate the stake distribution for the current epoch only")
    parser.add_argument("-f", "--full", action="store_true",
        help="Calculate the full epoch history where possible")
    parser.add_argument("-j", "--json", action="store_true",
        help="Output raw json only")
    parser.add_argument("-n", "--nozero", action="store_true",
        help="Don't show zero value staking pools (blocks minted or stake valued)")
    parser.add_argument("-s", "--stats", action="store_true",
        help="Show the current node stats")
    parser.add_argument("-v", "--version", action="store_true",
        help="Show the program version and exit")
    parser.add_argument("-x", "--crossref", action="store_true",
        help="Analyse the current epoch, cross referencing both block aggregate and stake distributions")
    parser.add_argument("-r", "--restapi", nargs="?", metavar="RESTAPI", type=str, const="http://127.0.0.1:3001/api",
        help="Set the rest api to utilize; by default: \"http://127.0.0.1:3001/api\". An env var of JORMUNGANDR_RESTAPI_URL can also be seperately set. ")
    args = parser.parse_args()
    # --version short-circuits everything else.
    if args.version:
        print(f'Version: {__version__}\n')
        exit(0)
    main()
|
[] |
[] |
[
"JORMUNGANDR_RESTAPI_URL"
] |
[]
|
["JORMUNGANDR_RESTAPI_URL"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strings"
firebase "firebase.google.com/go"
"firebase.google.com/go/auth"
graphql "github.com/graph-gophers/graphql-go"
"google.golang.org/api/option"
_ "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/postgres"
_ "github.com/lib/pq"
)
// UserIDKey is the context key under which handler stores the authenticated
// user's ID for resolvers to read.
const UserIDKey = UserID("userID")

var (
	// Postgres database
	db *sql.DB
	// Firebase app and authentication client
	app    *firebase.App
	client *auth.Client
	// GraphQL executable schema
	schema *graphql.Schema
)

// UserID is a dedicated string type used for the user-ID context key, which
// avoids collisions with context keys of plain string type.
type UserID string

// Query represents a GraphQL query or mutation request body.
type Query struct {
	// FIXME: OperationName is not appearing
	Query         string                 `json:"query"`
	OperationName string                 `json:"operationName"`
	Variables     map[string]interface{} `json:"variables"`
}

// var query Query
// err := json.NewDecoder(r.Body).Decode(&query)
// if err != nil {
// log.Print(fmt.Errorf("error due to json.NewDecoder.Decode: %w", err))
// RespondServerError(w)
// return
// }
// client, err := app.Auth(context.TODO())
// if err != nil {
// log.Print(fmt.Errorf("error due to app.Auth: %w", err))
// RespondServerError(w)
// return
// }

// whiteSpaceRe matches runs of whitespace; used to collapse queries onto one
// line for logging.
var whiteSpaceRe = regexp.MustCompile(`( |\t|\n)+`)
// handler serves the /graphql endpoint: it optionally authenticates the
// caller via a Firebase ID token from the Authorization header, decodes the
// GraphQL query from the request body, executes it against the global schema
// with the user ID placed on the context, and writes the JSON response.
func handler(w http.ResponseWriter, r *http.Request) {
	// Set headers:
	setHeaders(w)
	// CORS preflight: headers are already set, nothing more to do.
	if r.Method == "OPTIONS" {
		w.WriteHeader(http.StatusOK)
		return
	}
	// Get current user ID (authenticated):
	var userID string
	authHeader := r.Header.Get("Authorization")
	if authHeader == "" {
		// No-op; defer to query or mutation
	} else {
		idToken := strings.TrimPrefix(authHeader, "Bearer ")
		token, err := client.VerifyIDToken(context.TODO(), idToken)
		if err != nil {
			log.Print(fmt.Errorf("error due to client.VerifyUserID: %w", err))
			RespondUnauthorized(w)
			return
		}
		userID = token.UID
	}
	// Read request body:
	dataIn, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Print(fmt.Errorf("error due to ioutil.ReadAll: %w", err))
		RespondServerError(w)
		return
	}
	// Decode query:
	var query Query
	err = json.Unmarshal(dataIn, &query)
	if err != nil {
		log.Print(fmt.Errorf("error due to json.Unmarshal: %w", err))
		RespondServerError(w)
		return
	}
	// Execute query (log it collapsed onto a single line first):
	debugQuery := strings.TrimSpace(whiteSpaceRe.ReplaceAllString(query.Query, " "))
	log.Printf("query=%s variables=%+v", debugQuery, query.Variables)
	ctx := context.WithValue(context.TODO(), UserIDKey, userID)
	resp := schema.Exec(ctx, query.Query, query.OperationName, query.Variables)
	// Resolver errors are logged but still returned to the client inside the
	// GraphQL response body, per the GraphQL convention.
	if resp.Errors != nil {
		log.Printf("error due to schema.Exec: %+v", resp.Errors)
	}
	// Encode response:
	dataOut, err := json.Marshal(resp)
	if err != nil {
		log.Print(fmt.Errorf("error due to json.MarshalIndent: %w", err))
		RespondServerError(w)
		return
	}
	// Done:
	fmt.Fprintln(w, string(dataOut))
}
// main wires up the service in order: Cloud SQL Postgres, Firebase auth, the
// GraphQL schema, and finally the HTTP server. Any setup failure is fatal.
func main() {
	var err error
	/*
	 * Postgres
	 */
	log.Print("setting up postgres")
	// DSN for the cloudsql-proxy dialer; the password comes from PSQL_PW.
	db, err = sql.Open("cloudsqlpostgres", fmt.Sprintf(`
host=codex-ef322:us-west1:codex-db
user=postgres
password=%s
dbname=codex
sslmode=disable
`, os.Getenv("PSQL_PW")))
	must(err, "crash due to sql.Open")
	// Smoke-test the connection; sql.Open alone does not dial.
	var testStr string
	err = db.QueryRow(`select 'hello, world!'`).Scan(&testStr)
	must(err, "crash due to db.QueryRow")
	defer db.Close()
	/*
	 * Firebase auth
	 */
	log.Print("setting up firebase auth")
	opt := option.WithCredentialsFile("secret/firebase-admin-sdk.json")
	app, err = firebase.NewApp(context.TODO(), nil, opt)
	must(err, "crash due to firebase.NewApp")
	client, err = app.Auth(context.TODO())
	must(err, "crash due to app.Auth")
	/*
	 * Schema
	 */
	log.Print("setting up schema")
	bstr, err := ioutil.ReadFile("schema.graphql")
	must(err, "crash due to ioutil.ReadFile")
	schema, err = graphql.ParseSchema(string(bstr), &RootResolver{})
	must(err, "crash due to graphql.ParseSchema")
	/*
	 * Web server
	 */
	log.Print("setting up web server")
	log.Print("ready")
	// PORT is provided by the hosting environment; default to 8080 locally.
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	http.HandleFunc("/graphql", handler)
	http.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	err = http.ListenAndServe(fmt.Sprintf(":%s", port), nil)
	must(err, "crash due to http.ListenAndServe")
}
|
[
"\"PSQL_PW\"",
"\"PORT\""
] |
[] |
[
"PORT",
"PSQL_PW"
] |
[]
|
["PORT", "PSQL_PW"]
|
go
| 2 | 0 | |
control-plane-operator/controllers/hostedcontrolplane/render/manifests_test.go
|
package render
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
configv1 "github.com/openshift/api/config/v1"
hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
)
// TestIgnitionConfigRendering renders the ignition config manifests for
// several SSH-key configurations (none, one, multiple) and compares each
// rendered manifest against a golden fixture via CompareWithFixture.
func TestIgnitionConfigRendering(t *testing.T) {
	testCases := []struct {
		name   string
		params *ClusterParams
	}{
		{
			name:   "No ssh key",
			params: &ClusterParams{},
		},
		{
			name:   "Single ssh key",
			params: &ClusterParams{SSHKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7xaGqJaFd51jCl+MjZzgH1WfgKbNmn+AbvRXOabeNYNRTZiRNcFlWHQPxL/fFWiJ5rDkyTRm6dI49TflU5lMSOcKwoO0sZlMbrDrUeDf2cy/7KffpAto+Te8vB4udAERMJHY89v9/RF6GgMLpW+lbIT3Gyj+MbIF8aAz0vt6VJA8Ptwq2SlxWSPLbxoe5nNP1JaOubG4Arm6t75smJ+wvexV8d9duvFWig2MW5lMTAa6QpSAp6Gd03dWSUiH5++dk3vlNMR9hZMv7/DWqyauGi0MYtuywQqVWr3YMQve72VJTo/qVhvfFylKEFTKA0h5Cl3ziL0DbgM/RDsUqaLynB7b6jAJkhXd02wv6+IkHly02SEnLHGJs50uK7J7GdAWWbKfRByVGg5kP5DwiTEln357ukT7OH8Ys6PNd0Lzzy/oA4Gv+uDzI1RMMBsTcv3SwASuht+EZzQ5hoSCkM6QoEtpruSCEdCtvTEq9idcrVijKbYURtrDdH5WAN9ZYUF13s94870srbG3uavvT2G1IcWjBjiVVoJM8cifYnTHllHX/oPw9iZxhjlrC5Uc+dgRhnpoRYMar30Kg/No1GYj2EPEZgvHVde6KqActTFnD0K5xJEAUzKutu7TDUePm+MYREt4HMeT4LxsVUar9Aak5pgmUKLqKHLY8NeQxWtKMbQ== [email protected]"},
		},
		{
			name: "Multiple ssh keys",
			params: &ClusterParams{
				// Raw string: keys are newline-separated; the second key must
				// stay unindented so no whitespace leaks into the value.
				SSHKey: `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7xaGqJaFd51jCl+MjZzgH1WfgKbNmn+AbvRXOabeNYNRTZiRNcFlWHQPxL/fFWiJ5rDkyTRm6dI49TflU5lMSOcKwoO0sZlMbrDrUeDf2cy/7KffpAto+Te8vB4udAERMJHY89v9/RF6GgMLpW+lbIT3Gyj+MbIF8aAz0vt6VJA8Ptwq2SlxWSPLbxoe5nNP1JaOubG4Arm6t75smJ+wvexV8d9duvFWig2MW5lMTAa6QpSAp6Gd03dWSUiH5++dk3vlNMR9hZMv7/DWqyauGi0MYtuywQqVWr3YMQve72VJTo/qVhvfFylKEFTKA0h5Cl3ziL0DbgM/RDsUqaLynB7b6jAJkhXd02wv6+IkHly02SEnLHGJs50uK7J7GdAWWbKfRByVGg5kP5DwiTEln357ukT7OH8Ys6PNd0Lzzy/oA4Gv+uDzI1RMMBsTcv3SwASuht+EZzQ5hoSCkM6QoEtpruSCEdCtvTEq9idcrVijKbYURtrDdH5WAN9ZYUF13s94870srbG3uavvT2G1IcWjBjiVVoJM8cifYnTHllHX/oPw9iZxhjlrC5Uc+dgRhnpoRYMar30Kg/No1GYj2EPEZgvHVde6KqActTFnD0K5xJEAUzKutu7TDUePm+MYREt4HMeT4LxsVUar9Aak5pgmUKLqKHLY8NeQxWtKMbQ== [email protected]
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7xaGqJaFd51jCl+MjZzgH1WfgKbNmn+AbvRXOabeNYNRTZiRNcFlWHQPxL/fFWiJ5rDkyTRm6dI49TflU5lMSOcKwoO0sZlMbrDrUeDf2cy/7KffpAto+Te8vB4udAERMJHY89v9/RF6GgMLpW+lbIT3Gyj+MbIF8aAz0vt6VJA8Ptwq2SlxWSPLbxoe5nNP1JaOubG4Arm6t75smJ+wvexV8d9duvFWig2MW5lMTAa6QpSAp6Gd03dWSUiH5++dk3vlNMR9hZMv7/DWqyauGi0MYtuywQqVWr3YMQve72VJTo/qVhvfFylKEFTKA0h5Cl3ziL0DbgM/RDsUqaLynB7b6jAJkhXd02wv6+IkHly02SEnLHGJs50uK7J7GdAWWbKfRByVGg5kP5DwiTEln357ukT7OH8Ys6PNd0Lzzy/oA4Gv+uDzI1RMMBsTcv3SwASuht+EZzQ5hoSCkM6QoEtpruSCEdCtvTEq9idcrVijKbYURtrDdH5WAN9ZYUF13s94870srbG3uavvT2G1IcWjBjiVVoJM8cifYnTHllHX/oPw9iZxhjlrC5Uc+dgRhnpoRYMar30Kg/No1GYj2EPEZgvHVde6KqActTFnD0K5xJEAUzKutu7TDUePm+MYREt4HMeT4LxsVUar9Aak5pgmUKLqKHLY8NeQxWtKMbQ== [email protected]`,
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := newClusterManifestContext(nil, map[string]string{"release": "1.2.3"}, tc.params, nil, nil, nil)
			ctx.ignitionConfigs()
			// Each rendered manifest gets its own fixture, keyed by name.
			for name, value := range ctx.manifests {
				CompareWithFixture(t, value, WithExtension(name))
			}
		})
	}
}
// CompareWithFixture will compare output with a test fixture and allows to automatically update them
// by setting the UPDATE env var.
// If output is not a []byte or string, it will get serialized as yaml prior to the comparison.
// The fixtures are stored in $PWD/testdata/prefix${testName}.yaml
// CompareWithFixture will compare output with a test fixture and allows to automatically update them
// by setting the UPDATE env var.
// If output is not a []byte or string, it will get serialized as yaml prior to the comparison.
// The fixtures are stored in $PWD/testdata/prefix${testName}.yaml
func CompareWithFixture(t *testing.T, output interface{}, opts ...Option) {
	t.Helper()
	options := &Options{
		Extension: ".yaml",
	}
	for _, opt := range opts {
		opt(options)
	}
	// Normalize output to bytes; anything other than []byte/string is
	// YAML-serialized first.
	var serializedOutput []byte
	switch v := output.(type) {
	case []byte:
		serializedOutput = v
	case string:
		serializedOutput = []byte(v)
	default:
		serialized, err := yaml.Marshal(v)
		if err != nil {
			t.Fatalf("failed to yaml marshal output of type %T: %v", output, err)
		}
		serializedOutput = serialized
	}
	golden, err := golden(t, options)
	if err != nil {
		t.Fatalf("failed to get absolute path to testdata file: %v", err)
	}
	// In UPDATE mode, rewrite the fixture first so the comparison below
	// passes and the new golden content can be committed.
	if os.Getenv("UPDATE") != "" {
		if err := os.MkdirAll(filepath.Dir(golden), 0755); err != nil {
			t.Fatalf("failed to create fixture directory: %v", err)
		}
		if err := ioutil.WriteFile(golden, serializedOutput, 0644); err != nil {
			t.Fatalf("failed to write updated fixture: %v", err)
		}
	}
	expected, err := ioutil.ReadFile(golden)
	if err != nil {
		t.Fatalf("failed to read testdata file: %v", err)
	}
	if diff := cmp.Diff(string(expected), string(serializedOutput)); diff != "" {
		t.Errorf("got diff between expected and actual result:\nfile: %s\ndiff:\n%s\n\nIf this is expected, re-run the test with `UPDATE=true go test ./...` to update the fixtures.", golden, diff)
	}
}
// Options configures fixture comparison: an optional filename Prefix and
// Suffix, and the fixture file Extension (".yaml" when unset).
type Options struct {
	Prefix    string
	Suffix    string
	Extension string
}

// Option is a functional option that mutates an Options value.
type Option func(*Options)

// WithExtension returns an Option that sets the fixture file extension.
func WithExtension(extension string) Option {
	return func(o *Options) {
		o.Extension = extension
	}
}
// golden determines the golden file to use. The path is
// testdata/zz_fixture_<sanitized prefix+testname+suffix><extension>,
// with the extension defaulting to ".yaml".
func golden(t *testing.T, opts *Options) (string, error) {
	if opts.Extension == "" {
		opts.Extension = ".yaml"
	}
	return filepath.Abs(filepath.Join("testdata", sanitizeFilename(opts.Prefix+t.Name()+opts.Suffix)) + opts.Extension)
}
// sanitizeFilename maps an arbitrary test name to a filesystem-safe fixture
// name: letters, digits, '_' and '.' are kept, and every run of any other
// characters collapses into a single underscore. The result is prefixed with
// "zz_fixture_" so generated fixtures sort together in testdata/.
func sanitizeFilename(s string) string {
	result := strings.Builder{}
	for _, r := range s {
		// Bug fix: the upper bounds were exclusive (r < 'z' / r < 'Z'),
		// which wrongly replaced the letters 'z' and 'Z' with underscores.
		if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || r == '.' || (r >= '0' && r <= '9') {
			// The thing is documented as returning a nil error so lets just drop it
			_, _ = result.WriteRune(r)
			continue
		}
		// Collapse consecutive disallowed characters into one underscore.
		if !strings.HasSuffix(result.String(), "_") {
			result.WriteRune('_')
		}
	}
	return "zz_fixture_" + result.String()
}
// clusterParamsSecrets returns the minimal set of secrets (root CA and
// bootstrap kubeconfig) that manifest rendering expects to find.
func clusterParamsSecrets() *corev1.SecretList {
	return &corev1.SecretList{
		Items: []corev1.Secret{
			{ObjectMeta: metav1.ObjectMeta{Name: "root-ca"}, Data: map[string][]byte{"ca.crt": []byte("foo")}},
			{ObjectMeta: metav1.ObjectMeta{Name: "bootstrap-kubeconfig"}, Data: map[string][]byte{"kubeconfig": []byte("kk")}},
		},
	}
}
// clusterParamsConfigMaps returns the minimal configmap set (combined CA)
// that manifest rendering expects to find.
func clusterParamsConfigMaps() *corev1.ConfigMapList {
	return &corev1.ConfigMapList{
		Items: []corev1.ConfigMap{
			{ObjectMeta: metav1.ObjectMeta{Name: "combined-ca"}, Data: map[string]string{"ca.crt": "foo"}},
		},
	}
}
// TestMachineConfigServerRendering renders the machine-config-server
// manifests and verifies that AWS resource tags from ClusterParams are
// propagated into the rendered infrastructure config.
func TestMachineConfigServerRendering(t *testing.T) {
	testCases := []struct {
		name   string
		params *ClusterParams
	}{
		{
			name:   "No extra AWS tags",
			params: &ClusterParams{PlatformType: "AWS"},
		},
		{
			name: "AWS resource tags are passed on",
			params: &ClusterParams{
				PlatformType: "AWS",
				AWSResourceTags: []hyperv1.AWSResourceTag{
					{Key: "foo", Value: "bar"},
					{Key: "baz", Value: "bar"},
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := newClusterManifestContext(nil, map[string]string{"release": "1.2.3"}, tc.params, nil, clusterParamsSecrets(), clusterParamsConfigMaps())
			ctx.machineConfigServer()
			renderedManifests, err := ctx.renderManifests()
			if err != nil {
				t.Fatalf("failed to render manifests: %v", err)
			}
			value, exists := renderedManifests["machine-config-server-configmap.yaml"]
			if !exists {
				t.Fatalf("Bug: machine-config-server/machine-config-server-configmap.yaml manifest does not exist")
			}
			// The configmap embeds an Infrastructure object as a nested YAML
			// document; unwrap it in two steps.
			var configMap corev1.ConfigMap
			if err := yaml.Unmarshal(value, &configMap); err != nil {
				t.Fatalf("failed to unmarshal manifest into a configmap: %v, raw manifest: %s", err, string(value))
			}
			configRaw, exists := configMap.Data["cluster-infrastructure-02-config.yaml"]
			if !exists {
				t.Fatalf("configmap %s does not have a 'cluster-infrastructure-02-config.yaml' key", string(value))
			}
			var infrastructure configv1.Infrastructure
			if err := yaml.Unmarshal([]byte(configRaw), &infrastructure); err != nil {
				t.Fatalf("failed to unmarshal 'data' key into a configv1.Infrastructure: %v, raw: %s", err, configRaw)
			}
			if diff := cmp.Diff(configV1RTToHyperV1RT(infrastructure.Status.PlatformStatus.AWS.ResourceTags), tc.params.AWSResourceTags); diff != "" {
				t.Errorf("AWS resource tags in infrastructure differ from input: %s\nRaw manifest: %s", diff, string(value))
			}
		})
	}
}
// configV1RTToHyperV1RT converts config.openshift.io AWS resource tags into
// their hypershift API equivalents, preserving order. Empty input yields nil,
// matching the original append-based behavior (important for cmp.Diff against
// a nil expectation).
func configV1RTToHyperV1RT(in []configv1.AWSResourceTag) []hyperv1.AWSResourceTag {
	if len(in) == 0 {
		return nil
	}
	// Pre-size to avoid repeated growth copies.
	result := make([]hyperv1.AWSResourceTag, 0, len(in))
	for _, entry := range in {
		result = append(result, hyperv1.AWSResourceTag{Key: entry.Key, Value: entry.Value})
	}
	return result
}
|
[
"\"UPDATE\""
] |
[] |
[
"UPDATE"
] |
[]
|
["UPDATE"]
|
go
| 1 | 0 | |
train.py
|
from __future__ import print_function
import os
# Pin training to GPU 0; must be set before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
import argparse
import torch.utils.data as data
from data import WiderFaceDetection, detection_collate, preproc, cfg_mnet, cfg_re50
from layers.modules import MultiBoxLoss
from layers.functions.prior_box import PriorBox
import time
import datetime
import math
from models.retinaface import RetinaFace
parser = argparse.ArgumentParser(description='Retinaface Training')
parser.add_argument('--training_dataset', default='./data/widerface/train/label.txt', help='Training dataset directory')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--num_workers', default=0, type=int, help='Number of workers used in dataloading')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--resume_net', default=None, help='resume net for retraining')
parser.add_argument('--resume_epoch', default=0, type=int, help='resume iter for retraining')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')
parser.add_argument('--save_folder', default='./weights/', help='Location to save checkpoint models')
args = parser.parse_args()

if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)

# Select the backbone config; cfg stays None for an unrecognized network name.
cfg = None
if args.network == "mobile0.25":
    cfg = cfg_mnet
elif args.network == "resnet50":
    cfg = cfg_re50

rgb_mean = (104, 117, 123) # bgr order
num_classes = 2
img_dim = cfg['image_size']
num_gpu = cfg['ngpu']
batch_size = cfg['batch_size']
max_epoch = cfg['epoch']
gpu_train = cfg['gpu_train']

num_workers = args.num_workers
momentum = args.momentum
weight_decay = args.weight_decay
initial_lr = args.lr
gamma = args.gamma
training_dataset = args.training_dataset
save_folder = args.save_folder

net = RetinaFace(cfg=cfg)
print("Printing net...")
print(net)

if args.resume_net is not None:
    print('Loading resume network...')
    state_dict = torch.load(args.resume_net)
    # create new OrderedDict that does not contain `module.`
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:] # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)

if num_gpu > 1 and gpu_train:
    net = torch.nn.DataParallel(net).cuda()
else:
    net = net.cuda()

cudnn.benchmark = True

optimizer = optim.SGD(net.parameters(), lr=initial_lr, momentum=momentum, weight_decay=weight_decay)
criterion = MultiBoxLoss(num_classes, 0.35, True, 0, True, 7, 0.35, False)

# Anchor boxes are fixed for a given image size; compute them once up front.
priorbox = PriorBox(cfg, image_size=(img_dim, img_dim))
with torch.no_grad():
    priors = priorbox.forward()
    priors = priors.cuda()
def train():
    """Run the full training loop, checkpointing periodically.

    Iterates max_epoch * epoch_size steps, rebuilding the DataLoader at each
    epoch boundary, decaying the LR at the configured decay epochs, and saving
    a final checkpoint to save_folder when done.
    """
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = WiderFaceDetection(training_dataset, preproc(img_dim, rgb_mean))

    epoch_size = math.ceil(len(dataset) / batch_size)
    max_iter = max_epoch * epoch_size

    # Iterations at which the learning rate is decayed by gamma.
    stepvalues = (cfg['decay1'] * epoch_size, cfg['decay2'] * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True, num_workers=num_workers, collate_fn=detection_collate))
            # Periodic checkpoint: every 10 epochs, or every 5 after decay1.
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > cfg['decay1']):
                torch.save(net.state_dict(), save_folder + cfg['name'] + '_epoch_' + str(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        images = images.cuda()
        targets = [anno.cuda() for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c, loss_landm = criterion(out, priors, targets)
        # Total loss: weighted localization + classification + landmark terms.
        loss = cfg['loc_weight'] * loss_l + loss_c + loss_landm
        loss.backward()
        optimizer.step()

        load_t1 = time.time()
        batch_time = load_t1 - load_t0
        eta = int(batch_time * (max_iter - iteration))
        print('Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || Loc: {:.4f} Cla: {:.4f} Landm: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'
              .format(epoch, max_epoch, (iteration % epoch_size) + 1,
              epoch_size, iteration + 1, max_iter, loss_l.item(), loss_c.item(), loss_landm.item(), lr, batch_time, str(datetime.timedelta(seconds=eta))))

    torch.save(net.state_dict(), save_folder + cfg['name'] + '_Final.pth')
    # torch.save(net.state_dict(), save_folder + 'Final_Retinaface.pth')
# torch.save(net.state_dict(), save_folder + 'Final_Retinaface.pth')
def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
    """Sets the learning rate
    # Adapted from PyTorch Imagenet example:
    # https://github.com/pytorch/examples/blob/master/imagenet/main.py

    Returns the LR that was applied to all parameter groups: step decay
    initial_lr * gamma**step_index, with an optional linear warmup phase.
    """
    # warmup_epoch = -1 disables warmup entirely (epoch starts at 0).
    # NOTE(review): setting warmup_epoch to 0 would divide by zero below.
    warmup_epoch = -1
    if epoch <= warmup_epoch:
        lr = 1e-6 + (initial_lr - 1e-6) * iteration / (epoch_size * warmup_epoch)
    else:
        lr = initial_lr * (gamma ** (step_index))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
if __name__ == '__main__':
    # Script entry point: run the full training loop.
    train()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
codebank/usecase/process_transaction.go
|
package usecase
import (
"encoding/json"
"github.com/codeedu/codebank/domain"
"github.com/codeedu/codebank/dto"
"github.com/codeedu/codebank/infrastructure/kafka"
"os"
"time"
)
// UseCaseTransaction processes incoming card transactions: it validates them
// against the stored credit card, persists the outcome, and publishes the
// result to Kafka.
type UseCaseTransaction struct {
	TransactionRepository domain.TransactionRepository
	KafkaProducer         kafka.KafkaProducer
}
// NewUseCaseTransaction builds a UseCaseTransaction around the repository.
// NOTE(review): KafkaProducer is left as its zero value here; callers appear
// to be expected to assign it before ProcessTransaction publishes — confirm.
func NewUseCaseTransaction(transactionRepository domain.TransactionRepository) UseCaseTransaction {
	return UseCaseTransaction{TransactionRepository: transactionRepository}
}
// ProcessTransaction validates the incoming transaction against the stored
// credit card's balance/limit, persists the transaction and updated card, and
// publishes the resulting transaction JSON to the Kafka topic named by the
// KafkaTransactionsTopic environment variable. Returns the stored transaction.
func (u UseCaseTransaction) ProcessTransaction(transactionDto dto.Transaction) (domain.Transaction, error) {
	// Look up the persisted card matching the DTO's card details.
	creditCard := u.hydrateCreditCard(transactionDto)
	ccBalanceAndLimit, err := u.TransactionRepository.GetCreditCard(*creditCard)
	if err != nil {
		return domain.Transaction{}, err
	}
	// Overlay stored id/limit/balance onto the hydrated card.
	creditCard.ID = ccBalanceAndLimit.ID
	creditCard.Limit = ccBalanceAndLimit.Limit
	creditCard.Balance = ccBalanceAndLimit.Balance

	t := u.newTransaction(transactionDto, ccBalanceAndLimit)
	// Sets the transaction status and adjusts the card balance.
	t.ProcessAndValidate(creditCard)

	err = u.TransactionRepository.SaveTransaction(*t, *creditCard)
	if err != nil {
		return domain.Transaction{}, err
	}
	// Echo persisted fields back onto the DTO before publishing it.
	transactionDto.ID = t.ID
	transactionDto.CreatedAt = t.CreatedAt
	transactionDto.Status = t.Status
	transactionJson, err := json.Marshal(transactionDto)
	if err != nil {
		return domain.Transaction{}, err
	}
	// NOTE(review): unconventional env var name "KafkaTransactionsTopic" —
	// it is runtime behavior, so it must match the deployment config.
	err = u.KafkaProducer.Publish(string(transactionJson), os.Getenv("KafkaTransactionsTopic"))
	if err != nil {
		return domain.Transaction{}, err
	}
	return *t, nil
}
// hydrateCreditCard builds a domain credit card carrying the cardholder
// details from the incoming transaction DTO (id/limit/balance are filled in
// later from the repository).
func (u UseCaseTransaction) hydrateCreditCard(transactionDto dto.Transaction) *domain.CreditCard {
	card := domain.NewCreditCard()
	card.Name = transactionDto.Name
	card.Number = transactionDto.Number
	card.ExpirationMonth = transactionDto.ExpirationMonth
	card.ExpirationYear = transactionDto.ExpirationYear
	card.CVV = transactionDto.CVV
	return card
}
func (u UseCaseTransaction) newTransaction(transaction dto.Transaction, cc domain.CreditCard) *domain.Transaction {
t := domain.NewTransaction()
t.CreditCardId = cc.ID
t.Amount = transaction.Amount
t.Store = transaction.Store
t.Description = transaction.Description
t.CreatedAt = time.Now()
return t
}
|
[
"\"KafkaTransactionsTopic\""
] |
[] |
[
"KafkaTransactionsTopic"
] |
[]
|
["KafkaTransactionsTopic"]
|
go
| 1 | 0 | |
src/MainAPP/asgi.py
|
"""
ASGI entrypoint file for default channel layer.
Points to the channel layer configured as "default" so you can point
ASGI applications at "HomeAutomation.asgi:channel_layer" as their channel layer.
"""
import os
from channels.asgi import get_channel_layer
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MainAPP.settings.production")
channel_layer = get_channel_layer()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/reconciler/bucket_reconciler.go
|
package reconciler
import (
"context"
"fmt"
"net/url"
"os"
algov1beta1 "pipeline-operator/pkg/apis/algorun/v1beta1"
utils "pipeline-operator/pkg/utilities"
"regexp"
"strings"
corev1 "k8s.io/api/core/v1"
"github.com/minio/minio-go/v6"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// NewBucketReconciler returns a new BucketReconciler
func NewBucketReconciler(pipelineDeployment *algov1beta1.PipelineDeployment,
request *reconcile.Request,
manager manager.Manager) BucketReconciler {
return BucketReconciler{
pipelineDeployment: pipelineDeployment,
request: request,
manager: manager,
}
}
// BucketReconciler reconciles the S3 bucket for a deployment
type BucketReconciler struct {
pipelineDeployment *algov1beta1.PipelineDeployment
request *reconcile.Request
manager manager.Manager
}
// Reconcile executes the Storage Bucket reconciliation process
func (bucketReconciler *BucketReconciler) Reconcile() error {
kubeUtil := utils.NewKubeUtil(bucketReconciler.manager, bucketReconciler.request)
storageSecretName, err := kubeUtil.GetStorageSecretName(&bucketReconciler.pipelineDeployment.Spec)
if storageSecretName != "" && err == nil {
// Get the MC config secret
storageSecret := &corev1.Secret{}
err := bucketReconciler.manager.GetClient().Get(
context.TODO(),
types.NamespacedName{
Name: storageSecretName,
Namespace: bucketReconciler.pipelineDeployment.Spec.DeploymentNamespace,
},
storageSecret)
if err != nil {
return err
}
// Parse the secret
endpoint, accessKey, secret, err := parseEnvURLStr(string(storageSecret.Data["connection-string"]))
if err != nil {
return err
}
// Create the bucket
minioClient, err := minio.New(endpoint.Host, accessKey, secret, endpoint.Scheme == "https")
if err != nil {
return err
}
// default bucket name
bucketName := fmt.Sprintf("%s.%s",
strings.ToLower(bucketReconciler.pipelineDeployment.Spec.DeploymentOwner),
strings.ToLower(bucketReconciler.pipelineDeployment.Spec.DeploymentName))
// Use env var if set
bucketNameEnv := os.Getenv("STORAGE_BUCKET_NAME")
if bucketNameEnv != "" {
bucketName = strings.Replace(bucketNameEnv, "{deploymentowner}", bucketReconciler.pipelineDeployment.Spec.DeploymentOwner, -1)
bucketName = strings.Replace(bucketName, "{deploymentname}", bucketReconciler.pipelineDeployment.Spec.DeploymentName, -1)
bucketName = strings.ToLower(bucketName)
}
// default region
regionName := "us-east-1"
// Use env var if set
regionNameEnv := os.Getenv("STORAGE_REGION")
if regionNameEnv != "" {
regionName = regionNameEnv
}
exists, err := minioClient.BucketExists(bucketName)
if err != nil {
return err
}
if !exists {
err = minioClient.MakeBucket(bucketName, regionName)
if err != nil {
return err
}
}
} else {
log.Error(err, "Storage Connection String secret doesn't exist. Unable to reconcile storage bucket.")
}
return nil
}
// parse url usually obtained from env.
func parseEnvURL(envURL string) (*url.URL, string, string, error) {
u, e := url.Parse(envURL)
if e != nil {
return nil, "", "", fmt.Errorf("S3 Endpoint url invalid [%s]", envURL)
}
var accessKey, secretKey string
// Check if username:password is provided in URL, with no
// access keys or secret we proceed and perform anonymous
// requests.
if u.User != nil {
accessKey = u.User.Username()
secretKey, _ = u.User.Password()
}
// Look for if URL has invalid values and return error.
if !((u.Scheme == "http" || u.Scheme == "https") &&
(u.Path == "/" || u.Path == "") && u.Opaque == "" &&
!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
return nil, "", "", fmt.Errorf("S3 Endpoint url invalid [%s]", u.String())
}
// Now that we have validated the URL to be in expected style.
u.User = nil
return u, accessKey, secretKey, nil
}
// parse url usually obtained from env.
func parseEnvURLStr(envURL string) (*url.URL, string, string, error) {
var envURLStr string
u, accessKey, secretKey, err := parseEnvURL(envURL)
if err != nil {
// url parsing can fail when accessKey/secretKey contains non url encoded values
// such as #. Strip accessKey/secretKey from envURL and parse again.
re := regexp.MustCompile("^(https?://)(.*?):(.*?)@(.*?)$")
res := re.FindAllStringSubmatch(envURL, -1)
// regex will return full match, scheme, accessKey, secretKey and endpoint:port as
// captured groups.
if res == nil || len(res[0]) != 5 {
return nil, "", "", err
}
for k, v := range res[0] {
if k == 2 {
accessKey = v
}
if k == 3 {
secretKey = v
}
if k == 1 || k == 4 {
envURLStr = fmt.Sprintf("%s%s", envURLStr, v)
}
}
u, _, _, err = parseEnvURL(envURLStr)
if err != nil {
return nil, "", "", err
}
}
// Check if username:password is provided in URL, with no
// access keys or secret we proceed and perform anonymous
// requests.
if u.User != nil {
accessKey = u.User.Username()
secretKey, _ = u.User.Password()
}
return u, accessKey, secretKey, nil
}
|
[
"\"STORAGE_BUCKET_NAME\"",
"\"STORAGE_REGION\""
] |
[] |
[
"STORAGE_BUCKET_NAME",
"STORAGE_REGION"
] |
[]
|
["STORAGE_BUCKET_NAME", "STORAGE_REGION"]
|
go
| 2 | 0 | |
tests/integration/devfile/cmd_devfile_debug_test.go
|
package devfile
import (
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/openshift/odo/pkg/util"
"github.com/openshift/odo/tests/helper"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("odo devfile debug command tests", func() {
var namespace, context, componentName, currentWorkingDirectory, projectDirPath, originalKubeconfig string
var projectDir = "/projectDir"
// Using program command according to cliRunner in devfile
cliRunner := helper.GetCliRunner()
// This is run after every Spec (It)
var _ = BeforeEach(func() {
SetDefaultEventuallyTimeout(10 * time.Minute)
context = helper.CreateNewContext()
os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml"))
// Devfile push requires experimental mode to be set
helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true")
originalKubeconfig = os.Getenv("KUBECONFIG")
helper.LocalKubeconfigSet(context)
namespace = cliRunner.CreateRandNamespaceProject()
currentWorkingDirectory = helper.Getwd()
componentName = helper.RandString(6)
helper.Chdir(context)
projectDirPath = context + projectDir
})
// Clean up after the test
// This is run after every Spec (It)
var _ = AfterEach(func() {
cliRunner.DeleteNamespaceProject(namespace)
helper.Chdir(currentWorkingDirectory)
err := os.Setenv("KUBECONFIG", originalKubeconfig)
Expect(err).NotTo(HaveOccurred())
helper.DeleteDir(context)
os.Unsetenv("GLOBALODOCONFIG")
})
Context("odo debug on a nodejs:latest component", func() {
It("check that machine output debug information works", func() {
helper.MakeDir(projectDirPath)
helper.Chdir(projectDirPath)
helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-debugrun.yaml"), filepath.Join(projectDirPath, "devfile-with-debugrun.yaml"))
helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml")
helper.CmdShouldPass("odo", "push", "--debug")
httpPort, err := util.HTTPGetFreePort()
Expect(err).NotTo(HaveOccurred())
freePort := strconv.Itoa(httpPort)
stopChannel := make(chan bool)
go func() {
helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward", "--local-port", freePort)
}()
// Make sure that the debug information output, outputs correctly.
// We do *not* check the json output since the debugProcessID will be different each time.
helper.WaitForCmdOut("odo", []string{"debug", "info", "-o", "json"}, 1, false, func(output string) bool {
if strings.Contains(output, `"kind": "OdoDebugInfo"`) &&
strings.Contains(output, `"localPort": `+freePort) {
return true
}
return false
})
stopChannel <- true
})
It("should expect a ws connection when tried to connect on default debug port locally", func() {
helper.MakeDir(projectDirPath)
helper.Chdir(projectDirPath)
helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-debugrun.yaml"), filepath.Join(projectDirPath, "devfile-with-debugrun.yaml"))
helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml")
helper.CmdShouldPass("odo", "push")
helper.CmdShouldPass("odo", "push", "--debug")
stopChannel := make(chan bool)
go func() {
helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward")
}()
// 400 response expected because the endpoint expects a websocket request and we are doing a HTTP GET
// We are just using this to validate if nodejs agent is listening on the other side
helper.HttpWaitForWithStatus("http://localhost:5858", "WebSockets request was expected", 12, 5, 400)
stopChannel <- true
})
})
Context("odo debug info should work on a odo component", func() {
It("should start a debug session and run debug info on a running debug session", func() {
helper.MakeDir(projectDirPath)
helper.Chdir(projectDirPath)
helper.CmdShouldPass("odo", "create", "nodejs", "nodejs-cmp-"+namespace, "--project", namespace)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-debugrun.yaml"), filepath.Join(projectDirPath, "devfile-with-debugrun.yaml"))
helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml")
helper.CmdShouldPass("odo", "push", "--debug")
httpPort, err := util.HTTPGetFreePort()
Expect(err).NotTo(HaveOccurred())
freePort := strconv.Itoa(httpPort)
stopChannel := make(chan bool)
go func() {
helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward", "--local-port", freePort)
}()
// 400 response expected because the endpoint expects a websocket request and we are doing a HTTP GET
// We are just using this to validate if nodejs agent is listening on the other side
helper.HttpWaitForWithStatus("http://localhost:"+freePort, "WebSockets request was expected", 12, 5, 400)
runningString := helper.CmdShouldPass("odo", "debug", "info")
Expect(runningString).To(ContainSubstring(freePort))
Expect(helper.ListFilesInDir(os.TempDir())).To(ContainElement(namespace + "-nodejs-cmp-" + namespace + "-odo-debug.json"))
stopChannel <- true
})
It("should start a debug session and run debug info on a closed debug session", func() {
helper.MakeDir(projectDirPath)
helper.Chdir(projectDirPath)
helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-debugrun.yaml"), filepath.Join(projectDirPath, "devfile-with-debugrun.yaml"))
helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml")
helper.CmdShouldPass("odo", "push", "--debug")
httpPort, err := util.HTTPGetFreePort()
Expect(err).NotTo(HaveOccurred())
freePort := strconv.Itoa(httpPort)
stopChannel := make(chan bool)
go func() {
helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward", "--local-port", freePort)
}()
// 400 response expected because the endpoint expects a websocket request and we are doing a HTTP GET
// We are just using this to validate if nodejs agent is listening on the other side
helper.HttpWaitForWithStatus("http://localhost:"+freePort, "WebSockets request was expected", 12, 5, 400)
runningString := helper.CmdShouldPass("odo", "debug", "info")
Expect(runningString).To(ContainSubstring(freePort))
stopChannel <- true
failString := helper.CmdShouldFail("odo", "debug", "info")
Expect(failString).To(ContainSubstring("not running"))
// according to https://golang.org/pkg/os/#Signal On Windows, sending os.Interrupt to a process with os.Process.Signal is not implemented
// discussion on the go repo https://github.com/golang/go/issues/6720
// session.Interrupt() will not work as it internally uses syscall.SIGINT
// thus debug port-forward won't stop running
// the solution is to use syscall.SIGKILL for windows but this will kill the process immediately
// and the cleaning and closing tasks for debug port-forward won't run and the debug info file won't be cleared
// thus we skip this last check
// CTRL_C_EVENTS from the terminal works fine https://github.com/golang/go/issues/6720#issuecomment-66087737
// here's a hack to generate the event https://golang.org/cl/29290044
// but the solution is unacceptable https://github.com/golang/go/issues/6720#issuecomment-66087749
if runtime.GOOS != "windows" {
Expect(helper.ListFilesInDir(os.TempDir())).To(Not(ContainElement(namespace + "-app" + "-nodejs-cmp-" + namespace + "-odo-debug.json")))
}
})
})
})
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
mkimg/main.go
|
package main
import (
"bytes"
"log"
"os"
"github.com/philpearl/scratchbuild"
)
func main() {
user := os.Getenv("USER")
pass := os.Getenv("PASS")
dir := "./appdir"
log.Printf("USER=[%s] PASS=[%s] dir=[%s]", user, pass, dir)
name := user + "/test"
registry := "https://index.docker.io"
log.Printf("will create image %s on %s", name, registry)
o := scratchbuild.Options{
Dir: dir,
Name: name,
BaseURL: registry,
Tag: "latest",
User: user,
Password: pass,
}
b := &bytes.Buffer{}
if err := scratchbuild.TarDirectory(dir, b); err != nil {
log.Fatalf("failed to tar layer. %s", err)
}
c := scratchbuild.New(&o)
token, err := c.Auth()
if err != nil {
log.Fatalf("failed to authorize. %s", err)
}
c.Token = token
log.Printf("authorized as USER=%s on %s", user, registry)
if err := c.BuildImage(&scratchbuild.ImageConfig{
Entrypoint: []string{"/app"},
}, b.Bytes()); err != nil {
log.Fatalf("failed to build and send image. %s", err)
}
log.Printf("uploaded image %s to %s", name, registry)
}
|
[
"\"USER\"",
"\"PASS\""
] |
[] |
[
"USER",
"PASS"
] |
[]
|
["USER", "PASS"]
|
go
| 2 | 0 | |
es6widgetexample/jstest.py
|
"""Run IPython widget javascript tests
run with `gulp tests; python -m jstest`
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
try:
from unittest.mock import patch
except ImportError:
# py2
from mock import patch
from notebook import jstest
from es6widgetexample.install import install
here = os.path.dirname(__file__)
class WidgetTestController(jstest.JSController):
"""Javascript test subclass that installs widget nbextension in test
environment
"""
def __init__(self, section, *args, **kwargs):
extra_args = kwargs.pop('extra_args', None)
super(WidgetTestController, self).__init__(section, *args, **kwargs)
test_cases = os.path.join(
here, 'static', 'es6-widget-example', 'js', 'tests', self.section)
self.cmd = [
'casperjs', 'test', test_cases, '--engine=%s' % self.engine
]
if extra_args is not None:
self.cmd = self.cmd + extra_args
def setup(self):
super(WidgetTestController, self).setup()
with patch.dict(os.environ, self.env):
install(user=True, enable=True)
def prepare_controllers(options):
"""Monkeypatched prepare_controllers for running widget js tests
instead of notebook js tests
"""
if options.testgroups:
groups = options.testgroups
else:
groups = ['']
return [
WidgetTestController(g, extra_args=options.extra_args)
for g in groups
], []
def main():
with patch.object(jstest, 'prepare_controllers', prepare_controllers):
jstest.main()
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
djavue-python-brasil/settings.py
|
"""
Django settings for djavue-python-brasil project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from commons.version import VERSION
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', 'a_secret_key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DJANGO_DEBUG', '1') == '1'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
SENTRY_DSN = os.getenv('SENTRY_DSN', '')
if SENTRY_DSN:
INSTALLED_APPS.append('raven.contrib.django.raven_compat')
RAVEN_CONFIG = {
'dsn': SENTRY_DSN,
'release': VERSION,
'tags': {
'release': VERSION,
},
'processors': [
'raven.processors.SanitizePasswordsProcessor',
],
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djavue-python-brasil.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djavue-python-brasil.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DJANGO_DB_NAME', 'app'),
'USER': os.getenv('DJANGO_DB_USER', 'app'),
'PASSWORD': os.getenv('DJANGO_DB_PASSWORD', 'app'),
'HOST': os.getenv('DJANGO_DB_HOST', 'localhost'),
'PORT': os.getenv('DJANGO_DB_PORT', '5432'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.getenv('DJANGO_STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.getenv('DJANGO_LOG_FILE', './djavue-python-brasil.log'),
'formatter': 'simple'
},
},
'loggers': {
'': {
'handlers': ['file'],
'level': 'DEBUG' if DEBUG else 'INFO',
'propagate': True,
},
'django': {
'handlers': ['file'],
'level': 'DEBUG' if DEBUG else 'INFO',
'propagate': True,
},
}
}
if DEBUG:
# make all loggers use the console.
for logger in LOGGING['loggers']:
LOGGING['loggers'][logger]['handlers'] = ['console']
|
[] |
[] |
[
"DJANGO_SECRET_KEY",
"DJANGO_DB_HOST",
"SENTRY_DSN",
"DJANGO_DB_NAME",
"DJANGO_DEBUG",
"DJANGO_DB_PORT",
"DJANGO_STATIC_ROOT",
"DJANGO_DB_USER",
"DJANGO_LOG_FILE",
"DJANGO_DB_PASSWORD"
] |
[]
|
["DJANGO_SECRET_KEY", "DJANGO_DB_HOST", "SENTRY_DSN", "DJANGO_DB_NAME", "DJANGO_DEBUG", "DJANGO_DB_PORT", "DJANGO_STATIC_ROOT", "DJANGO_DB_USER", "DJANGO_LOG_FILE", "DJANGO_DB_PASSWORD"]
|
python
| 10 | 0 | |
python/qibuild/test/projects/stagescript/src/shlib.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" ShLib """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
def load_shlib(name, verbose=False):
""" Load a shared library, try to handle as many cases as possible """
import ctypes
import os
import sys
medir = os.path.dirname(os.path.abspath(__file__))
if verbose:
print("PATH: " + os.environ.get('PATH', ''))
print("LD_LIBRARY_PATH: " + os.environ.get('LD_LIBRARY_PATH', ''))
print("DYLD_LIBRARY_PATH: " + os.environ.get('DYLD_LIBRARY_PATH', ''))
if sys.platform.startswith('linux'):
soname = [".so"]
prefix = "lib"
paths = ['', '../lib', os.path.join(medir, '..', 'lib')]
elif sys.platform.startswith('darwin'):
soname = [".dylib"]
prefix = "lib"
paths = ['', '../lib', os.path.join(medir, '..', 'lib')]
else:
# Windows has no RPATH equivalent, but allows for changing the
# search path within the process.
ctypes.windll.kernel32.SetDllDirectoryA(os.path.join(medir))
soname = [".dll", "_d.dll"]
prefix = ''
paths = ['', '.', os.path.join(medir, '..', 'lib'), os.path.join(medir, '..', 'bin')]
if verbose:
print("Loading " + name)
handle = None
for s in soname:
for p in paths:
path = os.path.join(p, prefix + name + s)
try:
handle = ctypes.cdll.LoadLibrary(path)
break
except Exception as e:
if verbose:
print(path + " : " + str(e))
return handle
|
[] |
[] |
[
"LD_LIBRARY_PATH",
"PATH",
"DYLD_LIBRARY_PATH"
] |
[]
|
["LD_LIBRARY_PATH", "PATH", "DYLD_LIBRARY_PATH"]
|
python
| 3 | 0 | |
ses/main.py
|
import re
import os
import json
import sys
import traceback
from hashlib import sha1
from queue import Queue, Empty
from threading import Thread
from itertools import zip_longest
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from collections import defaultdict
from google.ads.googleads.client import GoogleAdsClient
from google.api_core import protobuf_helpers
from .banner import banner
from .auth import load_user_auth, load_organization_auth
cache_directory = os.path.join(
os.getenv('HOME'), '.cache', 'sem-emergency-stop'
)
blob_directory = os.path.join(cache_directory, 'blobs')
match_customer_id = re.compile(r'^customers/\d+/customerClients/(\d+)$').match
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def parse_customer_id(resource_name):
return int(match_customer_id(resource_name).group(1))
def query(service, customer_id, query):
return service.search_stream(customer_id=str(customer_id), query=query)
def collect_customer_ids(client):
service = client.get_service('GoogleAdsService', version='v8')
return [
parse_customer_id(row.customer_client.resource_name)
for response in query(
service,
client.login_customer_id,
'SELECT customer.id FROM customer_client',
)
for row in response.results
]
def load_blob(sha1_hash):
with open(os.path.join(blob_directory, sha1_hash), 'rb') as f:
return json.load(f)
def load_campaign_sets(sha1_hash):
return load_blob(sha1_hash)['campaign_sets']
def store_blob(obj):
data = json.dumps(obj, sort_keys=True).encode('utf-8')
sha1_hash = sha1(data).hexdigest()
with open(os.path.join(blob_directory, sha1_hash), 'wb') as f:
f.write(data)
return sha1_hash
def store_customer_campaign_set(customer_id, campaign_ids):
return store_blob(
{
'customer_id': customer_id,
'campaign_ids': sorted(campaign_ids),
}
)
def store_campaign_sets(campaign_sets):
return store_blob(
{
'campaign_sets': sorted(campaign_sets),
}
)
def collect_campaign_ids(client, customer_id):
service = client.get_service('GoogleAdsService', version='v8')
return [
row.campaign.id
for response in query(
service,
customer_id,
"""
SELECT campaign.id
FROM campaign
WHERE
campaign.status = 'ENABLED'
AND campaign.experiment_type = 'BASE'
AND campaign.advertising_channel_type != 'VIDEO'
AND campaign.advertising_channel_type != 'LOCAL'""",
)
for row in response.results
]
def retrieve_campaign_ids(
client, verbose, customer_ids, campaign_sets, progress_queue
):
while True:
try:
customer_id = customer_ids.get_nowait()
except Empty:
return
ids = collect_campaign_ids(client, customer_id)
campaign_set = store_customer_campaign_set(customer_id, ids)
campaign_sets.put(campaign_set)
progress_queue.put_nowait(('customers', 1))
progress_queue.put_nowait(('campaigns', len(ids)))
customer_ids.task_done()
def get_operation(client, service, customer_id, campaign_id, is_pause):
operation = client.get_type('CampaignOperation', version='v8')
campaign = operation.update
campaign.resource_name = service.campaign_path(customer_id, campaign_id)
enum = client.get_type('CampaignStatusEnum', version='v8')
campaign.status = enum.PAUSED if is_pause else enum.ENABLED
operation.update_mask.CopyFrom(protobuf_helpers.field_mask(None, campaign))
return operation
def mutate_campaigns(
client,
service,
sha1_hash,
verbose,
no_dry_run,
is_pause,
campaign_set_queue,
progress_queue,
):
campaign_set = load_blob(sha1_hash)
customer_id = campaign_set['customer_id']
campaign_ids = campaign_set['campaign_ids']
if not campaign_ids:
progress_queue.put(('customers', 1))
return
for chunk in grouper(campaign_ids, 1000):
request = client.get_type('MutateCampaignsRequest')
request.customer_id = str(customer_id)
request.validate_only = not no_dry_run
for campaign_id in chunk:
if campaign_id:
request.operations.append(
get_operation(
client, service, customer_id, campaign_id, is_pause
)
)
service.mutate_campaigns(request)
progress_queue.put(('campaigns', len(request.operations)))
progress_queue.put(('customers', 1))
def mutate_worker(
client, verbose, no_dry_run, is_pause, campaign_set_queue, progress_queue
):
service = client.get_service('CampaignService', version='v8')
while True:
try:
sha1_hash = campaign_set_queue.get_nowait()
except Empty:
return
try:
mutate_campaigns(
client,
service,
sha1_hash,
verbose,
no_dry_run,
is_pause,
campaign_set_queue,
progress_queue,
)
except Exception:
# We don't want this worker thread to die and block joining
# at the end of the process.
traceback.print_exc()
campaign_set_queue.task_done()
def get_all(queue):
while True:
try:
yield queue.get_nowait()
except Empty:
return
def start_workers(num, func, args):
for i in range(num):
Thread(target=func, args=args).start()
def progress_monitor(totals, progress_queue, exit_queue):
progress = defaultdict(int)
while True:
metric, n = progress_queue.get()
progress[metric] += n
end = "\n" if metric == 'exit' else "\r"
print(
f" completed {progress['customers']}/{totals['customers']} "
f"customers and {progress['campaigns']} campaigns",
end=end,
)
if metric == 'exit':
exit_queue.put(True)
return
def start_progress_monitor(totals):
progress_queue = Queue()
exit_queue = Queue()
Thread(
target=progress_monitor, args=(totals, progress_queue, exit_queue)
).start()
return progress_queue, exit_queue
def collect(client, args):
customer_id_queue = Queue()
campaign_set_queue = Queue()
print('[1/3] getting customer ids...')
customer_ids = collect_customer_ids(client)
customer_count = len(customer_ids)
if customer_count == 1:
print('found one customer')
else:
print(f'found {customer_count} customers')
for customer_id in customer_ids:
customer_id_queue.put(customer_id)
progress_queue, exit_queue = start_progress_monitor(
{'customers': customer_count}
)
progress_queue.put_nowait(('init', 1))
print('[2/3] getting campaign ids...')
start_workers(
args.workers,
retrieve_campaign_ids,
(
client,
args.verbose,
customer_id_queue,
campaign_set_queue,
progress_queue,
),
)
customer_id_queue.join()
progress_queue.put_nowait(('exit', 1))
exit_queue.get()
campaign_sets = store_campaign_sets(get_all(campaign_set_queue))
print(f'[2/3] committed campaign sets {campaign_sets}')
return campaign_sets
def pause_unpause(client, args, is_pause):
campaign_sets_id = args.campaign_sets or collect(client, args)
step_num = 1 if args.campaign_sets else 3
step = f'[{step_num}/{step_num}]'
print(f'{step} loading campaign sets {campaign_sets_id}...')
campaign_set_queue = Queue()
campaign_sets = load_campaign_sets(campaign_sets_id)
for campaign_set in campaign_sets:
campaign_set_queue.put(campaign_set)
progress_queue, exit_queue = start_progress_monitor(
{'customers': len(campaign_sets)}
)
progress_queue.put_nowait(('init', 1))
print(f"{step} {'' if is_pause else 'un'}pausing campaigns...")
start_workers(
args.workers,
mutate_worker,
(
client,
args.verbose,
args.no_dry_run,
is_pause,
campaign_set_queue,
progress_queue,
),
)
campaign_set_queue.join()
progress_queue.put_nowait(('exit', 1))
exit_queue.get()
print('done')
if is_pause:
print('you can unpause by running')
print(f'{sys.argv[0]} unpause --no-dry-run {campaign_sets_id}')
def pause(client, args):
return pause_unpause(client, args, True)
def unpause(client, args):
return pause_unpause(client, args, False)
def setup(client, args):
print('All set up!')
def parse_arguments(args):
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description=banner + '\n\nEmergency stop for all Google SEM',
)
subparsers = parser.add_subparsers(help='sub-command help')
all_shared = ArgumentParser(add_help=False)
all_shared.add_argument(
'--workers',
help='use NUM workers in parallel',
type=int,
metavar='NUM',
default=16,
)
all_shared.add_argument('-v', '--verbose', action='store_true')
collect_parser = subparsers.add_parser(
'collect', help='only collect campaign ids', parents=[all_shared]
)
collect_parser.set_defaults(func=collect)
mutation_shared = ArgumentParser(add_help=False)
mutation_shared.add_argument(
'--no-dry-run',
help='actually perform the mutations',
action='store_true',
)
pause_parser = subparsers.add_parser(
'pause', help='pause campaigns', parents=[all_shared, mutation_shared]
)
pause_parser.add_argument(
'campaign_sets',
help='use CAMPAIGN-SETS for pausing',
metavar='CAMPAIGN-SETS',
nargs='?',
)
pause_parser.set_defaults(func=pause)
unpause_parser = subparsers.add_parser(
'unpause',
help='unpause campaigns',
parents=[all_shared, mutation_shared],
)
unpause_parser.add_argument(
'campaign_sets',
help='use CAMPAIGN-SETS for unpausing (use the hash from pausing)',
metavar='CAMPAIGN-SETS',
)
unpause_parser.set_defaults(func=unpause)
setup_parser = subparsers.add_parser(
'setup', help='set up authentication only', parents=[all_shared]
)
setup_parser.set_defaults(func=setup)
return parser.parse_args(args or ['pause', '--help'])
def run():
os.makedirs(blob_directory, exist_ok=True)
args = parse_arguments(sys.argv[1:])
print(banner)
credentials = {
**load_organization_auth(),
**load_user_auth(),
'use_proto_plus': False,
}
client = GoogleAdsClient.load_from_dict(credentials)
if 'no_dry_run' in args:
if args.no_dry_run:
print(
"\033[31mYou are about to do a non-dry run, please type YOLO:"
)
if input('> ') != 'YOLO':
print('alright, that was close!')
sys.exit(-1)
else:
print('*** THIS IS A DRY RUN ***')
print('to perform a non-dry run, supply --no-dry-run')
args.func(client, args)
if 'no_dry_run' in args and not args.no_dry_run:
print('*** THIS WAS A DRY RUN ***')
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
hookutils.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import errno
import hashlib
import os
import sys
from subprocess import Popen, PIPE, CalledProcessError, check_output
# GIT_DIFF_INDEX =
# GIT_DIFF_NO_HEAD = 'git diff-index --diff-filter=ACM --cached --name-only 4b825dc642cb6eb9a060e54bf8d69288fbee4904'
def execute(command, fail_on_error=True):
"""
Execute a command and returns the standard output lines.
If the command exits with a non-zero exit code and fail_on_error is true
then this script exits with a non zero exit code, which signals git-commit to fail.
:param command: the command to execute
:param fail_on_error: true if the script should exit on a non zero command exit
"""
lines = []
proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
for line in proc.stdout.readlines():
lines.append(line.strip())
for line in proc.stderr.readlines():
if line[:len('exit status')] != 'exit status':
print 'stderr> %s' % line.strip()
proc.wait()
code = proc.returncode
if code != 0 and fail_on_error:
print 'exit code = %s' % code
exit(code)
return lines
def calculate_head():
try:
return check_output('git rev-parse --verify HEAD', shell=True, stderr=PIPE).strip()
except CalledProcessError:
return '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
def mkdir_p(path):
"""
Create subdirectories without throwing an error if the directories exist
Origin http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
try:
os.makedirs(path)
except OSError, exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def find_commits():
return execute('git diff-index --diff-filter=ACM --cached --name-only ' + os.environ['_GIT_HEAD'])
def hashfile(file_path):
"""
Calculates the hash of a provided file path.
:param file_path: file to calculate the hash
"""
blocksize = 8192
afile = open(file_path, 'r')
buf = afile.read(blocksize)
hasher = hashlib.md5()
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
afile.close()
return hasher.hexdigest()
def in_git_directory():
try:
git_toplevel()
return True
except CalledProcessError:
return False
def git_toplevel():
"""
Returns the git toplevel directory
"""
return check_output('git rev-parse --show-toplevel', shell=True, stderr=PIPE).strip()
def get_files_to_format(extension):
def extension_filter(filename):
return filename.endswith(extension)
if len(sys.argv) > 1:
args = sys.argv[1:]
return filter(extension_filter, args)
else:
os.chdir(git_toplevel())
return filter(extension_filter, find_commits())
|
[] |
[] |
[
"_GIT_HEAD"
] |
[]
|
["_GIT_HEAD"]
|
python
| 1 | 0 | |
scripts/vpc/vpc_resource.py
|
import fixtures
import testtools
import os
from connections import ContrailConnections
from contrail_test_init import *
from vn_test import *
from vm_test import *
from quantum_test import *
from vnc_api_test import *
from nova_test import *
from testresources import OptimisingTestSuite, TestResource
from vpc_fixture_new import VPCFixture
from vpc_vn_fixture import VPCVNFixture
from vpc_vm_fixture import VPCVMFixture
class VPCTestSetup(fixtures.Fixture):
def __init__(self, test_resource):
super(VPCTestSetup, self).__init__()
self.test_resource = test_resource
def setUp(self):
super(VPCTestSetup, self).setUp()
if 'PARAMS_FILE' in os.environ:
self.ini_file = os.environ.get('PARAMS_FILE')
else:
self.ini_file = 'params.ini'
self.inputs = self.useFixture(ContrailTestInit(self.ini_file))
self.connections = ContrailConnections(self.inputs)
self.quantum_fixture = self.connections.quantum_fixture
self.nova_fixture = self.connections.nova_fixture
self.vnc_lib = self.connections.vnc_lib
self.logger = self.inputs.logger
self.setup_common_objects()
return self
# end setUp
def setup_common_objects(self):
self.vpc1_cidr = '10.2.5.0/24'
self.vpc1_vn1_cidr = '10.2.5.0/25'
self.vpc1_vn2_cidr = '10.2.5.128/25'
self.vpc2_cidr = '10.2.50.0/24'
self.vpc2_vn1_cidr = '10.2.50.0/25'
self.vpc1_fixture = self.useFixture(VPCFixture(self.vpc1_cidr,
connections=self.connections))
# assert self.vpc1_fixture.verify_on_setup()
self.vpc2_fixture = self.useFixture(VPCFixture(self.vpc2_cidr,
connections=self.connections))
# assert self.vpc2_fixture.verify_on_setup()
self.vpc1_vn1_fixture = self.useFixture(VPCVNFixture(
self.vpc1_fixture,
subnet_cidr=self.vpc1_vn1_cidr,
connections=self.connections))
# assert self.vpc1_vn1_fixture.verify_on_setup()
self.vpc1_vn2_fixture = self.useFixture(VPCVNFixture(
self.vpc1_fixture,
subnet_cidr=self.vpc1_vn2_cidr,
connections=self.connections))
self.vpc2_vn1_fixture = self.useFixture(VPCVNFixture(
self.vpc2_fixture,
subnet_cidr=self.vpc2_vn1_cidr,
connections=self.connections))
# assert self.vpc1_vn2_fixture.verify_on_setup()
self.vpc1_vn1_vm1_fixture = self.useFixture(
VPCVMFixture(self.vpc1_vn1_fixture,
image_name='ubuntu',
connections=self.connections))
self.vpc1_vn1_vm2_fixture = self.useFixture(VPCVMFixture(
self.vpc1_vn1_fixture,
image_name='ubuntu-traffic',
connections=self.connections))
self.vpc1_vn2_vm1_fixture = self.useFixture(VPCVMFixture(
self.vpc1_vn2_fixture,
image_name='ubuntu-traffic',
connections=self.connections))
self.vpc2_vn1_vm1_fixture = self.useFixture(VPCVMFixture(
self.vpc2_vn1_fixture,
image_name='ubuntu-traffic',
connections=self.connections))
# end setup_common_objects
def verify_common_objects(self):
assert self.vpc1_fixture.verify_on_setup()
assert self.vpc2_fixture.verify_on_setup()
assert self.vpc1_vn1_fixture.verify_on_setup()
assert self.vpc1_vn2_fixture.verify_on_setup()
assert self.vpc2_vn1_fixture.verify_on_setup()
assert self.vpc1_vn1_vm1_fixture.verify_on_setup()
assert self.vpc1_vn1_vm2_fixture.verify_on_setup()
assert self.vpc1_vn2_vm1_fixture.verify_on_setup()
assert self.vpc2_vn1_vm1_fixture.verify_on_setup()
self.vpc1_vn1_vm1_fixture.c_vm_fixture.wait_till_vm_is_up()
self.vpc1_vn1_vm2_fixture.c_vm_fixture.wait_till_vm_is_up()
self.vpc1_vn2_vm1_fixture.c_vm_fixture.wait_till_vm_is_up()
self.vpc2_vn1_vm1_fixture.c_vm_fixture.wait_till_vm_is_up()
# end verify_common_objects
def tearDown(self):
print "Tearing down resources"
super(VPCTestSetup, self).cleanUp()
def dirtied(self):
self.test_resource.dirtied(self)
class _VPCTestSetupResource(TestResource):
def make(self, dependencyresource):
base_setup = VPCTestSetup(self)
base_setup.setUp()
return base_setup
# end make
def clean(self, base_setup):
print "Am cleaning up here"
# super(_VPCTestSetupResource,self).clean()
base_setup.tearDown()
# end
VPCTestSetupResource = _VPCTestSetupResource()
|
[] |
[] |
[
"PARAMS_FILE"
] |
[]
|
["PARAMS_FILE"]
|
python
| 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
# =============================================================================
# SPHINX CONFIGURATION: behave documentation build configuration file
# =============================================================================
import os.path
import sys
# -- ENSURE: Local workspace is used (for sphinx apidocs).
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# -- OPTIONAL EXTENSIONS/PARTS:
# NOTES:
# * sphinxcontrib.youtube: Not easily installable
# => other package with same name in pypi
try:
import sphinxcontrib.youtube
has_extension_sphinxcontrib_youtube = True
except ImportError:
has_extension_sphinxcontrib_youtube = False
# ------------------------------------------------------------------------------
# GENERAL CONFIGGURATION
# ------------------------------------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named "sphinx.ext.*") or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.ifconfig",
"sphinx.ext.extlinks",
]
if has_extension_sphinxcontrib_youtube:
extensions.append("sphinxcontrib.youtube")
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"behave"
authors = u"Benno Rice, Richard Jones and Jens Engel"
copyright = u"2012-2016, %s" % authors
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from behave import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ""
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, "()" will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
extlinks = {
"pypi": ("https://pypi.python.org/pypi/%s", ""),
"github": ("https://github.com/%s", "github:/"),
"issue": ("https://github.com/behave/behave/issue/%s", "issue #"),
"youtube": ("https://www.youtube.com/watch?v=%s", "youtube:video="),
}
# -- SUPPORT: Documentation variation-points with sphinx.ext.ifconfig
def setup(app):
# -- VARIATION-POINT: supports_video
# BASED-ON: installed("sphinxcontrib-youtube") and output-mode
# TODO: Check for output-mode, too (supported on: HTML, ...)
supports_video = has_extension_sphinxcontrib_youtube
app.add_config_value("supports_video", supports_video, "env")
# ------------------------------------------------------------------------------
# OPTIONS FOR: HTML OUTPUT
# ------------------------------------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if on_rtd:
html_theme = "default"
else:
html_theme = "kr"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# agogo options:
# headerfont (CSS font family): Font for headings.
# pagewidth (CSS length): Width of the page content, default 70em.
# documentwidth (CSS length): Width of the document (without sidebar), default 50em.
# sidebarwidth (CSS length): Width of the sidebar, default 20em.
# bgcolor (CSS color): Background color.
# headerbg (CSS value for “background”): background for the header area, default a grayish gradient.
# footerbg (CSS value for “background”): background for the footer area, default a light gray gradient.
# linkcolor (CSS color): Body link color.
# headercolor1, headercolor2 (CSS color): colors for <h1> and <h2> headings.
# headerlinkcolor (CSS color): Color for the backreference link in headings.
# textalign (CSS text-align value): Text alignment for the body, default is justify.
html_theme_options = {
#"bodyfont": '"Ubuntu", sans-serif', # (CSS font family): Font for normal text.
#"github_fork": "behave/behave"
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/behave_logo1.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not "", a "Last updated on:" timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "behavedoc"
# ------------------------------------------------------------------------------
# OPTIONS FOR: LATEX OUTPUT
# ------------------------------------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
#"papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
#"pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
#"preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "behave.tex", u"behave Documentation", authors, "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# ------------------------------------------------------------------------------
# OPTIONS FOR: MANUAL PAGE (man page) OUTPUT
# ------------------------------------------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "behave", u"behave Documentation", [authors], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# ------------------------------------------------------------------------------
# OPTIONS FOR: Texinfo output
# ------------------------------------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
("index", "behave", u"behave Documentation", authors,
"behave", "A test runner for behave (feature tests).", "Miscellaneous"),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: "footnote", "no", or "inline".
#texinfo_show_urls = "footnote"
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
cmd/apis/ghcrtapis.go
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apis
import (
"fmt"
"os"
"regexp"
"github.com/apigee/apigeecli/apiclient"
proxybundle "github.com/apigee/apigeecli/bundlegen/proxybundle"
"github.com/apigee/apigeecli/client/apis"
"github.com/apigee/apigeecli/clilog"
"github.com/spf13/cobra"
)
var GhCreateCmd = &cobra.Command{
Use: "github",
Aliases: []string{"gh"},
Short: "Creates an API proxy from a GitHub repo",
Long: "Creates an API proxy from a GitHub repo",
Args: func(cmd *cobra.Command, args []string) (err error) {
//(\w+)?\/apiproxy$
re := regexp.MustCompile(`(\w+)?\/apiproxy$`)
if ok := re.Match([]byte(ghPath)); !ok {
return fmt.Errorf("Github path must end with /apiproxy")
}
return apiclient.SetApigeeOrg(org)
},
RunE: func(cmd *cobra.Command, args []string) (err error) {
if os.Getenv("GITHUB_TOKEN") == "" {
clilog.Info.Println("Github token is not set as an env var. Running unauthenticated")
}
if err = proxybundle.GitHubImportBundle(ghOwner, ghRepo, ghPath); err != nil {
proxybundle.CleanUp()
return err
}
_, err = apis.CreateProxy(name, bundleName)
proxybundle.CleanUp()
return err
},
}
const bundleName = "apiproxy.zip"
var ghOwner, ghRepo, ghPath string
func init() {
GhCreateCmd.Flags().StringVarP(&name, "name", "n",
"", "API Proxy name")
GhCreateCmd.Flags().StringVarP(&ghOwner, "owner", "u",
"", "The github organization or username. ex: In https://github.com/apigee, apigee is the owner name")
GhCreateCmd.Flags().StringVarP(&ghRepo, "repo", "r",
"", "The github repo name. ex: https://github.com/apigee/api-platform-samples, api-platform-samples is the repo")
GhCreateCmd.Flags().StringVarP(&ghPath, "proxy-path", "p",
"", "The path in the repo to the apiproxy folder. ex: sample-proxies/apikey/apiproxy")
_ = GhCreateCmd.MarkFlagRequired("name")
_ = GhCreateCmd.MarkFlagRequired("owner")
_ = GhCreateCmd.MarkFlagRequired("repo")
_ = GhCreateCmd.MarkFlagRequired("proxy-path")
}
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
python/ray/util/sgd/torch/examples/pytorch_pbt_failure.py
|
import argparse
import numpy as np
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Subset
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
import ray
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import PopulationBasedTraining
from ray.tune.utils.mock import FailureInjectorCallback
from ray.util.sgd.torch import TorchTrainer, TrainingOperator
from ray.util.sgd.torch.resnet import ResNet18
from ray.util.sgd.utils import BATCH_SIZE
from ray.tune.utils.release_test_util import ProgressCallback
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for training.")
args = parser.parse_args()
def initialization_hook():
# Need this for avoiding a connection restart issue on AWS.
os.environ["NCCL_SOCKET_IFNAME"] = "^docker0,lo"
os.environ["NCCL_LL_THRESHOLD"] = "0"
# set the below if needed
# print("NCCL DEBUG SET")
# os.environ["NCCL_DEBUG"] = "INFO"
def cifar_creator(config):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
]) # meanstd transformation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
train_dataset = CIFAR10(
root="~/data", train=True, download=True, transform=transform_train)
validation_dataset = CIFAR10(
root="~/data", train=False, download=False, transform=transform_test)
if config.get("test_mode"):
train_dataset = Subset(train_dataset, list(range(64)))
validation_dataset = Subset(validation_dataset, list(range(64)))
train_loader = DataLoader(
train_dataset, batch_size=config[BATCH_SIZE], num_workers=2)
validation_loader = DataLoader(
validation_dataset, batch_size=config[BATCH_SIZE], num_workers=2)
return train_loader, validation_loader
def optimizer_creator(model, config):
"""Returns optimizer"""
return torch.optim.SGD(
model.parameters(),
lr=config.get("lr", 0.1),
momentum=config.get("momentum", 0.9))
ray.init(address="auto" if not args.smoke_test else None, log_to_driver=True)
num_training_workers = 1 if args.smoke_test else 3
CustomTrainingOperator = TrainingOperator.from_creators(
model_creator=ResNet18,
optimizer_creator=optimizer_creator,
data_creator=cifar_creator,
loss_creator=nn.CrossEntropyLoss)
TorchTrainable = TorchTrainer.as_trainable(
training_operator_cls=CustomTrainingOperator,
initialization_hook=initialization_hook,
num_workers=num_training_workers,
config={
"test_mode": args.smoke_test,
BATCH_SIZE: 128 * num_training_workers,
},
use_gpu=not args.smoke_test,
backend="gloo", # This should also work with NCCL
)
pbt_scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="val_loss",
mode="min",
perturbation_interval=1,
hyperparam_mutations={
# distribution for resampling
"lr": lambda: np.random.uniform(0.001, 1),
# allow perturbations within this set of categorical values
"momentum": [0.8, 0.9, 0.99],
})
reporter = CLIReporter()
reporter.add_metric_column("val_loss", "loss")
reporter.add_metric_column("val_accuracy", "acc")
analysis = tune.run(
TorchTrainable,
num_samples=4,
config={
"lr": tune.choice([0.001, 0.01, 0.1]),
"momentum": 0.8,
"head_location": None,
"worker_locations": None
},
max_failures=-1, # used for fault tolerance
checkpoint_freq=2, # used for fault tolerance
progress_reporter=reporter,
scheduler=pbt_scheduler,
callbacks=[
FailureInjectorCallback(time_between_checks=90),
ProgressCallback()
],
stop={"training_iteration": 1} if args.smoke_test else None)
print(analysis.get_best_config(metric="val_loss", mode="min"))
|
[] |
[] |
[
"NCCL_DEBUG",
"NCCL_SOCKET_IFNAME",
"NCCL_LL_THRESHOLD"
] |
[]
|
["NCCL_DEBUG", "NCCL_SOCKET_IFNAME", "NCCL_LL_THRESHOLD"]
|
python
| 3 | 0 | |
pkg/config/config.go
|
/*
Copyright [email protected]
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/go-git/go-git/v5"
"github.com/maksim-paskal/auto-devops/pkg/types"
"github.com/maksim-paskal/auto-devops/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
giturls "github.com/whilp/git-urls"
"gopkg.in/yaml.v3"
)
const (
autoDevopsYaml = ".auto-devops.yaml"
)
//nolint: gochecknoglobals
var (
gitVersion = "dev"
BootstrapZip = flag.String("bootstrap", os.Getenv("AUTO_DEVOPS_BOOTSTRAP"), "path to archive")
LogLevel = flag.String("log.level", "INFO", "log level")
Boostrap = types.Boostrap{
DelimsLeft: "{%",
DelimsRight: "%}",
Answers: make(map[string]string),
}
)
func GetVersion() string {
return gitVersion
}
func Init() error { //nolint:cyclop,funlen
log.SetReportCaller(true)
level, err := log.ParseLevel(*LogLevel)
if err != nil {
return errors.Wrap(err, "error parsing level")
}
log.SetLevel(level)
log.Debugf("Starting %s...", GetVersion())
dir, err := ioutil.TempDir("", "auto-devops")
if err != nil {
return errors.Wrap(err, "error creating temp folder")
}
// load initial .auto-devops.yaml
err = loadYAML(autoDevopsYaml)
if err != nil {
log.WithError(err).Debug("not loading initital config")
}
Boostrap.Dir = dir
if len(Boostrap.Version) == 0 {
Boostrap.Version = GetVersion()
}
if len(*BootstrapZip) > 0 {
Boostrap.Bootstrap = *BootstrapZip
}
if len(Boostrap.Bootstrap) == 0 {
log.Fatalf("neen bootstrap zip. Use -bootstrap argument or bootstrap attribute in %s", autoDevopsYaml)
}
_, err = utils.Unzip(Boostrap.Bootstrap, Boostrap.Dir)
if err != nil {
return errors.Wrapf(err, "error unzip %s", Boostrap.Bootstrap)
}
// load server .auto-devops.yaml
autoDevopsFile := filepath.Join(Boostrap.Dir, autoDevopsYaml)
err = loadYAML(autoDevopsFile)
if err != nil {
return errors.Wrap(err, "error loading server file")
}
// load user .auto-devops.yaml
if _, err := os.Stat(autoDevopsYaml); err == nil {
err = loadYAML(autoDevopsYaml)
if err != nil {
return errors.Wrap(err, "error loading user file")
}
}
err = loadYAML(autoDevopsYaml)
if err != nil {
log.WithError(err).Debug("error reading user ", autoDevopsYaml)
}
if len(Boostrap.Pwd) == 0 {
Boostrap.Pwd, err = os.Getwd()
if err != nil {
return errors.Wrap(err, "error getting current folder")
}
}
if len(Boostrap.Name) == 0 {
info, err := os.Stat(Boostrap.Pwd)
if err != nil {
return errors.Wrap(err, "error getting current folder stat")
}
Boostrap.Name = info.Name()
}
err = loadGitInfo()
if err != nil {
log.WithError(err).Warn("error loading git info")
}
return nil
}
func loadYAML(yamlPath string) error {
configByte, err := ioutil.ReadFile(yamlPath)
if err != nil {
return errors.Wrap(err, "error reading file")
}
err = yaml.Unmarshal(configByte, &Boostrap)
if err != nil {
return errors.Wrap(err, "error parse yaml")
}
return nil
}
var (
errHasNoKey = errors.New("has no key")
errWrongVersion = errors.New("wrong version")
)
func Validate() error {
for _, q := range Boostrap.Questions {
log.Debugf("key=%s", q.Key)
if len(q.Key) == 0 {
return errors.Wrap(errHasNoKey, "question "+q.Prompt)
}
if _, err := Boostrap.Template(q.Condition); err != nil {
return errors.Wrap(err, q.Condition)
}
}
for _, f := range Boostrap.Filters {
if _, err := Boostrap.Template(f.Condition); err != nil {
return errors.Wrap(err, f.Condition)
}
}
matched, err := regexp.MatchString(Boostrap.Version, GetVersion())
if err != nil {
return errors.Wrap(err, "error in version matching")
}
if !matched {
return errors.Wrap(
errWrongVersion,
fmt.Sprintf("required version %s,current version %s. Please install required version", Boostrap.Version, GetVersion()), //nolint:lll
)
}
return nil
}
func loadGitInfo() error {
r, err := git.PlainOpen(".")
if err != nil {
return errors.Wrap(err, "error opening folder")
}
list, err := r.Remotes()
if err != nil {
return errors.Wrap(err, "error listing remotes")
}
if len(list) == 0 {
return errors.New("no remote")
}
if len(list[0].Config().URLs) == 0 {
return errors.New("no remote urls")
}
remoteURL := list[0].Config().URLs[0]
u, err := giturls.Parse(remoteURL)
if err != nil {
return errors.Wrap(err, "error parsing string")
}
Boostrap.GitInfo.Host = u.Host
Boostrap.GitInfo.Path = u.Path
Boostrap.GitInfo.PathFormated = strings.TrimSuffix(Boostrap.GitInfo.Path, ".git")
return nil
}
|
[
"\"AUTO_DEVOPS_BOOTSTRAP\""
] |
[] |
[
"AUTO_DEVOPS_BOOTSTRAP"
] |
[]
|
["AUTO_DEVOPS_BOOTSTRAP"]
|
go
| 1 | 0 | |
src/postgres/postgres.go
|
package postgres
import (
"database/sql"
"fmt"
_ "github.com/lib/pq"
"os"
)
const (
defaultHost = "192.168.1.63"
defaultPort = "30000"
)
func connStr() string {
host := os.Getenv("DB_HOST")
port := os.Getenv("DB_PORT")
if len(host) == 0 || len(port) == 0 {
return fmt.Sprintf("user=postgres dbname=youtube host=%s port=%s sslmode=disable", defaultHost, defaultPort)
} else {
return fmt.Sprintf("user=postgres dbname=youtube host=%s port=%s sslmode=disable", host, port)
}
}
func connection() *sql.DB {
db, err := sql.Open("postgres", connStr())
if err != nil {
panic(err)
}
return db
}
func Channels() []string {
sqlStr := "select serial from youtube.entities.channels ORDER BY RANDOM() LIMIT 50"
db := connection()
defer func() {
err := db.Close()
if err != nil {
panic(err)
}
}()
row, err := db.Query(sqlStr)
if err != nil {
panic(err)
}
serials := make([]string, 0)
for row.Next() {
var serial string
err = row.Scan(&serial)
if err != nil {
panic(err)
}
serials = append(serials, serial)
}
return serials
}
|
[
"\"DB_HOST\"",
"\"DB_PORT\""
] |
[] |
[
"DB_PORT",
"DB_HOST"
] |
[]
|
["DB_PORT", "DB_HOST"]
|
go
| 2 | 0 | |
main.go
|
package main
import (
"encoding/json"
"fmt"
"log/syslog"
"os"
"path"
"time"
"github.com/mitchellh/go-ps"
"github.com/sirupsen/logrus"
"github.com/slack-go/slack"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
)
var (
log *logrus.Logger
config AutoSlackConfig
)
// AutoSlackConfig contains the configuration data loaded from the config file
type AutoSlackConfig struct {
SlackUserID string `json:"slack_user_id"`
SlackAPIKey string `json:"slack_api_key"`
LoopTime int `json:"loop_time"`
Debug bool `json:"debug"`
DefaultStatus SlackStatus `json:"default_status"`
States []Trigger `json:"states"`
}
// Trigger represents a process that, when seen locally, triggers an update to
// the users's Slack status
type Trigger struct {
Process string `json:"process"`
Status SlackStatus `json:"status"`
}
// SlackStatus embodies the text and icon associated with a specific Slack
// status update
type SlackStatus struct {
Text string `json:"text"`
Emoji string `json:"emoji"`
}
func setStatus(api *slack.Client, newStatus SlackStatus) error {
log.WithFields(logrus.Fields{
"emoji": newStatus.Emoji,
"text": newStatus.Text,
}).Debug("setStatus")
moo, err := api.GetUserProfile(config.SlackUserID, true)
if err != nil {
return err
}
if newStatus.Text == moo.StatusText && newStatus.Emoji == moo.StatusEmoji {
log.Debug("No change to status")
} else {
log.WithFields(logrus.Fields{
"emoji": newStatus.Emoji,
"text": newStatus.Text,
}).Info("Setting Slack Status")
return api.SetUserCustomStatusWithUser(config.SlackUserID, newStatus.Text, newStatus.Emoji, 0)
}
return nil
}
// lookForProcess reports whether a process whose executable name equals
// name is currently running on this machine. Failure to read the process
// list is logged and treated as "not found".
func lookForProcess(name string) bool {
	log.WithFields(logrus.Fields{
		"name": name,
	}).Debug("lookForProcess")

	procs, err := ps.Processes()
	if err != nil {
		log.WithError(err).Error("Can't get system process list")
		return false
	}

	for _, proc := range procs {
		if proc.Executable() == name {
			return true
		}
	}
	return false
}
// initLog creates the package-level logger and, when a syslog daemon is
// reachable, mirrors log output to it under the "autoslack" tag.
func initLog() {
	log = logrus.New()
	if hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_NOTICE, "autoslack"); err != nil {
		log.WithError(err).Error("Unable to connect to syslog")
	} else {
		log.AddHook(hook)
	}
}
// loadConfig reads and parses the JSON configuration at file. Open or
// decode failures are fatal (log.Fatal exits the process). When Debug is
// enabled in the config, the log level is raised and the parsed config is
// dumped to stdout.
func loadConfig(file string) (c AutoSlackConfig) {
	log.WithFields(logrus.Fields{
		"filename": file,
	}).Info("Loading configuration")

	configFile, err := os.Open(file)
	if err != nil {
		log.WithError(err).Fatal("Cannot load config file")
	}
	// Close only after a successful Open; the original deferred Close on a
	// nil *os.File when Open had failed.
	defer configFile.Close()

	if err := json.NewDecoder(configFile).Decode(&c); err != nil {
		log.WithError(err).Fatal("Cannot parse config file")
	}

	// The original checked c.Debug twice in a row; merged into one block.
	if c.Debug {
		log.Info("Debug logging enabled")
		log.SetLevel(logrus.DebugLevel)
		fmt.Println("-- ")
		fmt.Printf("%+v\n", c)
		fmt.Println("-- ")
	}
	return c
}
// main polls the local process list every LoopTime seconds and keeps the
// user's Slack status in sync: the first configured trigger whose process is
// running sets the status; when none match, the default status is restored.
func main() {
	// Name of the trigger process currently reflected in Slack; empty string
	// means the default status is (or should be) active.
	var lastStatus string
	initLog()
	config = loadConfig(path.Join(os.Getenv("HOME"), ".config", "autoslack", "config.json"))
	api := slack.New(config.SlackAPIKey)
	for {
		found := false
		for _, trigger := range config.States {
			log.WithFields(logrus.Fields{
				"Process":    trigger.Process,
				"lastStatus": lastStatus,
			}).Debug("Looking for process")
			if lookForProcess(trigger.Process) {
				found = true
				if trigger.Process == lastStatus {
					// Status already reflects this process; nothing to do.
					log.Debug("Same Process")
				} else {
					log.Debug("New Process")
					err := setStatus(api, trigger.Status)
					if err != nil {
						log.WithError(err).Error("Unable to set status")
					} else {
						// Only record the new state after a successful update,
						// so failures are retried on the next loop.
						lastStatus = trigger.Process
					}
					// A newly matched trigger wins; skip the remaining rules.
					break
				}
			} else {
				log.Debug("Process Not Found")
			}
		}
		if !found {
			log.Debug("Not Found")
			// Only reset to the default status if a trigger was active before.
			if lastStatus != "" {
				err := setStatus(api, config.DefaultStatus)
				if err != nil {
					log.WithError(err).Error("Unable to set status")
				} else {
					lastStatus = ""
				}
			}
		}
		log.Debug("-- ")
		time.Sleep(time.Duration(config.LoopTime) * time.Second)
		log.WithFields(logrus.Fields{
			"seconds": config.LoopTime,
		}).Trace("Looping")
	}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
google/sdk.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/scalingdata/go-x-net/context"
"github.com/scalingdata/go-x-oauth2"
"github.com/scalingdata/go-x-oauth2/internal"
)
// sdkCredentials mirrors the JSON layout of the Cloud SDK "credentials"
// file: a list of (credential, key) pairs, one entry per authorized account.
type sdkCredentials struct {
	Data []struct {
		Credential struct {
			ClientID     string     `json:"client_id"`
			ClientSecret string     `json:"client_secret"`
			AccessToken  string     `json:"access_token"`
			RefreshToken string     `json:"refresh_token"`
			TokenExpiry  *time.Time `json:"token_expiry"` // nil when the file has no expiry
		} `json:"credential"`
		Key struct {
			Account string `json:"account"` // account email this credential belongs to
			Scope   string `json:"scope"`   // space-separated OAuth scopes
		} `json:"key"`
	}
}
// An SDKConfig provides access to tokens from an account already
// authorized via the Google Cloud SDK.
type SDKConfig struct {
	conf         oauth2.Config // OAuth2 client settings derived from the stored credential
	initialToken *oauth2.Token // token loaded from disk; refreshed in memory only
}
// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
// account. If account is empty, the account currently active in
// Google Cloud SDK properties is used.
// Google Cloud SDK credentials must be created by running `gcloud auth`
// before using this function.
// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
func NewSDKConfig(account string) (*SDKConfig, error) {
	configPath, err := sdkConfigPath()
	if err != nil {
		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
	}
	// The SDK keeps every authorized account in one JSON credentials file.
	credentialsPath := filepath.Join(configPath, "credentials")
	f, err := os.Open(credentialsPath)
	if err != nil {
		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
	}
	defer f.Close()
	var c sdkCredentials
	if err := json.NewDecoder(f).Decode(&c); err != nil {
		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
	}
	if len(c.Data) == 0 {
		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
	}
	if account == "" {
		// No explicit account requested: read the active account from the
		// INI-style SDK "properties" file ([core] account = ...).
		propertiesPath := filepath.Join(configPath, "properties")
		f, err := os.Open(propertiesPath)
		if err != nil {
			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
		}
		defer f.Close()
		ini, err := internal.ParseINI(f)
		if err != nil {
			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
		}
		core, ok := ini["core"]
		if !ok {
			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
		}
		active, ok := core["account"]
		if !ok {
			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
		}
		account = active
	}
	// Find the matching credential entry and build the config from it.
	for _, d := range c.Data {
		if account == "" || d.Key.Account == account {
			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
			}
			var expiry time.Time
			if d.Credential.TokenExpiry != nil {
				expiry = *d.Credential.TokenExpiry
			}
			return &SDKConfig{
				conf: oauth2.Config{
					ClientID:     d.Credential.ClientID,
					ClientSecret: d.Credential.ClientSecret,
					Scopes:       strings.Split(d.Key.Scope, " "),
					Endpoint:     Endpoint,
					RedirectURL:  "oob",
				},
				initialToken: &oauth2.Token{
					AccessToken:  d.Credential.AccessToken,
					RefreshToken: d.Credential.RefreshToken,
					Expiry:       expiry,
				},
			}, nil
		}
	}
	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
}
// Client returns an HTTP client using Google Cloud SDK credentials to
// authorize requests. The token will auto-refresh as necessary. The
// underlying http.RoundTripper will be obtained using the provided
// context. The returned client and its Transport should not be
// modified.
func (c *SDKConfig) Client(ctx context.Context) *http.Client {
	transport := &oauth2.Transport{
		Source: c.TokenSource(ctx),
	}
	return &http.Client{Transport: transport}
}
// TokenSource returns an oauth2.TokenSource that retrieves tokens from
// Google Cloud SDK credentials using the provided context.
// It returns the current access token stored in the credentials and
// refreshes it when it expires, but it won't update the on-disk
// credentials with the new access token.
func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
	return c.conf.TokenSource(ctx, c.initialToken)
}
// Scopes are the OAuth 2.0 scopes the current account is authorized for.
// The returned slice is the config's backing slice; callers should not
// mutate it.
func (c *SDKConfig) Scopes() []string {
	return c.conf.Scopes
}
// sdkConfigPath tries to guess where the gcloud config is located.
// It can be overridden during tests.
var sdkConfigPath = func() (string, error) {
	switch runtime.GOOS {
	case "windows":
		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
	default:
		home := guessUnixHomeDir()
		if home == "" {
			return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
		}
		return filepath.Join(home, ".config", "gcloud"), nil
	}
}
// guessUnixHomeDir resolves the current user's home directory, preferring
// the os/user database and falling back to the HOME environment variable.
func guessUnixHomeDir() string {
	if usr, err := user.Current(); err == nil {
		return usr.HomeDir
	}
	return os.Getenv("HOME")
}
|
[
"\"APPDATA\"",
"\"HOME\""
] |
[] |
[
"APPDATA",
"HOME"
] |
[]
|
["APPDATA", "HOME"]
|
go
| 2 | 0 | |
vendor/github.com/hashicorp/nomad/client/driver/docker_linux_test.go
|
package driver
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/nomad/client/testutil"
tu "github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/require"
)
// TestDockerDriver_authFromHelper verifies that authFromHelper shells out to
// a docker-credential-<name> helper found on PATH, parses its JSON response,
// and passes the registry URL to the helper on stdin.
func TestDockerDriver_authFromHelper(t *testing.T) {
	dir, err := ioutil.TempDir("", "test-docker-driver_authfromhelper")
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	// Fake credential helper script: records its stdin to helper-$1.out and
	// prints a fixed JSON credential payload.
	helperPayload := "{\"Username\":\"hashi\",\"Secret\":\"nomad\"}"
	helperContent := []byte(fmt.Sprintf("#!/bin/sh\ncat > %s/helper-$1.out;echo '%s'", dir, helperPayload))
	helperFile := filepath.Join(dir, "docker-credential-testnomad")
	err = ioutil.WriteFile(helperFile, helperContent, 0777)
	require.NoError(t, err)
	// Add the temp dir to PATH so the fake helper is found; restore afterwards.
	path := os.Getenv("PATH")
	os.Setenv("PATH", fmt.Sprintf("%s:%s", path, dir))
	defer os.Setenv("PATH", path)
	helper := authFromHelper("testnomad")
	creds, err := helper("registry.local:5000/repo/image")
	require.NoError(t, err)
	require.NotNil(t, creds)
	require.Equal(t, "hashi", creds.Username)
	require.Equal(t, "nomad", creds.Password)
	// The helper must have been invoked with action "get"...
	if _, err := os.Stat(filepath.Join(dir, "helper-get.out")); os.IsNotExist(err) {
		t.Fatalf("Expected helper-get.out to exist")
	}
	// ...and received the https-prefixed registry host (no repo path) on stdin.
	content, err := ioutil.ReadFile(filepath.Join(dir, "helper-get.out"))
	require.NoError(t, err)
	require.Equal(t, []byte("https://registry.local:5000"), content)
}
// TestDockerDriver_PidsLimit starts a busybox container with pids_limit=1 and
// asserts that forking inside it fails: the container must exit with code 2
// (from /bin/sh) and its stderr must contain "can't fork".
func TestDockerDriver_PidsLimit(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}
	task, _, _ := dockerTask(t)
	task.Config["pids_limit"] = "1"
	task.Config["command"] = "/bin/sh"
	// this starts three processes in container: /bin/sh and two sleep
	// while a single sleep suffices, our observation is that it's image dependent
	// (i.e. using a single sleep here in alpine image doesn't trigger PID limit failure)
	task.Config["args"] = []string{"-c", "sleep 2 & sleep 2"}
	ctx := testDockerDriverContexts(t, task)
	defer ctx.Destroy()
	d := NewDockerDriver(ctx.DriverCtx)
	// TODO: current log capture of docker driver is broken
	// so we must fetch logs from docker daemon directly
	// which works in Linux as well as Mac
	d.(*DockerDriver).DriverContext.config.Options[dockerCleanupContainerConfigOption] = "false"
	// Copy the image into the task's directory
	copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
	_, err := d.Prestart(ctx.ExecCtx, task)
	if err != nil {
		t.Fatalf("error in prestart: %v", err)
	}
	resp, err := d.Start(ctx.ExecCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	h := resp.Handle.(*DockerHandle)
	// Remove the container manually since automatic cleanup was disabled above.
	defer h.client.RemoveContainer(docker.RemoveContainerOptions{
		ID:            h.containerID,
		RemoveVolumes: true,
		Force:         true,
	})
	defer resp.Handle.Kill()
	select {
	case res := <-resp.Handle.WaitCh():
		if res.Successful() {
			t.Fatalf("expected error, but container exited successful")
		}
		// /bin/sh exits with 2
		if res.ExitCode != 2 {
			t.Fatalf("expected exit code of 2 but found %v", res.ExitCode)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}
	// XXX Logging doesn't work on OSX so just test on Linux
	// Check that data was written to the directory.
	var act bytes.Buffer
	err = h.client.Logs(docker.LogsOptions{
		Container:   h.containerID,
		Stderr:      true,
		ErrorStream: &act,
	})
	if err != nil {
		t.Fatalf("error in fetching logs: %v", err)
	}
	exp := "can't fork"
	if !strings.Contains(act.String(), exp) {
		t.Fatalf("Expected failed fork: %q", act)
	}
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
operator-database-backup/backup/backup_resource.go
|
package backup
import (
"fmt"
"os"
"path/filepath"
databaseoperatorv1alpha1 "github.com/ibm/operator-sample-go/operator-database/api/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
// getBackupResource initializes the package-level Kubernetes client with a
// scheme that knows the DatabaseBackup CRD, then fetches the backup resource
// named backupResourceName from the configured namespace into the
// package-level databaseBackupResource.
func getBackupResource() error {
	// Prefer in-cluster credentials; fall back to the local kubeconfig
	// ($HOME/.kube/config) when running outside a cluster.
	config, err := rest.InClusterConfig()
	if err != nil {
		kubeconfig := filepath.Join(
			os.Getenv("HOME"), ".kube", "config",
		)
		fmt.Println("Using kubeconfig file: ", kubeconfig)
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return err
		}
	}
	// Register the third-party database CRD group/version with a fresh scheme.
	var GroupVersion = schema.GroupVersion{Group: "database.sample.third.party", Version: "v1alpha1"}
	var SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
	var databaseOperatorScheme *runtime.Scheme
	databaseOperatorScheme, err = SchemeBuilder.Build()
	if err != nil {
		return err
	}
	err = databaseoperatorv1alpha1.AddToScheme(databaseOperatorScheme)
	if err != nil {
		return err
	}
	kubernetesClient, err = client.New(config, client.Options{Scheme: databaseOperatorScheme})
	if err != nil {
		return err
	}
	databaseBackupResource = &databaseoperatorv1alpha1.DatabaseBackup{}
	err = kubernetesClient.Get(applicationContext, types.NamespacedName{Name: backupResourceName, Namespace: namespace}, databaseBackupResource)
	if err != nil {
		return err
	}
	return nil
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Assignment.settings')
    try:
        # Imported lazily so a missing Django produces the friendly hint below.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
aws/aws-sdk-go.v2/options.go
|
package aws
import (
"os"
"time"
"github.com/americanas-go/config"
"github.com/jvitoroc/ignite/net/http/client"
)
// Options aggregates AWS client configuration. Values are loaded from the
// configuration tree rooted at `root` and may then be overridden by the
// standard AWS environment variables (see NewOptions).
type Options struct {
	AccessKeyId                 string // AWS access key; env AWS_ACCESS_KEY_ID wins
	SecretAccessKey             string // AWS secret key; env AWS_SECRET_ACCESS_KEY wins
	DefaultRegion               string // env AWS_DEFAULT_REGION wins
	SessionToken                string // env AWS_SESSION_TOKEN wins
	DefaultAccountNumber        string // env AWS_DEFAULT_ACCOUNT_NUMBER wins
	MaxAttempts                 int
	HasRateLimit                bool
	MaxConnsPerHost             int
	MaxIdleConns                int
	MaxIdleConnsPerHost         int
	TimeoutMillis               time.Duration
	KeepAliveMillis             time.Duration
	IdleConnTimeoutMillis       time.Duration
	ResponseHeaderTimeoutMillis time.Duration
	HttpClient                  client.Options
}
// NewOptionsWithPath builds Options from the default configuration root and
// then overlays the configuration found at path on top of it.
func NewOptionsWithPath(path string) (*Options, error) {
	opts, err := NewOptions()
	if err != nil {
		return nil, err
	}
	if err := config.UnmarshalWithPath(path, opts); err != nil {
		return nil, err
	}
	return opts, nil
}
// NewOptions loads Options from the configuration root and lets the
// standard AWS environment variables override the configured values.
func NewOptions() (*Options, error) {
	opts := &Options{}
	if err := config.UnmarshalWithPath(root, opts); err != nil {
		return nil, err
	}

	// Environment variables win over file-based configuration. Each entry
	// targets a distinct field, so iteration order is irrelevant.
	overrides := map[string]*string{
		"AWS_ACCESS_KEY_ID":          &opts.AccessKeyId,
		"AWS_SECRET_ACCESS_KEY":      &opts.SecretAccessKey,
		"AWS_DEFAULT_REGION":         &opts.DefaultRegion,
		"AWS_DEFAULT_ACCOUNT_NUMBER": &opts.DefaultAccountNumber,
		"AWS_SESSION_TOKEN":          &opts.SessionToken,
	}
	for name, field := range overrides {
		if v := os.Getenv(name); v != "" {
			*field = v
		}
	}
	return opts, nil
}
|
[
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_DEFAULT_REGION\"",
"\"AWS_DEFAULT_ACCOUNT_NUMBER\"",
"\"AWS_SESSION_TOKEN\""
] |
[] |
[
"AWS_SESSION_TOKEN",
"AWS_DEFAULT_REGION",
"AWS_SECRET_ACCESS_KEY",
"AWS_ACCESS_KEY_ID",
"AWS_DEFAULT_ACCOUNT_NUMBER"
] |
[]
|
["AWS_SESSION_TOKEN", "AWS_DEFAULT_REGION", "AWS_SECRET_ACCESS_KEY", "AWS_ACCESS_KEY_ID", "AWS_DEFAULT_ACCOUNT_NUMBER"]
|
go
| 5 | 0 | |
scripts/search.py
|
#!/usr/bin/env python
# coding: utf-8
# ###### Intro
# - query from twitter api, pipelined to aws RDS
# - before pipelining, check queries for alert, alert sent by email
# establish twitter api connection
# In[ ]:
# import library
import pandas as pd
import numpy as np
import tweepy
import csv
import configparser
import boto3
from botocore.exceptions import ClientError
from datetime import datetime
from config import keys, since, fav_count, fwr_count, rt_count
from config import master_bucket, region
from config import account_key_dict, df_account_list
from io import StringIO
import os
import mysql.connector
# email function
import smtplib
from email.message import EmailMessage
import ssl
# read credentials file
# In[ ]:
# Load API and database secrets from the local credentials file.
config = configparser.ConfigParser()
config.read('credentials.cfg')
# establish twitter api connection
# In[ ]:
# tweets' JSON data from twitter API
consumer_key = config['tweepy']['consumer_key']
consumer_secret = config['tweepy']['consumer_secret']
access_token = config['tweepy']['access_token']
access_secret = config['tweepy']['access_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
# wait_on_rate_limit makes tweepy sleep through Twitter rate-limit windows
# instead of raising.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# establish aws rds connection in mysql
# In[ ]:
# Connect to the AWS RDS MySQL instance; autocommit so every INSERT is
# persisted immediately (no explicit transactions in this script).
host = config['mysql_db']['host']
port = config['mysql_db']['port']
user = config['mysql_db']['user']
password = config['mysql_db']['password']
db_name = config['mysql_db']['db_name']
rds = mysql.connector.connect(
    host=host,
    user=user,
    password=password,
    port=port
)
rds.autocommit = True
cursor = rds.cursor()
# *(ignore if not using aws RDS)in aws rds, one default database is created once connected to rds, as set in aws rds previously, the db's name should match with the `db_name` in `credentials.cfg`
# establish aws s3 connection, create bucket and folder to store since_id
# In[ ]:
# Export AWS credentials so boto3 picks them up from the environment.
os.environ['AWS_ACCESS_KEY_ID'] = config['aws']['aws_access_key_id']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['aws']['aws_secret_access_key']
# In[ ]:
s3 = boto3.client('s3', region_name=region)
# In[ ]:
# Create the master bucket if it does not exist yet.
try:
    s3.get_bucket_versioning(Bucket=master_bucket)
except ClientError:
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; ClientError is what boto3 raises for a missing bucket
    # (and the import at the top of the file was previously unused).
    s3.create_bucket(Bucket=master_bucket)
    print('Bucket {} created in region {}'.format(master_bucket, region))
# In[ ]:
# Create the "sinceIds/" folder object if it is missing.
try:
    s3.get_object(Bucket=master_bucket, Key='sinceIds/')
except s3.exceptions.NoSuchKey:
    s3.put_object(Bucket=master_bucket, Key='sinceIds/')
    print('"sinceIds" folder created')
# alert email function
# * IMPORTANT: when usiing gmail as sender, make sure to turn `Less secure app access` on, as this allows the app to send on Gmail via SMTP
# * Link to turn `Less secure app access` on/off: https://myaccount.google.com/lesssecureapps
# In[ ]:
# SMTP settings for alert emails.
# NOTE(review): `port` and `password` shadow the MySQL variables of the same
# names above; the database connection was already created, so this is
# harmless, but worth confirming intentional.
from_ = config['email']['from_']
to = config['email']['to']
smtp_host = config['email']['smtp_host']
port = config['email']['port']
username = config['email']['username']
password = config['email']['password']
subject = "TC Alert: "  # prefix for every alert subject line
def alert_by_email(subject_, alert_target, alert_msg, test_mode):
    """Send an alert email via SMTP with STARTTLS.

    Args:
        subject_: Suffix appended to the module-level ``subject`` prefix.
        alert_target: Identifier of what triggered the alert (e.g. '@account').
        alert_msg: Body text describing the alert.
        test_mode: When truthy, skip sending entirely (used while
            initializing the database).
    """
    # initialization of database may not send alert
    if test_mode:
        return
    alert_text = 'For {}: {}'.format(alert_target, alert_msg)
    msg = EmailMessage()
    msg.set_content(alert_text)
    msg['Subject'] = subject + subject_
    msg['From'] = from_
    msg['To'] = to
    # Context manager guarantees the connection is closed (QUIT) even when
    # starttls/login/send raises; the original leaked the socket on failure.
    with smtplib.SMTP('{}:{}'.format(smtp_host, port)) as s:
        s.starttls()
        s.login(username, password)
        s.send_message(msg)
# ### search_hashtag
# query for each key
# - track output by since_id.csv in s3, create/modify since_id.csv
# - querry for each key, with pre-setting from search_config.py
# - output transfer to s3, save new since_id.csv
# create tables for keys
# In[ ]:
print("PART 1")
cursor.execute("USE {}".format(db_name))
# In[ ]:
for key in keys:
# table with '#' sign has to be surround with ` `
create_table = """CREATE TABLE IF NOT EXISTS `{}` (
id int NOT NULL AUTO_INCREMENT,
user_name varchar(30) NOT NULL,
tweet varchar(280) NOT NULL,
followers int NOT NULL,
likes int NOT NULL,
retweets int NOT NULL,
retweeted boolean NOT NULL,
date datetime NOT NULL,
PRIMARY KEY (id)
);""".format(key)
cursor.execute(create_table)
print("table {} present in database {}".format(key, db_name))
# get since_id.csv from S3, to avoid duplicate output in query;
# if since_id.csv not initialized, initialization is done in the exception block
# In[ ]:
# Load the per-key since_id bookkeeping CSV from S3 so reruns only fetch new
# tweets. If the CSV does not exist yet, build a fresh frame with NaN ids.
try:
    since_ids_object = s3.get_object(Bucket=master_bucket, Key='sinceIds/hashtag_sinceid.csv')
    object_body = since_ids_object['Body']
    csv_string = object_body.read().decode('utf-8')
    df = pd.read_csv(StringIO(csv_string), )  # the dataframe to store since_ids
    for key in keys:  # append new key to since_id.csv by pd
        if key not in df.key.values:
            df_oneRow = pd.DataFrame([[key, np.NAN]], columns=['key', 'since_id'])
            df = pd.concat([df, df_oneRow])
except s3.exceptions.NoSuchKey as e:
    df = pd.DataFrame(columns=['key', 'since_id'])  # the dataframe to store since_ids
    for key in keys:  # create since_id.csv by pd
        df_oneRow = pd.DataFrame([[key, np.NAN]], columns=['key', 'since_id'])
        df = pd.concat([df, df_oneRow])
df = df.reset_index(drop=True)
# In[ ]:
def keyword_query_sql_string(key):
    """Build the parameterized INSERT statement for the per-keyword table."""
    template = ('INSERT INTO `{}`(user_name, tweet, followers, likes, retweets, '
                'retweeted, date) VALUES (%s, %s, %s, %s, %s, %s, %s)')
    return template.format(key)
# In[ ]:
# query by keyword, max timeline possible, many querys are ran at once
def keyword_query_api(key, since, fwr_count, fav_count, rt_count, since_id, api):
    """Page backwards through the Twitter search API for `key`, inserting
    qualifying tweets into the per-key MySQL table.

    A tweet qualifies when the author's followers, its favorites and its
    retweets all meet the given thresholds. `since_id == 0` means the key has
    never been queried. The module-level `df` since_id table is updated in
    place once pagination is exhausted.
    """
    print('Key: {}'.format(key))
    # 1st query
    if since_id == 0:
        tweets = api.search(q=key, lang='en', since=since, count=100, tweet_mode="extended")
    else:
        tweets = api.search(q=key, lang='en', since=since, count=100, since_id=since_id, tweet_mode="extended")
    since_id_list = list()  # temp since_id list per key
    # filter and insert to mysql
    for tweet in tweets:
        since_id_list.append(tweet.id)  # append since_id
        if tweet.user.followers_count >= fwr_count and tweet.favorite_count >= fav_count and tweet.retweet_count >= rt_count:
            sql_row = list()
            # retweets carry the original text under retweeted_status
            if hasattr(tweet, 'retweeted_status'):
                sql_row.extend((tweet.user.screen_name, tweet.retweeted_status.full_text, tweet.user.followers_count,
                                tweet.favorite_count, tweet.retweet_count, 'TRUE', tweet.created_at))
            else:
                sql_row.extend((tweet.user.screen_name, tweet.full_text, tweet.user.followers_count,
                                tweet.favorite_count, tweet.retweet_count, 'FALSE', tweet.created_at))
            sql_insert = keyword_query_sql_string(key)
            cursor.execute(sql_insert, sql_row)
    if tweets:
        # page backwards: next request returns only tweets older than the last seen
        max_id = tweets[-1].id - 1
        print('1 query done')
    else:
        # no new queires produced at this run, no update on mysql
        print('No query needed, done')
        return
    # subsequent queries
    while True:
        if since_id == 0:
            tweets = api.search(q=key, lang='en', since=since, count=100, max_id=max_id, tweet_mode="extended")
        else:
            tweets = api.search(q=key, lang='en', since=since, count=100, since_id=since_id, max_id=max_id,
                                tweet_mode="extended")
        # filter and insert to mysql
        for tweet in tweets:
            since_id_list.append(tweet.id)  # append since_id
            if tweet.user.followers_count >= fwr_count and tweet.favorite_count >= fav_count and tweet.retweet_count >= rt_count:
                sql_row = list()
                if hasattr(tweet, 'retweeted_status'):
                    sql_row.extend((
                        tweet.user.screen_name, tweet.retweeted_status.full_text, tweet.user.followers_count,
                        tweet.favorite_count, tweet.retweet_count, 'TRUE', tweet.created_at))
                else:
                    sql_row.extend((tweet.user.screen_name, tweet.full_text, tweet.user.followers_count,
                                    tweet.favorite_count, tweet.retweet_count, 'FALSE', tweet.created_at))
                sql_insert = keyword_query_sql_string(key)
                cursor.execute(sql_insert, sql_row)
        if not tweets:
            # pagination exhausted: remember the newest id seen for the next run
            df.loc[df.index[df['key'] == key][0], 'since_id'] = max(since_id_list)  # update since_id
            print('done')
            return
        max_id = tweets[-1].id - 1
        print('1 query done')
# In[ ]:
# call the func above, para by key and since_id
# A key with no stored since_id (NaN) is queried from scratch; otherwise
# only tweets newer than the recorded since_id are fetched.
print("now search keyword s")
print("there are {} keywords to search from in total".format(len(keys)))
key_count = 1
for key in keys:
    # BUG FIX: the original called the non-existent str method `.formart`,
    # which raised AttributeError on the first loop iteration.
    print('now on keyword #{}, {} keywords left'.format(key_count, len(keys) - key_count))
    since_id_value = df[df['key'] == key].since_id.values[0]
    if np.isnan(since_id_value):
        keyword_query_api(key=key, since=since, fwr_count=fwr_count, fav_count=fav_count, rt_count=rt_count,
                          since_id=0, api=api)
    else:
        keyword_query_api(key=key, since=since, fwr_count=fwr_count, fav_count=fav_count, rt_count=rt_count,
                          since_id=since_id_value, api=api)
    key_count += 1
# update since_id to S3, e.g., update tc/sinceIDs/since_id.csv
# In[ ]:
# Persist the updated since_id table back to S3 (replace, then re-upload).
s3.delete_object(Bucket=master_bucket, Key='sinceIds/hashtag_sinceid.csv')  # delete the old since_id.csv
csv_buffer = StringIO()
df.to_csv(csv_buffer, sep=",", index=False)
s3.put_object(Bucket=master_bucket, Key='sinceIds/hashtag_sinceid.csv', Body=csv_buffer.getvalue())
# ## search_account_keyword
#
# query for each account by key
#
# - track account by screen_name
# - filter account tweets by since_id
# - update to aws s3
# get since_id.csv from S3, to avoid duplicate output in query
# In[ ]:
print("PART 2")
try:
since_ids_object = s3.get_object(Bucket=master_bucket, Key='sinceIds/account_keyword_sinceid.csv')
object_body = since_ids_object['Body']
csv_string = object_body.read().decode('utf-8')
df = pd.read_csv(StringIO(csv_string), ) # the dataframe to store since_ids
for acc in account_key_dict: # append new key to since_id.csv by pd
if acc not in df.acc.values:
df_oneRow = pd.DataFrame([[acc, np.NAN]], columns=['acc', 'since_id'])
df = pd.concat([df, df_oneRow])
except s3.exceptions.NoSuchKey as e:
df = pd.DataFrame(columns=['acc', 'since_id']) # the dataframe to store since_ids
for acc in account_key_dict: # create since_id.csv by pd
df_oneRow = pd.DataFrame([[acc, np.NAN]], columns=['acc', 'since_id'])
df = pd.concat([df, df_oneRow])
df = df.reset_index(drop=True)
# create table for each twitter account
# In[ ]:
cursor.execute("USE {}".format(db_name))
# check for twitter account validity (i.e. correct account name)
# In[ ]:
print('verifing twitter accounts')
for acc in account_key_dict:
try:
api.get_user(screen_name=acc)
except tweepy.TweepError:
print('twitter account {} not valid, please check, program will exit immediately.'.format(acc))
exit(1)
print('all twitter accounts verified')
# In[ ]:
for acc in account_key_dict:
# table's name with '#' has to be surround with ` `
create_table = """CREATE TABLE IF NOT EXISTS `{}` (
id int NOT NULL AUTO_INCREMENT,
tweet varchar(280) NOT NULL,
likes int NOT NULL,
retweets int NOT NULL,
retweeted boolean NOT NULL,
date datetime NOT NULL,
PRIMARY KEY (id)
);""".format(acc)
cursor.execute(create_table)
print("table {} present in database {}".format(acc, db_name))
# In[ ]:
def account_query_sql_string(account):
    """Build the parameterized INSERT statement for an account's table."""
    columns = 'tweet, likes, retweets, retweeted, date'
    placeholders = '%s, %s, %s, %s, %s'
    return 'INSERT INTO `{}`({}) VALUES ({})'.format(account, columns, placeholders)
# In[ ]:
# query by keyword, max timeline possible, many querys are ran at once
def account_query_api(acc, keys, since_id, api):
    """Page backwards through an account's timeline, storing every tweet in
    the account's MySQL table and emailing an alert when any of `keys`
    appears (case-insensitively) in a tweet's text.

    `since_id == 0` means the account has never been queried. The
    module-level `df` since_id table is updated in place once pagination is
    exhausted.
    """
    print('account: {}'.format(acc))
    print('keys: ', end='')
    print(*keys, sep=', ')
    # 1st query
    if since_id == 0:
        tweets = api.user_timeline(screen_name=acc, count=200, tweet_mode="extended")
    else:
        tweets = api.user_timeline(screen_name=acc, since_id=since_id, count=200, tweet_mode="extended")
    since_id_list = list()  # temp since_id list per key
    # insert into mysql and filter for alert
    for tweet in tweets:
        since_id_list.append(tweet.id)  # append since_id
        # insert into mysql
        sql_row = list()
        # retweets carry the original text under retweeted_status
        if hasattr(tweet, 'retweeted_status'):
            sql_row.extend(
                (tweet.retweeted_status.full_text, tweet.favorite_count, tweet.retweet_count, 'TRUE', tweet.created_at))
            msg = tweet.retweeted_status.full_text
        else:
            sql_row.extend((tweet.full_text, tweet.favorite_count, tweet.retweet_count, 'FALSE', tweet.created_at))
            msg = tweet.full_text
        sql_insert = account_query_sql_string(acc)
        cursor.execute(sql_insert, sql_row)
        # filter for alert
        tweet_lowercase = tweet.full_text.lower()
        key_found = list()
        for key in keys:
            if key in tweet_lowercase:
                key_found.append(key)
        # send email alert if key in account found
        if key_found:
            print("sending email...")
            # get coin name as part of email subject
            account_row = df_account_list.loc[df_account_list[df_account_list.columns[1]] == acc]
            subject_ = account_row.iloc[0][0]
            alert_by_email(subject_=subject_, alert_target='@' + acc, alert_msg=msg, test_mode=False)
    if tweets:
        # page backwards: next request returns only tweets older than the last seen
        max_id = tweets[-1].id - 1
        print('1 query done')
    else:
        # no new queires produced at this run, no update on mysql
        print('No query needed, done')
        return
    # subsequent queries
    while True:
        if since_id == 0:
            tweets = api.user_timeline(screen_name=acc, count=200, max_id=max_id, tweet_mode="extended")
        else:
            tweets = api.user_timeline(screen_name=acc, since_id=since_id, count=200, max_id=max_id,
                                       tweet_mode="extended")
        # insert into mysql and filter for alert
        for tweet in tweets:
            since_id_list.append(tweet.id)  # append since_id
            # insert into mysql
            sql_row = list()
            if hasattr(tweet, 'retweeted_status'):
                sql_row.extend((tweet.retweeted_status.full_text, tweet.favorite_count, tweet.retweet_count, 'TRUE',
                                tweet.created_at))
                msg = tweet.retweeted_status.full_text
            else:
                sql_row.extend((tweet.full_text, tweet.favorite_count, tweet.retweet_count, 'FALSE', tweet.created_at))
                msg = tweet.full_text
            sql_insert = account_query_sql_string(acc)
            cursor.execute(sql_insert, sql_row)
            # filter for alert
            tweet_lowercase = tweet.full_text.lower()
            key_found = list()
            for key in keys:
                if key in tweet_lowercase:
                    key_found.append(key)
            # send email alert if key in account found
            if key_found:
                print("sending email...")
                # get coin name as part of email subject
                account_row = df_account_list.loc[df_account_list[df_account_list.columns[1]] == acc]
                subject_ = account_row.iloc[0][0]
                alert_by_email(subject_=subject_, alert_target='@' + acc, alert_msg=msg, test_mode=False)
        if not tweets:
            # pagination exhausted: remember the newest id seen for the next run
            df.loc[df.index[df['acc'] == acc][0], 'since_id'] = max(since_id_list)  # update since_id
            print('done')
            return
        max_id = tweets[-1].id - 1
        print('1 query done')
# In[ ]:
# call the func above, para by key and since_id
# (Mutates module-level df via account_query_api, so kept as a plain loop.)
print("now search keyword in accounts")
print("there are {} accounts to search from in total".format(len(account_key_dict)))
acc_count = 1
for acc in account_key_dict:
    print('now on account #{}, {} accounts left'.format(acc_count, len(account_key_dict) - acc_count))
    # NaN since_id means the account has never been queried: fetch everything.
    if np.isnan(df[df['acc'] == acc].since_id.values[0]):
        account_query_api(acc=acc, keys=account_key_dict[acc], since_id=0, api=api)
    else:
        account_query_api(acc=acc, keys=account_key_dict[acc], since_id=df[df['acc'] == acc].since_id.values[0],
                          api=api)
    acc_count += 1
# In[ ]:
# update since_id to S3, tc/sinceIDs/since_id.csv
# Persist the per-account since_id table, then release DB resources.
s3.delete_object(Bucket=master_bucket, Key='sinceIds/account_keyword_sinceid.csv')  # delete the old since_id.csv
csv_buffer = StringIO()
df.to_csv(csv_buffer, sep=",", index=False)
s3.put_object(Bucket=master_bucket, Key='sinceIds/account_keyword_sinceid.csv', Body=csv_buffer.getvalue())
# In[ ]:
cursor.close()
rds.close()
print('Program finished')
|
[] |
[] |
[
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
python
| 2 | 0 | |
main.go
|
package main
import (
"flag"
"fmt"
"log"
"os"
"strings"
"bdo-rest-api/httpServer"
"bdo-rest-api/scrapers"
)
// main wires together command-line flags (with environment-variable
// fallbacks), seeds the scraper proxy pool, and starts the HTTP server.
func main() {
	flagProxy := flag.String("proxy", "", "Open proxy address to make requests to BDO servers")
	flagPort := flag.Int("port", 8001, "Port to catch requests on")
	flagCacheTTL := flag.Int("cachettl", 180, "Cache TTL in minutes")
	flag.Parse()

	// The PORT env var only wins when the flag was left at its default.
	port := fmt.Sprintf("%v", *flagPort)
	if *flagPort == 8001 && len(os.Getenv("PORT")) > 0 {
		port = os.Getenv("PORT")
	}

	// Proxies come from the flag when given, otherwise from the environment.
	proxySource := *flagProxy
	if len(proxySource) == 0 {
		proxySource = os.Getenv("PROXY")
	}
	proxies := strings.Fields(proxySource)
	scrapers.PushProxies(proxies...)

	fmt.Printf("Used configuration:\n\tProxies:\t%v\n\tPort:\t\t%v\n\tCache TTL:\t%v minutes\n\n", proxies, port, *flagCacheTTL)

	srv := httpServer.Server(&port, flagCacheTTL)
	log.Println("Listening for requests")
	log.Fatal(srv.ListenAndServe())
}
|
[
"\"PORT\"",
"\"PORT\"",
"\"PROXY\""
] |
[] |
[
"PORT",
"PROXY"
] |
[]
|
["PORT", "PROXY"]
|
go
| 2 | 0 | |
examples/get_user_profile/get_user_profile.go
|
package main
import (
"context"
"log"
"os"
"github.com/tonicpow/go-moneybutton"
)
// main fetches a MoneyButton user profile using credentials taken from the
// USER_ID and ACCESS_TOKEN environment variables and logs the result.
func main() {
	client := moneybutton.NewClient(nil, nil)
	userID := os.Getenv("USER_ID")
	accessToken := os.Getenv("ACCESS_TOKEN")
	response, err := client.GetProfile(context.Background(), userID, accessToken)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("profile: ", response.Data)
}
|
[
"\"USER_ID\"",
"\"ACCESS_TOKEN\""
] |
[] |
[
"USER_ID",
"ACCESS_TOKEN"
] |
[]
|
["USER_ID", "ACCESS_TOKEN"]
|
go
| 2 | 0 | |
pkg/jx/cmd/controller_enviornmentcontroller.go
|
package cmd
import (
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/jenkinsfile"
"github.com/jenkins-x/jx/pkg/jx/cmd/opts"
"github.com/jenkins-x/jx/pkg/kube/services"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/sirupsen/logrus"
"github.com/pkg/errors"
"github.com/jenkins-x/jx/pkg/jx/cmd/templates"
"github.com/spf13/cobra"
pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
environmentControllerService = "environment-controller"
environmentControllerHmacSecret = "environment-controller-hmac"
environmentControllerHmacSecretKey = "hmac"
)
// ControllerEnvironmentOptions holds the command line arguments
type ControllerEnvironmentOptions struct {
*opts.CommonOptions
BindAddress string
Path string
Port int
NoGitCredeentialsInit bool
NoRegisterWebHook bool
GitServerURL string
GitOwner string
GitRepo string
SourceURL string
WebHookURL string
Branch string
Labels map[string]string
secret []byte
}
var (
controllerEnvironmentsLong = templates.LongDesc(`A controller which takes a webhook and updates the environment via GitOps for remote clusters`)
controllerEnvironmentsExample = templates.Examples(`
# run the environment controller
jx controller environment
`)
)
// NewCmdControllerEnvironment creates the command
func NewCmdControllerEnvironment(commonOpts *opts.CommonOptions) *cobra.Command {
options := ControllerEnvironmentOptions{
CommonOptions: commonOpts,
}
cmd := &cobra.Command{
Use: "environment",
Short: "A controller which takes a webhook and updates the environment via GitOps for remote clusters",
Long: controllerEnvironmentsLong,
Example: controllerEnvironmentsExample,
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
CheckErr(err)
},
}
cmd.Flags().IntVarP(&options.Port, optionPort, "", 8080, "The TCP port to listen on.")
cmd.Flags().StringVarP(&options.BindAddress, optionBind, "", "",
"The interface address to bind to (by default, will listen on all interfaces/addresses).")
cmd.Flags().StringVarP(&options.Path, "path", "p", "/",
"The path to listen on for requests to trigger a pipeline run.")
cmd.Flags().StringVarP(&options.ServiceAccount, "service-account", "", "tekton-bot", "The Kubernetes ServiceAccount to use to run the pipeline")
cmd.Flags().BoolVarP(&options.NoGitCredeentialsInit, "no-git-init", "", false, "Disables checking we have setup git credentials on startup")
cmd.Flags().BoolVarP(&options.NoGitCredeentialsInit, "no-register-webhook", "", false, "Disables checking to register the webhook on startup")
cmd.Flags().StringVarP(&options.SourceURL, "source-url", "s", "", "The source URL of the environment git repository")
cmd.Flags().StringVarP(&options.GitServerURL, "git-server-url", "", "", "The git server URL. If not specified defaults to $GIT_SERVER_URL")
cmd.Flags().StringVarP(&options.GitOwner, "owner", "o", "", "The git repository owner. If not specified defaults to $OWNER")
cmd.Flags().StringVarP(&options.GitRepo, "repo", "r", "", "The git repository name. If not specified defaults to $REPO")
cmd.Flags().StringVarP(&options.WebHookURL, "webhook-url", "w", "", "The external WebHook URL of this controller to register with the git provider. If not specified defaults to $WEBHOOK_URL")
return cmd
}
// Run will implement this command
func (o *ControllerEnvironmentOptions) Run() error {
var err error
if o.SourceURL != "" {
gitInfo, err := gits.ParseGitURL(o.SourceURL)
if err != nil {
return err
}
if o.GitServerURL == "" {
o.GitServerURL = gitInfo.ProviderURL()
}
if o.GitOwner == "" {
o.GitOwner = gitInfo.Organisation
}
if o.GitRepo == "" {
o.GitRepo = gitInfo.Name
}
}
if o.GitServerURL == "" {
o.GitServerURL = os.Getenv("GIT_SERVER_URL")
if o.GitServerURL == "" {
return util.MissingOption("git-server-url")
}
}
if o.GitOwner == "" {
o.GitOwner = os.Getenv("OWNER")
if o.GitOwner == "" {
return util.MissingOption("owner")
}
}
if o.GitRepo == "" {
o.GitRepo = os.Getenv("REPO")
if o.GitRepo == "" {
return util.MissingOption("repo")
}
}
if o.Branch == "" {
o.Branch = os.Getenv("BRANCH")
if o.Branch == "" {
o.Branch = "master"
}
}
if o.WebHookURL == "" {
o.WebHookURL = os.Getenv("WEBHOOK_URL")
if o.WebHookURL == "" {
o.WebHookURL, err = o.discoverWebHookURL()
if err != nil {
return err
}
}
}
if o.SourceURL == "" {
o.SourceURL = util.UrlJoin(o.GitServerURL, o.GitOwner, o.GitRepo)
}
log.Infof("using environment source directory %s and external webhook URL: %s\n", util.ColorInfo(o.SourceURL), util.ColorInfo(o.WebHookURL))
o.secret, err = o.loadOrCreateHmacSecret()
if err != nil {
return errors.Wrapf(err, "loading hmac secret")
}
if !o.NoGitCredeentialsInit {
err = o.InitGitConfigAndUser()
if err != nil {
return err
}
}
if !o.NoRegisterWebHook {
err = o.registerWebHook(o.WebHookURL, o.secret)
if err != nil {
return err
}
}
mux := http.NewServeMux()
mux.Handle(o.Path, http.HandlerFunc(o.handleRequests))
mux.Handle(HealthPath, http.HandlerFunc(o.health))
mux.Handle(ReadyPath, http.HandlerFunc(o.ready))
log.Infof("Environment Controller is now listening on %s for WebHooks from the source repository %s to trigger promotions\n", util.ColorInfo(util.UrlJoin(o.WebHookURL, o.Path)), util.ColorInfo(o.SourceURL))
return http.ListenAndServe(":"+strconv.Itoa(o.Port), mux)
}
// health returns either HTTP 204 if the service is healthy, otherwise nothing ('cos it's dead).
func (o *ControllerEnvironmentOptions) health(w http.ResponseWriter, r *http.Request) {
log.Debug("Health check")
w.WriteHeader(http.StatusNoContent)
}
// ready returns either HTTP 204 if the service is ready to serve requests, otherwise HTTP 503.
func (o *ControllerEnvironmentOptions) ready(w http.ResponseWriter, r *http.Request) {
log.Debug("Ready check")
if o.isReady() {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusServiceUnavailable)
}
}
// handle request for pipeline runs
func (o *ControllerEnvironmentOptions) startPipelineRun(w http.ResponseWriter, r *http.Request) {
err := o.stepGitCredentials()
if err != nil {
log.Warn(err.Error())
}
sourceURL := o.SourceURL
branch := o.Branch
revision := "master"
pr := &StepCreateTaskOptions{}
pr.PipelineKind = jenkinsfile.PipelineKindRelease
copy := *o.CommonOptions
pr.CommonOptions = ©
// defaults
pr.SourceName = "source"
pr.Duration = time.Second * 20
pr.Trigger = string(pipelineapi.PipelineTriggerTypeManual)
pr.CloneGitURL = sourceURL
pr.DeleteTempDir = true
pr.Branch = branch
pr.Revision = revision
pr.ServiceAccount = o.ServiceAccount
// turn map into string array with = separator to match type of custom labels which are CLI flags
for key, value := range o.Labels {
pr.CustomLabels = append(pr.CustomLabels, fmt.Sprintf("%s=%s", key, value))
}
log.Infof("triggering pipeline for repo %s branch %s revision %s\n", sourceURL, branch, revision)
err = pr.Run()
if err != nil {
o.returnError(err, err.Error(), w, r)
return
}
results := &PipelineRunResponse{
Resources: pr.Results.ObjectReferences(),
}
err = o.marshalPayload(w, r, results)
if err != nil {
o.returnError(err, "failed to marshal payload", w, r)
}
return
}
// discoverWebHookURL lets try discover the webhook URL from the Service
func (o *ControllerEnvironmentOptions) discoverWebHookURL() (string, error) {
kubeCtl, ns, err := o.KubeClientAndNamespace()
if err != nil {
return "", err
}
serviceInterface := kubeCtl.CoreV1().Services(ns)
svc, err := serviceInterface.Get(environmentControllerService, metav1.GetOptions{})
if err != nil {
return "", errors.Wrapf(err, "failed to find Service %s in namespace %s", environmentControllerService, ns)
}
u := services.GetServiceURL(svc)
if u != "" {
return u, nil
}
if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
// lets wait for the LoadBalancer to be resolved
loggedWait := false
fn := func() (bool, error) {
svc, err := serviceInterface.Get(environmentControllerService, metav1.GetOptions{})
if err != nil {
return false, err
}
u = services.GetServiceURL(svc)
if u != "" {
return true, nil
}
if !loggedWait {
loggedWait = true
log.Infof("waiting for the external IP on the service %s in namespace %s ...\n", environmentControllerService, ns)
}
return false, nil
}
err = o.RetryUntilTrueOrTimeout(time.Minute*5, time.Second*3, fn)
if u != "" {
return u, nil
}
if err != nil {
return "", err
}
}
return "", fmt.Errorf("could not find external URL of Service %s in namespace %s", environmentControllerService, ns)
}
// loadOrCreateHmacSecret loads the hmac secret
func (o *ControllerEnvironmentOptions) loadOrCreateHmacSecret() ([]byte, error) {
kubeCtl, ns, err := o.KubeClientAndNamespace()
if err != nil {
return nil, err
}
secretInterface := kubeCtl.CoreV1().Secrets(ns)
secret, err := secretInterface.Get(environmentControllerHmacSecret, metav1.GetOptions{})
if err == nil {
if secret.Data == nil || len(secret.Data[environmentControllerHmacSecretKey]) == 0 {
// lets update the secret with a valid hmac token
err = o.ensureHmacTokenPopulated()
if err != nil {
return nil, err
}
if secret.Data == nil {
secret.Data = map[string][]byte{}
}
secret.Data[environmentControllerHmacSecretKey] = []byte(o.HMACToken)
secret, err = secretInterface.Update(secret)
if err != nil {
return nil, errors.Wrapf(err, "failed to update HMAC token secret %s in namespace %s", environmentControllerHmacSecret, ns)
}
}
} else {
err = o.ensureHmacTokenPopulated()
if err != nil {
return nil, err
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: environmentControllerHmacSecret,
},
Data: map[string][]byte{
environmentControllerHmacSecretKey: []byte(o.HMACToken),
},
}
secret, err = secretInterface.Create(secret)
if err != nil {
return nil, errors.Wrapf(err, "failed to create HMAC token secret %s in namespace %s", environmentControllerHmacSecret, ns)
}
}
if secret == nil || secret.Data == nil {
return nil, fmt.Errorf("no Secret %s found in namespace %s", environmentControllerHmacSecret, ns)
}
return secret.Data[environmentControllerHmacSecretKey], nil
}
func (o *ControllerEnvironmentOptions) ensureHmacTokenPopulated() error {
if o.HMACToken == "" {
var err error
// why 41? seems all examples so far have a random token of 41 chars
o.HMACToken, err = util.RandStringBytesMaskImprSrc(41)
if err != nil {
return errors.Wrapf(err, "failed to generate hmac token")
}
}
return nil
}
func (o *ControllerEnvironmentOptions) isReady() bool {
// TODO a better readiness check
return true
}
func (o *ControllerEnvironmentOptions) unmarshalBody(w http.ResponseWriter, r *http.Request, result interface{}) error {
// TODO assume JSON for now
data, err := ioutil.ReadAll(r.Body)
if err != nil {
return errors.Wrap(err, "reading the JSON request body")
}
err = json.Unmarshal(data, result)
if err != nil {
return errors.Wrap(err, "unmarshalling the JSON request body")
}
return nil
}
func (o *ControllerEnvironmentOptions) marshalPayload(w http.ResponseWriter, r *http.Request, payload interface{}) error {
data, err := json.Marshal(payload)
if err != nil {
return errors.Wrapf(err, "marshalling the JSON payload %#v", payload)
}
w.Write(data)
return nil
}
func (o *ControllerEnvironmentOptions) onError(err error) {
if err != nil {
log.Errorf("%v", err)
}
}
func (o *ControllerEnvironmentOptions) returnError(err error, message string, w http.ResponseWriter, r *http.Request) {
log.Errorf("%v %s", err, message)
o.onError(err)
w.WriteHeader(500)
w.Write([]byte(message))
}
func (o *ControllerEnvironmentOptions) stepGitCredentials() error {
if !o.NoGitCredeentialsInit {
copy := *o.CommonOptions
copy.BatchMode = true
gsc := &StepGitCredentialsOptions{
StepOptions: StepOptions{
CommonOptions: ©,
},
}
err := gsc.Run()
if err != nil {
return errors.Wrapf(err, "failed to run: jx step gc credentials")
}
}
return nil
}
// handle request for pipeline runs
func (o *ControllerEnvironmentOptions) handleRequests(w http.ResponseWriter, r *http.Request) {
eventType, _, _, valid, _ := ValidateWebhook(w, r, o.secret, false)
if !valid || eventType == "" {
return
}
o.startPipelineRun(w, r)
}
func (o *ControllerEnvironmentOptions) registerWebHook(webhookURL string, secret []byte) error {
gitURL := o.SourceURL
log.Infof("verifying that the webhook is registered for the git repository %s\n", util.ColorInfo(gitURL))
provider, err := o.GitProviderForURL(gitURL, "creating webhook git provider")
if err != nil {
return errors.Wrapf(err, "failed to create git provider for git URL %s", gitURL)
}
webHookData := &gits.GitWebHookArguments{
Owner: o.GitOwner,
Repo: &gits.GitRepository{
Name: o.GitRepo,
},
URL: webhookURL,
Secret: string(secret),
}
err = provider.CreateWebHook(webHookData)
if err != nil {
return errors.Wrapf(err, "failed to create git WebHook provider for URL %s", gitURL)
}
return nil
}
// ValidateWebhook ensures that the provided request conforms to the
// format of a Github webhook and the payload can be validated with
// the provided hmac secret. It returns the event type, the event guid,
// the payload of the request, whether the webhook is valid or not,
// and finally the resultant HTTP status code
func ValidateWebhook(w http.ResponseWriter, r *http.Request, hmacSecret []byte, requireGitHubHeaders bool) (string, string, []byte, bool, int) {
defer r.Body.Close()
// Our health check uses GET, so just kick back a 200.
if r.Method == http.MethodGet {
return "", "", nil, false, http.StatusOK
}
// Header checks: It must be a POST with an event type and a signature.
if r.Method != http.MethodPost {
responseHTTPError(w, http.StatusMethodNotAllowed, "405 Method not allowed")
return "", "", nil, false, http.StatusMethodNotAllowed
}
eventType := r.Header.Get("X-GitHub-Event")
eventGUID := r.Header.Get("X-GitHub-Delivery")
if requireGitHubHeaders {
if eventType == "" {
responseHTTPError(w, http.StatusBadRequest, "400 Bad Request: Missing X-GitHub-Event Header")
return "", "", nil, false, http.StatusBadRequest
}
if eventGUID == "" {
responseHTTPError(w, http.StatusBadRequest, "400 Bad Request: Missing X-GitHub-Delivery Header")
return "", "", nil, false, http.StatusBadRequest
}
} else {
if eventType == "" {
eventType = "push"
}
}
sig := r.Header.Get("X-Hub-Signature")
if sig == "" {
responseHTTPError(w, http.StatusForbidden, "403 Forbidden: Missing X-Hub-Signature")
return "", "", nil, false, http.StatusForbidden
}
contentType := r.Header.Get("content-type")
if contentType != "application/json" {
responseHTTPError(w, http.StatusBadRequest, "400 Bad Request: Hook only accepts content-type: application/json - please reconfigure this hook on GitHub")
return "", "", nil, false, http.StatusBadRequest
}
payload, err := ioutil.ReadAll(r.Body)
if err != nil {
responseHTTPError(w, http.StatusInternalServerError, "500 Internal Server Error: Failed to read request body")
return "", "", nil, false, http.StatusInternalServerError
}
// Validate the payload with our HMAC secret.
if !ValidatePayload(payload, sig, hmacSecret) {
responseHTTPError(w, http.StatusForbidden, "403 Forbidden: Invalid X-Hub-Signature")
return "", "", nil, false, http.StatusForbidden
}
return eventType, eventGUID, payload, true, http.StatusOK
}
// ValidatePayload ensures that the request payload signature matches the key.
func ValidatePayload(payload []byte, sig string, key []byte) bool {
if !strings.HasPrefix(sig, "sha1=") {
return false
}
sig = sig[5:]
sb, err := hex.DecodeString(sig)
if err != nil {
return false
}
mac := hmac.New(sha1.New, key)
mac.Write(payload)
expected := mac.Sum(nil)
return hmac.Equal(sb, expected)
}
// PayloadSignature returns the signature that matches the payload.
func PayloadSignature(payload []byte, key []byte) string {
mac := hmac.New(sha1.New, key)
mac.Write(payload)
sum := mac.Sum(nil)
return "sha1=" + hex.EncodeToString(sum)
}
func responseHTTPError(w http.ResponseWriter, statusCode int, response string) {
logrus.WithFields(logrus.Fields{
"response": response,
"status-code": statusCode,
}).Debug(response)
http.Error(w, response, statusCode)
}
|
[
"\"GIT_SERVER_URL\"",
"\"OWNER\"",
"\"REPO\"",
"\"BRANCH\"",
"\"WEBHOOK_URL\""
] |
[] |
[
"REPO",
"GIT_SERVER_URL",
"WEBHOOK_URL",
"BRANCH",
"OWNER"
] |
[]
|
["REPO", "GIT_SERVER_URL", "WEBHOOK_URL", "BRANCH", "OWNER"]
|
go
| 5 | 0 | |
backend/inventory_tracker_34421/wsgi.py
|
"""
WSGI config for inventory_tracker_34421 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'inventory_tracker_34421.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
x/bep3/client/cli/query.go
|
package cli
import (
"context"
"encoding/hex"
"fmt"
"strconv"
"strings"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/spf13/cobra"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/kava-labs/kava/x/bep3/types"
)
// Query atomic swaps flags
const (
flagInvolve = "involve"
flagExpiration = "expiration"
flagStatus = "status"
flagDirection = "direction"
)
// GetQueryCmd returns the cli query commands for this module
func GetQueryCmd(queryRoute string) *cobra.Command {
// Group bep3 queries under a subcommand
bep3QueryCmd := &cobra.Command{
Use: "bep3",
Short: "Querying commands for the bep3 module",
DisableFlagParsing: true,
SuggestionsMinimumDistance: 2,
RunE: client.ValidateCmd,
}
cmds := []*cobra.Command{
QueryCalcSwapIDCmd(queryRoute),
QueryCalcRandomNumberHashCmd(queryRoute),
QueryGetAssetSupplyCmd(queryRoute),
QueryGetAssetSuppliesCmd(queryRoute),
QueryGetAtomicSwapCmd(queryRoute),
QueryGetAtomicSwapsCmd(queryRoute),
QueryParamsCmd(queryRoute),
}
for _, cmd := range cmds {
flags.AddQueryFlagsToCmd(cmd)
}
bep3QueryCmd.AddCommand(cmds...)
return bep3QueryCmd
}
// QueryCalcRandomNumberHashCmd calculates the random number hash for a number and timestamp
func QueryCalcRandomNumberHashCmd(queryRoute string) *cobra.Command {
return &cobra.Command{
Use: "calc-rnh [unix-timestamp]",
Short: "calculates an example random number hash from an optional timestamp",
Example: "bep3 calc-rnh now",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
userTimestamp := "now"
if len(args) > 0 {
userTimestamp = args[0]
}
// Timestamp defaults to time.Now() unless it's explicitly set
var timestamp int64
if strings.Compare(userTimestamp, "now") == 0 {
timestamp = tmtime.Now().Unix()
} else {
userTimestamp, err := strconv.ParseInt(userTimestamp, 10, 64)
if err != nil {
return err
}
timestamp = userTimestamp
}
// Load hex-encoded cryptographically strong pseudo-random number
randomNumber, err := types.GenerateSecureRandomNumber()
if err != nil {
return err
}
randomNumberHash := types.CalculateRandomHash(randomNumber, timestamp)
// Prepare random number, timestamp, and hash for output
randomNumberStr := fmt.Sprintf("Random number: %s\n", hex.EncodeToString(randomNumber))
timestampStr := fmt.Sprintf("Timestamp: %d\n", timestamp)
randomNumberHashStr := fmt.Sprintf("Random number hash: %s", hex.EncodeToString(randomNumberHash))
output := []string{randomNumberStr, timestampStr, randomNumberHashStr}
return clientCtx.PrintObjectLegacy(strings.Join(output, ""))
},
}
}
// QueryCalcSwapIDCmd calculates the swapID for a random number hash, sender, and sender other chain
func QueryCalcSwapIDCmd(queryRoute string) *cobra.Command {
return &cobra.Command{
Use: "calc-swapid [random-number-hash] [sender] [sender-other-chain]",
Short: "calculate swap ID for the given random number hash, sender, and sender other chain",
Example: "bep3 calc-swapid 0677bd8a303dd981810f34d8e5cc6507f13b391899b84d3c1be6c6045a17d747 kava1l0xsq2z7gqd7yly0g40y5836g0appumark77ny bnb1ud3q90r98l3mhd87kswv3h8cgrymzeljct8qn7",
Args: cobra.MinimumNArgs(3),
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
// Parse query params
randomNumberHash, err := hex.DecodeString(args[0])
if err != nil {
return err
}
sender, err := sdk.AccAddressFromBech32(args[1])
if err != nil {
return err
}
senderOtherChain := args[2]
// Calculate swap ID and convert to human-readable string
swapID := types.CalculateSwapID(randomNumberHash, sender, senderOtherChain)
return clientCtx.PrintObjectLegacy(hex.EncodeToString(swapID))
},
}
}
// QueryGetAssetSupplyCmd queries as asset's current in swap supply, active, supply, and supply limit
func QueryGetAssetSupplyCmd(queryRoute string) *cobra.Command {
return &cobra.Command{
Use: "supply [denom]",
Short: "get information about an asset's supply",
Example: "bep3 supply bnb",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
queryClient := types.NewQueryClient(clientCtx)
res, err := queryClient.AssetSupply(context.Background(), &types.QueryAssetSupplyRequest{
Denom: args[0],
})
if err != nil {
return err
}
return clientCtx.PrintProto(res)
},
}
}
// QueryGetAssetSuppliesCmd queries AssetSupplies in the store
func QueryGetAssetSuppliesCmd(queryRoute string) *cobra.Command {
return &cobra.Command{
Use: "supplies",
Short: "get a list of all asset supplies",
Example: "bep3 supplies",
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
queryClient := types.NewQueryClient(clientCtx)
res, err := queryClient.AssetSupplies(context.Background(), &types.QueryAssetSuppliesRequest{
// TODO: Pagination here?
})
if err != nil {
return err
}
if len(res.AssetSupplies) == 0 {
return fmt.Errorf("there are currently no asset supplies")
}
return clientCtx.PrintProto(res)
},
}
}
// QueryGetAtomicSwapCmd queries an AtomicSwap by swapID
func QueryGetAtomicSwapCmd(queryRoute string) *cobra.Command {
return &cobra.Command{
Use: "swap [swap-id]",
Short: "get atomic swap information",
Example: "bep3 swap 6682c03cc3856879c8fb98c9733c6b0c30758299138166b6523fe94628b1d3af",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
queryClient := types.NewQueryClient(clientCtx)
res, err := queryClient.AtomicSwap(context.Background(), &types.QueryAtomicSwapRequest{
SwapId: args[0],
})
if err != nil {
return err
}
return clientCtx.PrintProto(res)
},
}
}
// QueryGetAtomicSwapsCmd queries AtomicSwaps in the store
func QueryGetAtomicSwapsCmd(queryRoute string) *cobra.Command {
cmd := &cobra.Command{
Use: "swaps",
Short: "query atomic swaps with optional filters",
Long: strings.TrimSpace(`Query for all paginated atomic swaps that match optional filters:
Example:
$ kvcli q bep3 swaps --involve=kava1l0xsq2z7gqd7yly0g40y5836g0appumark77ny
$ kvcli q bep3 swaps --expiration=280
$ kvcli q bep3 swaps --status=(Open|Completed|Expired)
$ kvcli q bep3 swaps --direction=(Incoming|Outgoing)
$ kvcli q bep3 swaps --page=2 --limit=100
`,
),
RunE: func(cmd *cobra.Command, args []string) error {
bechInvolveAddr, err := cmd.Flags().GetString(flagInvolve)
if err != nil {
return err
}
strExpiration, err := cmd.Flags().GetString(flagExpiration)
if err != nil {
return err
}
strSwapStatus, err := cmd.Flags().GetString(flagStatus)
if err != nil {
return err
}
strSwapDirection, err := cmd.Flags().GetString(flagDirection)
if err != nil {
return err
}
pageReq, err := client.ReadPageRequest(cmd.Flags())
if err != nil {
return err
}
req := types.QueryAtomicSwapsRequest{
Pagination: pageReq,
}
if len(bechInvolveAddr) != 0 {
involveAddr, err := sdk.AccAddressFromBech32(bechInvolveAddr)
if err != nil {
return err
}
req.Involve = involveAddr.String()
}
if len(strExpiration) != 0 {
expiration, err := strconv.ParseUint(strExpiration, 10, 64)
if err != nil {
return err
}
req.Expiration = expiration
}
if len(strSwapStatus) != 0 {
swapStatus := types.NewSwapStatusFromString(strSwapStatus)
if !swapStatus.IsValid() {
return fmt.Errorf("invalid swap status %s", strSwapStatus)
}
req.Status = swapStatus
}
if len(strSwapDirection) != 0 {
swapDirection := types.NewSwapDirectionFromString(strSwapDirection)
if !swapDirection.IsValid() {
return fmt.Errorf("invalid swap direction %s", strSwapDirection)
}
req.Direction = swapDirection
}
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
queryClient := types.NewQueryClient(clientCtx)
res, err := queryClient.AtomicSwaps(context.Background(), &req)
if err != nil {
return err
}
return clientCtx.PrintProto(res)
},
}
cmd.Flags().String(flagInvolve, "", "(optional) filter by atomic swaps that involve an address")
cmd.Flags().String(flagExpiration, "", "(optional) filter by atomic swaps that expire before a block height")
cmd.Flags().String(flagStatus, "", "(optional) filter by atomic swap status, status: open/completed/expired")
cmd.Flags().String(flagDirection, "", "(optional) filter by atomic swap direction, direction: incoming/outgoing")
flags.AddPaginationFlagsToCmd(cmd, "swaps")
return cmd
}
// QueryParamsCmd queries the bep3 module parameters
func QueryParamsCmd(queryRoute string) *cobra.Command {
return &cobra.Command{
Use: "params",
Short: "get the bep3 module parameters",
Example: "bep3 params",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
queryClient := types.NewQueryClient(clientCtx)
res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{})
if err != nil {
return err
}
return clientCtx.PrintProto(&res.Params)
},
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
test/unit/ingest/infrastructure/mq/publishers/test_transfer_ready_queue_publisher.py
|
from unittest import TestCase
from unittest.mock import patch
from app.ingest.infrastructure.mq.publishers.transfer_ready_queue_publisher import TransferReadyQueuePublisher
from test.resources.ingest.ingest_factory import create_ingest
class TestTransferReadyQueuePublisher(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.TEST_INGEST = create_ingest()
cls.TEST_DESTINATION_PATH = "test_path"
@patch("app.common.infrastructure.mq.publishers.stomp_publisher_base.StompPublisherBase._publish_message")
@patch('app.ingest.infrastructure.mq.publishers.transfer_ready_queue_publisher.os.getenv')
def test_publish_message_happy_path(self, os_getenv_stub, inner_publish_message_mock) -> None:
os_getenv_stub.return_value = self.TEST_DESTINATION_PATH
self.sut = TransferReadyQueuePublisher()
self.sut.publish_message(self.TEST_INGEST)
inner_publish_message_mock.assert_called_once_with(
{
'package_id': self.TEST_INGEST.package_id,
's3_path': self.TEST_INGEST.s3_path,
's3_bucket_name': self.TEST_INGEST.s3_bucket_name,
'destination_path': self.TEST_DESTINATION_PATH,
'application_name': self.TEST_INGEST.depositing_application
}
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
os.go
|
// Gulu - Golang common utilities for everyone.
// Copyright (c) 2019-present, b3log.org
//
// Gulu is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// See the Mulan PSL v2 for more details.
package gulu
import (
"bytes"
"errors"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strings"
)
// IsWindows determines whether current OS is Windows.
func (*GuluOS) IsWindows() bool {
return "windows" == runtime.GOOS
}
// IsLinux determines whether current OS is Linux.
func (*GuluOS) IsLinux() bool {
return "linux" == runtime.GOOS
}
// IsDarwin determines whether current OS is Darwin.
func (*GuluOS) IsDarwin() bool {
return "darwin" == runtime.GOOS
}
// Pwd gets the path of current working directory.
func (*GuluOS) Pwd() string {
file, _ := exec.LookPath(os.Args[0])
pwd, _ := filepath.Abs(file)
return filepath.Dir(pwd)
}
// Home returns the home directory for the executing user.
//
// This uses an OS-specific method for discovering the home directory.
// An error is returned if a home directory cannot be detected.
func (*GuluOS) Home() (string, error) {
user, err := user.Current()
if nil == err {
return user.HomeDir, nil
}
// cross compile support
if OS.IsWindows() {
return homeWindows()
}
// Unix-like system, so just assume Unix
return homeUnix()
}
func homeUnix() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// If that fails, try the shell
var stdout bytes.Buffer
cmd := exec.Command("sh", "-c", "eval echo ~$USER")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func homeWindows() (string, error) {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
if home == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
}
return home, nil
}
|
[
"\"HOME\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
cmd/oj/main.go
|
// Copyright (c) 2020, Peter Ohler, All rights reserved.
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/ngjaying/ojg"
"github.com/ngjaying/ojg/alt"
"github.com/ngjaying/ojg/asm"
"github.com/ngjaying/ojg/jp"
"github.com/ngjaying/ojg/oj"
"github.com/ngjaying/ojg/pretty"
"github.com/ngjaying/ojg/sen"
)
const version = "1.9.4"
var (
indent = 2
color = false
bright = false
sortKeys = false
lazy = false
senOut = false
tab = false
showFnDocs = false
showConf = false
safe = false
mongo = false
// If true wrap extracts with an array.
wrapExtract = false
extracts = []jp.Expr{}
matches = []*jp.Script{}
planDef = ""
showVersion bool
plan *asm.Plan
root = map[string]interface{}{}
showRoot bool
prettyOpt = ""
width = 80
maxDepth = 3
prettyOn = false
align = false
html = false
convName = ""
confFile = ""
conv *alt.Converter
options *ojg.Options
)
func init() {
flag.IntVar(&indent, "i", indent, "indent")
flag.BoolVar(&color, "c", color, "color")
flag.BoolVar(&sortKeys, "s", sortKeys, "sort")
flag.BoolVar(&bright, "b", bright, "bright color")
flag.BoolVar(&wrapExtract, "w", wrapExtract, "wrap extracts in an array")
flag.BoolVar(&lazy, "z", lazy, "lazy mode accepts Simple Encoding Notation (quotes and commas mostly optional)")
flag.BoolVar(&senOut, "sen", senOut, "output in Simple Encoding Notation")
flag.BoolVar(&tab, "t", tab, "indent with tabs")
flag.Var(&exValue{}, "x", "extract path")
flag.Var(&matchValue{}, "m", "match equation/script")
flag.BoolVar(&showVersion, "version", showVersion, "display version and exit")
flag.StringVar(&planDef, "a", planDef, "assembly plan or plan file using @<plan>")
flag.BoolVar(&showRoot, "r", showRoot, "print root if an assemble plan provided")
flag.StringVar(&prettyOpt, "p", prettyOpt, `pretty print with the width, depth, and align as <width>.<max-depth>.<align>`)
flag.BoolVar(&html, "html", html, "output colored output as HTML")
flag.BoolVar(&safe, "safe", safe, "escape &, <, and > for HTML inclusion")
flag.StringVar(&confFile, "f", confFile, "configuration file (see -help-config), - indicates no file")
flag.BoolVar(&showFnDocs, "fn", showFnDocs, "describe assembly plan functions")
flag.BoolVar(&showFnDocs, "help-fn", showFnDocs, "describe assembly plan functions")
flag.BoolVar(&showConf, "help-config", showConf, "describe .oj-config.sen format")
flag.BoolVar(&mongo, "mongo", mongo, "parse mongo Javascript output")
flag.StringVar(&convName, "conv", convName, `apply converter before writing. Supported values are:
nano - converts integers over 946684800000000000 (2000-01-01) to time
rcf3339 - converts string in RFC3339 or RFC3339Nano to time
mongo - converts mongo wrapped values e.g., {$numberLong: "123"} => 123
<with-numbers> - if digits are included then time layout is assumed
<other> - any other is taken to be a key in a map with a string or nano time
`)
}
// main installs the usage text, parses the flags once so informational
// flags (-version, -help-config, -fn) can short circuit, and then delegates
// the real work to run. Errors from run are written to stderr and cause a
// non-zero exit.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, `
usage: %s [<options>] [@<extraction>]... [(<match>)]... [<json-file>]...

The default behavior is to write the JSON formatted according to the color
options and the indentation option. If no files are specified JSON input is
expected from stdin.

Filtering and extraction of elements is supported using JSONPath and the
scripting that is part of JSONPath filters.

Extraction paths can be provided either with the -x option or an argument
starting with a $ or @. A Expr.Get() is executed and all the results are
either written or wrapped with an array and written depending on the value of
the wrap option (-w).

  oj -x abc.def myfile.json "@.x[?(@.y > 1)]"

To filter JSON documents the match option (-m) is used. If a JSON document
matches at least one match option the JSON will be written. In addition to the
-m option an argument starting with a '(' is assumed to be a match script that
follows the oj.Script format.

  oj -m "(@.name == 'Pete')" myfile.json "(@.name == 'Makie')"

An argument that starts with a { or [ marks the start of a JSON document that
is composed of the remaining argument concatenated together. That document is
then used as the input.

  oj -i 0 -z {a:1, b:two}
  => {"a":1,"b":"two"}

Oj can also be used to assemble new JSON output from input data. An assembly
plan that describes how to assemble the new JSON is specified by the -a
option. The -fn option will display the documentation for assembly.

Pretty mode output can be used with JSON or the -sen option. It indents
according to a defined width and maximum depth in a best effort approach. The
-p takes a pattern of <width>.<max-depth>.<align> where width and max-depth
are integers and align is a boolean.

`, filepath.Base(os.Args[0]))
		flag.PrintDefaults()
		fmt.Fprintln(os.Stderr)
	}
	flag.Parse() // get config file if specified
	if showVersion {
		fmt.Printf("oj %s\n", version)
		os.Exit(0)
	}
	if showConf {
		displayConf()
		os.Exit(0)
	}
	if showFnDocs {
		displayFnDocs()
		os.Exit(0)
	}
	if err := run(); err != nil {
		fmt.Fprintf(os.Stderr, "*-*-* %s\n", err)
		os.Exit(1)
	}
}
// run performs the actual work: load the configuration file, classify the
// command line arguments into extraction paths, match scripts, inline JSON
// input, and file names, optionally set up a value converter and an
// assembly plan, then parse every input handing each top level value to
// write. Panics raised anywhere below are converted into the returned
// error.
func run() (err error) {
	defer func() {
		// Recover from panics used as a shortcut error path in this file.
		if r := recover(); r != nil {
			err, _ = r.(error)
		}
	}()
	loadConfig()

	flag.Parse() // load again to over-ride loaded config

	var input []byte
	var files []string
	for _, arg := range flag.Args() {
		if len(arg) == 0 {
			continue
		}
		// Once inline JSON input has started every remaining argument is
		// appended to it, regardless of what it looks like.
		if 0 < len(input) {
			input = append(input, arg...)
			continue
		}
		switch arg[0] {
		case '@', '$':
			// A leading @ or $ marks a JSONPath extraction expression.
			// NOTE(review): a parse failure silently drops the argument.
			x, err := jp.ParseString(arg)
			if err == nil {
				extracts = append(extracts, x)
			}
		case '(':
			// A leading ( marks a match filter script.
			// NOTE(review): a parse failure silently drops the argument.
			script, err := jp.NewScript(arg)
			if err == nil {
				matches = append(matches, script)
			}
		case '{', '[':
			// Start of inline JSON input.
			input = append(input, arg...)
		default:
			files = append(files, arg)
		}
	}
	// Build the optional converter applied to every parsed value.
	if 0 < len(convName) {
		switch strings.ToLower(convName) {
		case "nano":
			conv = &alt.TimeNanoConverter
		case "rfc3339":
			conv = &alt.TimeRFC3339Converter
		case "mongo":
			conv = &alt.MongoConverter
		default:
			if strings.ContainsAny(convName, "0123456789") {
				// A name containing digits is treated as a time layout and
				// strings of exactly that length are parsed with it.
				conv = &alt.Converter{
					String: []func(val string) (interface{}, bool){
						func(val string) (interface{}, bool) {
							if len(val) == len(convName) {
								if t, err := time.ParseInLocation(convName, val, time.UTC); err == nil {
									return t, true
								}
							}
							return val, false
						},
					},
				}
			} else {
				// Any other name is taken to be the single key of a map
				// whose value is either a time string or UNIX nanoseconds.
				conv = &alt.Converter{
					Map: []func(val map[string]interface{}) (interface{}, bool){
						func(val map[string]interface{}) (interface{}, bool) {
							if len(val) == 1 {
								switch tv := val[convName].(type) {
								case string:
									for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02"} {
										if t, err := time.ParseInLocation(layout, tv, time.UTC); err == nil {
											return t, true
										}
									}
								case int64:
									return time.Unix(0, tv), true
								}
							}
							return val, false
						},
					},
				}
			}
		}
	}
	// Choose the parser: mongo output, lazy SEN, or strict JSON.
	var p oj.SimpleParser
	switch {
	case mongo:
		sp := &sen.Parser{}
		sp.AddMongoFuncs()
		p = sp
		if conv == nil {
			conv = &alt.MongoConverter
		}
	case lazy:
		p = &sen.Parser{}
	default:
		p = &oj.Parser{Reuse: true}
	}
	// An assembly plan may be given inline (starts with '[') or as a file.
	planDef = strings.TrimSpace(planDef)
	if 0 < len(planDef) {
		if planDef[0] != '[' {
			var b []byte
			if b, err = ioutil.ReadFile(planDef); err != nil {
				fmt.Fprintf(os.Stderr, "*-*-* %s\n", err)
				os.Exit(1)
			}
			planDef = string(b)
		}
		var pd interface{}
		if pd, err = (&sen.Parser{}).Parse([]byte(planDef)); err != nil {
			panic(err)
		}
		plist, _ := pd.([]interface{})
		if len(plist) == 0 {
			panic(fmt.Errorf("assembly plan not an array"))
		}
		plan = asm.NewPlan(plist)
	}
	// Parse the inputs in order: files, then inline JSON, or stdin when
	// neither was given. write is invoked for each top level value.
	if 0 < len(files) {
		var f *os.File
		for _, file := range files {
			if f, err = os.Open(file); err == nil {
				_, err = p.ParseReader(f, write)
				f.Close()
			}
			if err != nil {
				panic(err)
			}
		}
	}
	if 0 < len(input) {
		if _, err = p.Parse(input, write); err != nil {
			panic(err)
		}
	}
	if len(files) == 0 && len(input) == 0 {
		if _, err = p.ParseReader(os.Stdin, write); err != nil {
			panic(err)
		}
	}
	// With -r the whole root (minus the working keys) is printed after the
	// plan has run over all inputs.
	if showRoot && plan != nil {
		plan = nil
		delete(root, "src")
		delete(root, "asm")
		write(root)
	}
	return
}
// write is the parser callback for each top level value. It applies the
// optional converter, drops values that do not satisfy any match script,
// and then emits either the extraction results or the value itself. An
// assembly plan, when present, is applied only on the plain JSON path.
// The return value is always false so parsing continues.
func write(v interface{}) bool {
	if conv != nil {
		v = conv.Convert(v)
	}
	if 0 < len(matches) {
		matched := false
		for _, m := range matches {
			if m.Match(v) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	emit := func(x interface{}) {
		if senOut {
			writeSEN(x)
		} else {
			writeJSON(x)
		}
	}
	if 0 < len(extracts) {
		if wrapExtract {
			// Collect all extraction results into a single array.
			var collected []interface{}
			for _, x := range extracts {
				collected = append(collected, x.Get(v)...)
			}
			emit(collected)
		} else {
			// Emit each extracted value separately.
			for _, x := range extracts {
				for _, ev := range x.Get(v) {
					emit(ev)
				}
			}
		}
		return false
	}
	if senOut {
		writeSEN(v)
		return false
	}
	if plan != nil {
		root["src"] = v
		if err := plan.Execute(root); err != nil {
			fmt.Fprintf(os.Stderr, "*-*-* %s\n", err)
			os.Exit(1)
		} else {
			v = root["asm"]
		}
	}
	writeJSON(v)
	return false
}
// writeJSON writes v to stdout as JSON followed by a newline. The shared
// options are assembled lazily on the first call and cached in the package
// level options variable, so the first call freezes the color, indent, and
// sort settings for the rest of the run.
func writeJSON(v interface{}) {
	if options == nil {
		o := ojg.Options{}
		if bright {
			o = oj.BrightOptions
			o.Color = true
			o.Sort = sortKeys
		} else if color || sortKeys || tab {
			o = ojg.DefaultOptions
			o.Color = color
		}
		o.Indent = indent
		o.Tab = tab
		o.HTMLUnsafe = !safe
		o.TimeFormat = time.RFC3339Nano
		o.Sort = sortKeys
		if html {
			// HTML output always escapes; colors become HTML spans only
			// when coloring was requested.
			o.HTMLUnsafe = false
			if color {
				o.SyntaxColor = ojg.HTMLOptions.SyntaxColor
				o.KeyColor = ojg.HTMLOptions.KeyColor
				o.NullColor = ojg.HTMLOptions.NullColor
				o.BoolColor = ojg.HTMLOptions.BoolColor
				o.NumberColor = ojg.HTMLOptions.NumberColor
				o.StringColor = ojg.HTMLOptions.StringColor
				o.TimeColor = ojg.HTMLOptions.TimeColor
				o.NoColor = ojg.HTMLOptions.NoColor
			}
		}
		options = &o
	}
	if 0 < len(prettyOpt) {
		parsePrettyOpt()
	}
	if prettyOn {
		// pretty packs width and depth into one float, e.g. 80.3.
		_ = pretty.WriteJSON(os.Stdout, v, options, float64(width)+float64(maxDepth)/10.0, align)
	} else {
		_ = oj.Write(os.Stdout, v, options)
	}
	os.Stdout.Write([]byte{'\n'})
}
// writeSEN writes v to stdout in Simple Encoding Notation followed by a
// newline. Like writeJSON it builds the shared options lazily on the first
// call and caches them in the package level options variable.
func writeSEN(v interface{}) {
	if options == nil {
		o := ojg.Options{}
		switch {
		case html:
			// Unlike writeJSON, SEN output uses the HTML option set
			// wholesale when -html is given.
			o = ojg.HTMLOptions
			o.Color = true
			o.HTMLUnsafe = false
		case bright:
			o = ojg.BrightOptions
			o.Color = true
		case color || sortKeys || tab:
			o = ojg.DefaultOptions
			o.Color = color
		}
		o.Indent = indent
		o.Tab = tab
		o.HTMLUnsafe = !safe
		o.TimeFormat = time.RFC3339Nano
		o.Sort = sortKeys
		options = &o
	}
	if 0 < len(prettyOpt) {
		parsePrettyOpt()
	}
	if prettyOn {
		// pretty packs width and depth into one float, e.g. 80.3.
		_ = pretty.WriteSEN(os.Stdout, v, options, float64(width)+float64(maxDepth)/10.0, align)
	} else {
		_ = sen.Write(os.Stdout, v, options)
	}
	os.Stdout.Write([]byte{'\n'})
}
// parsePrettyOpt decodes the -p option of the form
// <width>.<max-depth>.<align> into the width, maxDepth, and align package
// variables. Each part is optional; any part that is present turns pretty
// printing on. Invalid parts panic with the parse error.
func parsePrettyOpt() {
	if len(prettyOpt) == 0 {
		return
	}
	for i, part := range strings.Split(prettyOpt, ".") {
		if len(part) == 0 {
			continue
		}
		switch i {
		case 0:
			n, err := strconv.ParseInt(part, 10, 64)
			if err != nil {
				panic(err)
			}
			width = int(n)
			prettyOn = true
		case 1:
			n, err := strconv.ParseInt(part, 10, 64)
			if err != nil {
				panic(err)
			}
			maxDepth = int(n)
			prettyOn = true
		case 2:
			b, err := strconv.ParseBool(part)
			if err != nil {
				panic(err)
			}
			align = b
			prettyOn = true
		}
	}
}
// exValue implements flag.Value for the -x option, collecting JSONPath
// extraction expressions into the extracts slice.
type exValue struct {
}

// String returns an empty string; the flag has no meaningful default.
func (exValue) String() string {
	return ""
}

// Set parses s as a JSONPath expression and, on success, appends it to the
// extracts slice. The parse error, if any, is returned to the flag package.
func (exValue) Set(s string) error {
	x, err := jp.ParseString(s)
	if err != nil {
		return err
	}
	extracts = append(extracts, x)
	return nil
}
// matchValue implements flag.Value for the -m option, collecting match
// filter scripts into the matches slice.
type matchValue struct {
}

// String returns an empty string; the flag has no meaningful default.
func (matchValue) String() string {
	return ""
}

// Set parses s as a filter script and, on success, appends it to the
// matches slice. The parse error, if any, is returned to the flag package.
func (matchValue) Set(s string) error {
	script, err := jp.NewScript(s)
	if err != nil {
		return err
	}
	matches = append(matches, script)
	return nil
}
// loadConfig applies configuration from a file. When -f names a file that
// file is used exclusively ("-" means no file at all). Otherwise the first
// parseable default config found in ./.oj-config.sen, ./.oj-config.json,
// ~/.oj-config.sen, ~/.oj-config.json is applied.
//
// Fixes over the previous version: opened files are now closed (they were
// leaked), and an explicitly requested config file now returns immediately
// instead of being overridden by the implicit default files.
func loadConfig() {
	var conf interface{}
	if 0 < len(confFile) {
		if confFile == "-" { // special case, explicitly no config file
			return
		}
		f, err := os.Open(confFile)
		if err != nil {
			panic(err)
		}
		conf, err = sen.ParseReader(f)
		_ = f.Close()
		if err != nil {
			panic(err)
		}
		applyConf(conf)
		return
	}
	home := os.Getenv("HOME")
	for _, path := range []string{
		"./.oj-config.sen",
		"./.oj-config.json",
		home + "/.oj-config.sen",
		home + "/.oj-config.json",
	} {
		f, err := os.Open(path)
		if err != nil {
			continue
		}
		conf, err = sen.ParseReader(f)
		_ = f.Close()
		if err == nil {
			applyConf(conf)
			return
		}
	}
}
// applyConf transfers settings from a parsed configuration document into
// the package level variables. Values are looked up with JSONPath so
// missing entries simply yield no results for the loop based settings.
// NOTE(review): the plain boolean settings (bright, color, html-safe,
// lazy, sen, mongo) use a direct type assertion, so a missing key resets
// the variable to false; the second flag.Parse in run() re-applies any
// explicit command line choices afterwards.
func applyConf(conf interface{}) {
	bright, _ = jp.C("bright").First(conf).(bool)
	color, _ = jp.C("color").First(conf).(bool)
	for _, v := range jp.C("format").C("indent").Get(conf) {
		indent = int(alt.Int(v))
	}
	for _, v := range jp.C("format").C("tab").Get(conf) {
		tab = alt.Bool(v)
	}
	for _, v := range jp.C("format").C("pretty").Get(conf) {
		prettyOpt, _ = v.(string)
		parsePrettyOpt()
	}
	for _, v := range jp.C("format").C("width").Get(conf) {
		width = int(alt.Int(v))
		prettyOn = true
	}
	for _, v := range jp.C("format").C("depth").Get(conf) {
		maxDepth = int(alt.Int(v))
		prettyOn = true
	}
	for _, v := range jp.C("format").C("align").Get(conf) {
		align = alt.Bool(v)
		prettyOn = true
	}
	safe, _ = jp.C("html-safe").First(conf).(bool)
	lazy, _ = jp.C("lazy").First(conf).(bool)
	senOut, _ = jp.C("sen").First(conf).(bool)
	convName, _ = jp.C("conv").First(conf).(string)
	mongo, _ = jp.C("mongo").First(conf).(bool)
	setOptionsColor(conf, "bool", setBoolColor)
	setOptionsColor(conf, "key", setKeyColor)
	setOptionsColor(conf, "no-color", setNoColor)
	setOptionsColor(conf, "null", setNullColor)
	setOptionsColor(conf, "number", setNumberColor)
	setOptionsColor(conf, "string", setStringColor)
	setOptionsColor(conf, "time", setTimeColor)
	setOptionsColor(conf, "syntax", setSyntaxColor)
	setHTMLColor(conf, "bool", &sen.HTMLOptions.BoolColor)
	setHTMLColor(conf, "key", &sen.HTMLOptions.KeyColor)
	setHTMLColor(conf, "no-color", &sen.HTMLOptions.NoColor)
	setHTMLColor(conf, "null", &sen.HTMLOptions.NullColor)
	setHTMLColor(conf, "number", &sen.HTMLOptions.NumberColor)
	setHTMLColor(conf, "string", &sen.HTMLOptions.StringColor)
	setHTMLColor(conf, "syntax", &sen.HTMLOptions.SyntaxColor)
	// "time" was missing from the HTML set; keep it consistent with the
	// terminal color set above and the documented html config section.
	setHTMLColor(conf, "time", &sen.HTMLOptions.TimeColor)
}
// setOptionsColor looks up colors.<key> in the configuration document and,
// for each value found, resolves the color name and hands the resulting
// escape sequence to fun.
func setOptionsColor(conf interface{}, key string, fun func(color string)) {
	path := jp.C("colors").C(key)
	for _, v := range path.Get(conf) {
		name := alt.String(v)
		fun(pickColor(name))
	}
}
// setBoolColor sets the boolean color on both the default and bright
// option sets.
func setBoolColor(color string) {
	ojg.DefaultOptions.BoolColor = color
	ojg.BrightOptions.BoolColor = color
}

// setKeyColor sets the map key color on both the default and bright
// option sets.
func setKeyColor(color string) {
	ojg.DefaultOptions.KeyColor = color
	ojg.BrightOptions.KeyColor = color
}

// setNoColor sets the color-reset sequence on both the default and bright
// option sets.
func setNoColor(color string) {
	ojg.DefaultOptions.NoColor = color
	ojg.BrightOptions.NoColor = color
}

// setNullColor sets the null color on both the default and bright option
// sets.
func setNullColor(color string) {
	ojg.DefaultOptions.NullColor = color
	ojg.BrightOptions.NullColor = color
}

// setNumberColor sets the number color on both the default and bright
// option sets.
func setNumberColor(color string) {
	ojg.DefaultOptions.NumberColor = color
	ojg.BrightOptions.NumberColor = color
}

// setStringColor sets the string color on both the default and bright
// option sets.
func setStringColor(color string) {
	ojg.DefaultOptions.StringColor = color
	ojg.BrightOptions.StringColor = color
}

// setTimeColor sets the time color on both the default and bright option
// sets.
func setTimeColor(color string) {
	ojg.DefaultOptions.TimeColor = color
	ojg.BrightOptions.TimeColor = color
}

// setSyntaxColor sets the syntax color on both the default and bright
// option sets.
func setSyntaxColor(color string) {
	ojg.DefaultOptions.SyntaxColor = color
	ojg.BrightOptions.SyntaxColor = color
}
// setHTMLColor sets an HTML color string from the "html" section of the
// configuration document. The documented html section values are raw span
// strings (e.g. "<span style=...>"), not named terminal colors, so they
// are used verbatim. Previously this read the "colors" section and routed
// values through pickColor, which panics on anything but a named color and
// could never accept the documented html values.
func setHTMLColor(conf interface{}, key string, sp *string) {
	for _, v := range jp.C("html").C(key).Get(conf) {
		*sp = alt.String(v)
	}
}
// pickColor translates a case-insensitive color name into its ANSI escape
// sequence. It panics when the name is not one of the supported colors.
func pickColor(s string) string {
	codes := map[string]string{
		"normal":         "\x1b[m",
		"black":          "\x1b[30m",
		"red":            "\x1b[31m",
		"green":          "\x1b[32m",
		"yellow":         "\x1b[33m",
		"blue":           "\x1b[34m",
		"magenta":        "\x1b[35m",
		"cyan":           "\x1b[36m",
		"white":          "\x1b[37m",
		"gray":           "\x1b[90m",
		"bright-red":     "\x1b[91m",
		"bright-green":   "\x1b[92m",
		"bright-yellow":  "\x1b[93m",
		"bright-blue":    "\x1b[94m",
		"bright-magenta": "\x1b[95m",
		"bright-cyan":    "\x1b[96m",
		"bright-white":   "\x1b[97m",
	}
	c, ok := codes[strings.ToLower(s)]
	if !ok {
		panic(fmt.Errorf("%s is not a valid color choice", s))
	}
	return c
}
// displayFnDocs prints an overview of assembly plans followed by the
// documentation for every assembly function, sorted by function name.
func displayFnDocs() {
	fmt.Printf(`
An assembly plan is described by a JSON document or a SEN document. The format
is much like LISP but with brackets instead of parenthesis. A plan is
evaluated by evaluating the plan function which is usually an 'asm'
function. The plan operates on a data map which is the root during
evaluation. The source data is in the $.src and the expected assembled output
should be in $.asm.

An example of a plan in SEN format is (the first asm is optional):

  [ asm
    [set $.asm {good: bye}]  // set output to {good: bad}
    [set $.asm.hello world]  // output is now {good: bad, hello: world}
  ]

The functions available are:

`)
	docs := asm.FnDocs()
	keys := make([]string, 0, len(docs))
	for k := range docs {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b strings.Builder
	for _, k := range keys {
		// Indent continuation lines to align under the description column.
		fmt.Fprintf(&b, "  %10s: %s\n\n", k, strings.ReplaceAll(docs[k], "\n", "\n              "))
	}
	fmt.Println(b.String())
}
// displayConf prints the documentation for the .oj-config configuration
// file format. Fixed here: the html bool example previously ended with a
// mismatched quote ('..." instead of '...'), and the sentence describing
// the search order was ungrammatical.
func displayConf() {
	fmt.Printf(`
If an oj configuration file is present in the local directory or the home
directory that file is used to set the defaults for oj. The file can be in
either SEN or JSON format. The paths checked, in order, are:

  ./.oj-config.sen
  ./.oj-config.json
  ~/.oj-config.sen
  ~/.oj-config.json

The file format (SEN with comments) is:

{
  bright: true // Color if true will colorize the output with bright colors.
  color: false // Color if true will colorize the output. The bright option takes precedence.
  colors: {
    // Color values can be one of the following:
    //   normal
    //   black
    //   red
    //   green
    //   yellow
    //   blue
    //   magenta
    //   cyan
    //   white
    //   gray
    //   bright-red
    //   bright-green
    //   bright-yellow
    //   bright-blue
    //   bright-magenta
    //   bright-cyan
    //   bright-white
    syntax: normal
    key: bright-blue
    null: bright-red
    bool: bright-yellow
    number: bright-cyan
    string: bright-green
    time: bright-magenta
    no-color: normal // NoColor turns the color off.
  }
  // Either the pretty element can be used or the individual width, depth, and
  // align options can be specified separately.
  format: {indent: 2 tab: false pretty: 80.3.false}
  //format: {indent: 2 tab: false width: 80 depth: 3 align: false}
  html: {
    syntax: "<span>"
    key: '<span style="color:#44f">'
    null: '<span style="color:red">'
    bool: '<span style="color:#a40">'
    number: '<span style="color:#04a">'
    string: '<span style="color:green">'
    time: '<span style="color:#f0f">'
    no-color: "</span>"
  }
  html-safe: false
  lazy: true // -z option, lazy read for SEN format
  sen: true
  conv: rfc3339
  mongo: false
}
`)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
google/cloud/talent/v4beta1/talent-v4beta1-py/tests/unit/gapic/talent_v4beta1/test_tenant_service.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.talent_v4beta1.services.tenant_service import TenantServiceAsyncClient
from google.cloud.talent_v4beta1.services.tenant_service import TenantServiceClient
from google.cloud.talent_v4beta1.services.tenant_service import pagers
from google.cloud.talent_v4beta1.services.tenant_service import transports
from google.cloud.talent_v4beta1.services.tenant_service.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.talent_v4beta1.types import tenant
from google.cloud.talent_v4beta1.types import tenant as gct_tenant
from google.cloud.talent_v4beta1.types import tenant_service
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip marker: run the test only when the installed google-auth is < 1.25.0.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
# Skip marker: run the test only when the installed google-auth is >= 1.25.0.
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Return static (cert, key) byte pairs used as a stand-in mTLS client cert source."""
    return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-localhost endpoint for *client* so mTLS switching is observable."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ".mtls" into googleapis hostnames only."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert TenantServiceClient._get_default_mtls_endpoint(None) is None
    assert TenantServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    # Already-mtls endpoints pass through unchanged.
    assert TenantServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert TenantServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert TenantServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    # Non-googleapis hosts are left alone.
    assert TenantServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
    TenantServiceClient,
    TenantServiceAsyncClient,
])
def test_tenant_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client carrying the factory's credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'jobs.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.TenantServiceGrpcTransport, "grpc"),
    (transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_tenant_service_client_service_account_always_use_jwt(transport_class, transport_name):
    """Transports request self-signed JWT access only when always_use_jwt_access=True."""
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
    TenantServiceClient,
    TenantServiceAsyncClient,
])
def test_tenant_service_client_from_service_account_file(client_class):
    """from_service_account_file and its _json alias both build clients from the mocked factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        # from_service_account_json is an alias of from_service_account_file.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'jobs.googleapis.com:443'
def test_tenant_service_client_get_transport_class():
    """get_transport_class defaults to gRPC and resolves the "grpc" name to the gRPC class."""
    transport = TenantServiceClient.get_transport_class()
    available_transports = [
        transports.TenantServiceGrpcTransport,
    ]
    assert transport in available_transports

    transport = TenantServiceClient.get_transport_class("grpc")
    assert transport == transports.TenantServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
    (TenantServiceAsyncClient, transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(TenantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TenantServiceClient))
@mock.patch.object(TenantServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TenantServiceAsyncClient))
def test_tenant_service_client_client_options(client_class, transport_class, transport_name):
    """client_options and the GOOGLE_API_USE_MTLS_ENDPOINT env var control transport creation."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(TenantServiceClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(TenantServiceClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc", "true"),
    (TenantServiceAsyncClient, transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc", "false"),
    (TenantServiceAsyncClient, transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(TenantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TenantServiceClient))
@mock.patch.object(TenantServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TenantServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_tenant_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """With MTLS_ENDPOINT=auto, mTLS is used iff a client cert exists and is enabled."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    if use_client_cert_env == "false":
                        # NOTE(review): `client` here is the instance left over from the
                        # previous with-block (Python scoping); it is only used to read
                        # the patched endpoint constants, which are class attributes.
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
    (TenantServiceAsyncClient, transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_tenant_service_client_client_options_scopes(client_class, transport_class, transport_name):
    """Scopes given in client_options are forwarded to the transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
    (TenantServiceAsyncClient, transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_tenant_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
    """A credentials_file in client_options is forwarded to the transport constructor."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_tenant_service_client_client_options_from_dict():
    """client_options may be passed as a plain dict instead of a ClientOptions object."""
    with mock.patch('google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = TenantServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_create_tenant(transport: str = 'grpc', request_type=tenant_service.CreateTenantRequest):
    """create_tenant forwards the request to the stub and unmarshals the Tenant response."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gct_tenant.Tenant(
            name='name_value',
            external_id='external_id_value',
            usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
            keyword_searchable_profile_custom_attributes=['keyword_searchable_profile_custom_attributes_value'],
        )
        response = client.create_tenant(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.CreateTenantRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gct_tenant.Tenant)
    assert response.name == 'name_value'
    assert response.external_id == 'external_id_value'
    assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
    assert response.keyword_searchable_profile_custom_attributes == ['keyword_searchable_profile_custom_attributes_value']
def test_create_tenant_from_dict():
    """create_tenant also accepts the request as a plain dict (here: dict request_type)."""
    test_create_tenant(request_type=dict)
def test_create_tenant_empty_call():
    """Calling create_tenant with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_tenant),
            '__call__') as call:
        client.create_tenant()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.CreateTenantRequest()
@pytest.mark.asyncio
async def test_create_tenant_async(transport: str = 'grpc_asyncio', request_type=tenant_service.CreateTenantRequest):
    """Async create_tenant should hit the stub once and surface the
    mocked Tenant's fields on the awaited response."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request is enough to drive the call.
    request = request_type()
    # Swap the stub method for a mock that yields a fake awaitable.
    with mock.patch.object(
            type(client.transport.create_tenant),
            '__call__') as call:
        fake_tenant = gct_tenant.Tenant(
            name='name_value',
            external_id='external_id_value',
            usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
            keyword_searchable_profile_custom_attributes=['keyword_searchable_profile_custom_attributes_value'],
        )
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(fake_tenant)
        response = await client.create_tenant(request)
        # Exactly the request we supplied should have reached the stub.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.CreateTenantRequest()
    # The awaited response mirrors the mocked Tenant.
    assert isinstance(response, gct_tenant.Tenant)
    assert response.name == 'name_value'
    assert response.external_id == 'external_id_value'
    assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
    assert response.keyword_searchable_profile_custom_attributes == ['keyword_searchable_profile_custom_attributes_value']
@pytest.mark.asyncio
async def test_create_tenant_async_from_dict():
    """Exercise the async create_tenant path with a plain dict request body."""
    await test_create_tenant_async(request_type=dict)
def test_create_tenant_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.CreateTenantRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_tenant),
            '__call__') as call:
        call.return_value = gct_tenant.Tenant()
        client.create_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_create_tenant_field_headers_async():
    """Async variant: URI-bound fields are sent as x-goog-request-params metadata."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.CreateTenantRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_tenant),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
        await client.create_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
def test_create_tenant_flattened():
    """Flattened keyword arguments must be copied into the request proto."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gct_tenant.Tenant()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_tenant(
            parent='parent_value',
            tenant=gct_tenant.Tenant(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].tenant == gct_tenant.Tenant(name='name_value')
def test_create_tenant_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_tenant(
            tenant_service.CreateTenantRequest(),
            parent='parent_value',
            tenant=gct_tenant.Tenant(name='name_value'),
        )
@pytest.mark.asyncio
async def test_create_tenant_flattened_async():
    """Async flattened-argument form of create_tenant must copy the
    keyword arguments into the outgoing request proto."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A dead
        # bare-Tenant assignment that was immediately overwritten here
        # has been removed; only the awaitable wrapper is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tenant(
            parent='parent_value',
            tenant=gct_tenant.Tenant(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].tenant == gct_tenant.Tenant(name='name_value')
@pytest.mark.asyncio
async def test_create_tenant_flattened_error_async():
    """Async variant: mixing request object and flattened fields raises ValueError."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_tenant(
            tenant_service.CreateTenantRequest(),
            parent='parent_value',
            tenant=gct_tenant.Tenant(name='name_value'),
        )
def test_get_tenant(transport: str = 'grpc', request_type=tenant_service.GetTenantRequest):
    """get_tenant should invoke the gRPC stub exactly once with the given
    request and surface the mocked Tenant's fields on the response."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tenant.Tenant(
            name='name_value',
            external_id='external_id_value',
            usage_type=tenant.Tenant.DataUsageType.AGGREGATED,
            keyword_searchable_profile_custom_attributes=['keyword_searchable_profile_custom_attributes_value'],
        )
        response = client.get_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.GetTenantRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tenant.Tenant)
    assert response.name == 'name_value'
    assert response.external_id == 'external_id_value'
    assert response.usage_type == tenant.Tenant.DataUsageType.AGGREGATED
    assert response.keyword_searchable_profile_custom_attributes == ['keyword_searchable_profile_custom_attributes_value']
def test_get_tenant_from_dict():
    """Exercise get_tenant with a plain dict as the request body."""
    test_get_tenant(request_type=dict)
def test_get_tenant_empty_call():
    """Calling get_tenant with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_tenant),
            '__call__') as call:
        client.get_tenant()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.GetTenantRequest()
@pytest.mark.asyncio
async def test_get_tenant_async(transport: str = 'grpc_asyncio', request_type=tenant_service.GetTenantRequest):
    """Async get_tenant should hit the stub once and surface the mocked
    Tenant's fields on the awaited response."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request is enough to drive the call.
    request = request_type()
    # Swap the stub method for a mock that yields a fake awaitable.
    with mock.patch.object(
            type(client.transport.get_tenant),
            '__call__') as call:
        fake_tenant = tenant.Tenant(
            name='name_value',
            external_id='external_id_value',
            usage_type=tenant.Tenant.DataUsageType.AGGREGATED,
            keyword_searchable_profile_custom_attributes=['keyword_searchable_profile_custom_attributes_value'],
        )
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(fake_tenant)
        response = await client.get_tenant(request)
        # Exactly the request we supplied should have reached the stub.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.GetTenantRequest()
    # The awaited response mirrors the mocked Tenant.
    assert isinstance(response, tenant.Tenant)
    assert response.name == 'name_value'
    assert response.external_id == 'external_id_value'
    assert response.usage_type == tenant.Tenant.DataUsageType.AGGREGATED
    assert response.keyword_searchable_profile_custom_attributes == ['keyword_searchable_profile_custom_attributes_value']
@pytest.mark.asyncio
async def test_get_tenant_async_from_dict():
    """Exercise the async get_tenant path with a plain dict request body."""
    await test_get_tenant_async(request_type=dict)
def test_get_tenant_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.GetTenantRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_tenant),
            '__call__') as call:
        call.return_value = tenant.Tenant()
        client.get_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_get_tenant_field_headers_async():
    """Async variant: URI-bound fields are sent as x-goog-request-params metadata."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.GetTenantRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_tenant),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant.Tenant())
        await client.get_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_get_tenant_flattened():
    """Flattened keyword arguments must be copied into the request proto."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tenant.Tenant()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_tenant(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_get_tenant_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_tenant(
            tenant_service.GetTenantRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_get_tenant_flattened_async():
    """Async flattened-argument form of get_tenant must copy the keyword
    arguments into the outgoing request proto."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A dead
        # bare-Tenant assignment that was immediately overwritten here
        # has been removed; only the awaitable wrapper is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant.Tenant())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_tenant(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_tenant_flattened_error_async():
    """Async variant: mixing request object and flattened fields raises ValueError."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_tenant(
            tenant_service.GetTenantRequest(),
            name='name_value',
        )
def test_update_tenant(transport: str = 'grpc', request_type=tenant_service.UpdateTenantRequest):
    """update_tenant should invoke the gRPC stub exactly once with the
    given request and surface the mocked Tenant's fields on the response."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gct_tenant.Tenant(
            name='name_value',
            external_id='external_id_value',
            usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
            keyword_searchable_profile_custom_attributes=['keyword_searchable_profile_custom_attributes_value'],
        )
        response = client.update_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.UpdateTenantRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gct_tenant.Tenant)
    assert response.name == 'name_value'
    assert response.external_id == 'external_id_value'
    assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
    assert response.keyword_searchable_profile_custom_attributes == ['keyword_searchable_profile_custom_attributes_value']
def test_update_tenant_from_dict():
    """Exercise update_tenant with a plain dict as the request body."""
    test_update_tenant(request_type=dict)
def test_update_tenant_empty_call():
    """Calling update_tenant with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_tenant),
            '__call__') as call:
        client.update_tenant()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.UpdateTenantRequest()
@pytest.mark.asyncio
async def test_update_tenant_async(transport: str = 'grpc_asyncio', request_type=tenant_service.UpdateTenantRequest):
    """Async update_tenant should hit the stub once and surface the
    mocked Tenant's fields on the awaited response."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request is enough to drive the call.
    request = request_type()
    # Swap the stub method for a mock that yields a fake awaitable.
    with mock.patch.object(
            type(client.transport.update_tenant),
            '__call__') as call:
        fake_tenant = gct_tenant.Tenant(
            name='name_value',
            external_id='external_id_value',
            usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
            keyword_searchable_profile_custom_attributes=['keyword_searchable_profile_custom_attributes_value'],
        )
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(fake_tenant)
        response = await client.update_tenant(request)
        # Exactly the request we supplied should have reached the stub.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.UpdateTenantRequest()
    # The awaited response mirrors the mocked Tenant.
    assert isinstance(response, gct_tenant.Tenant)
    assert response.name == 'name_value'
    assert response.external_id == 'external_id_value'
    assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
    assert response.keyword_searchable_profile_custom_attributes == ['keyword_searchable_profile_custom_attributes_value']
@pytest.mark.asyncio
async def test_update_tenant_async_from_dict():
    """Exercise the async update_tenant path with a plain dict request body."""
    await test_update_tenant_async(request_type=dict)
def test_update_tenant_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.UpdateTenantRequest()
    request.tenant.name = 'tenant.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_tenant),
            '__call__') as call:
        call.return_value = gct_tenant.Tenant()
        client.update_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tenant.name=tenant.name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_update_tenant_field_headers_async():
    """Async variant: URI-bound fields are sent as x-goog-request-params metadata."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.UpdateTenantRequest()
    request.tenant.name = 'tenant.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_tenant),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
        await client.update_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tenant.name=tenant.name/value',
    ) in kw['metadata']
def test_update_tenant_flattened():
    """Flattened keyword arguments must be copied into the request proto."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gct_tenant.Tenant()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_tenant(
            tenant=gct_tenant.Tenant(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].tenant == gct_tenant.Tenant(name='name_value')
def test_update_tenant_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_tenant(
            tenant_service.UpdateTenantRequest(),
            tenant=gct_tenant.Tenant(name='name_value'),
        )
@pytest.mark.asyncio
async def test_update_tenant_flattened_async():
    """Async flattened-argument form of update_tenant must copy the
    keyword arguments into the outgoing request proto."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A dead
        # bare-Tenant assignment that was immediately overwritten here
        # has been removed; only the awaitable wrapper is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_tenant(
            tenant=gct_tenant.Tenant(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].tenant == gct_tenant.Tenant(name='name_value')
@pytest.mark.asyncio
async def test_update_tenant_flattened_error_async():
    """Async variant: mixing request object and flattened fields raises ValueError."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_tenant(
            tenant_service.UpdateTenantRequest(),
            tenant=gct_tenant.Tenant(name='name_value'),
        )
def test_delete_tenant(transport: str = 'grpc', request_type=tenant_service.DeleteTenantRequest):
    """delete_tenant should invoke the gRPC stub exactly once and return
    None (the RPC has an Empty response)."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.DeleteTenantRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_tenant_from_dict():
    """Exercise delete_tenant with a plain dict as the request body."""
    test_delete_tenant(request_type=dict)
def test_delete_tenant_empty_call():
    """Calling delete_tenant with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_tenant),
            '__call__') as call:
        client.delete_tenant()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.DeleteTenantRequest()
@pytest.mark.asyncio
async def test_delete_tenant_async(transport: str = 'grpc_asyncio', request_type=tenant_service.DeleteTenantRequest):
    """Async delete_tenant should hit the stub once and resolve to None."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.DeleteTenantRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_tenant_async_from_dict():
    """Exercise the async delete_tenant path with a plain dict request body."""
    await test_delete_tenant_async(request_type=dict)
def test_delete_tenant_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.DeleteTenantRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_tenant),
            '__call__') as call:
        call.return_value = None
        client.delete_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_tenant_field_headers_async():
    """Async variant: URI-bound fields are sent as x-goog-request-params metadata."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.DeleteTenantRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_tenant),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_tenant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_delete_tenant_flattened():
    """Flattened keyword arguments must be copied into the request proto."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_tenant(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_delete_tenant_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_tenant(
            tenant_service.DeleteTenantRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_delete_tenant_flattened_async():
    """Async flattened-argument form of delete_tenant must copy the
    keyword arguments into the outgoing request proto."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_tenant),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A dead
        # `call.return_value = None` assignment that was immediately
        # overwritten here has been removed; only the awaitable wrapper
        # is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tenant(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_tenant_flattened_error_async():
    """Async variant: mixing request object and flattened fields raises ValueError."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_tenant(
            tenant_service.DeleteTenantRequest(),
            name='name_value',
        )
def test_list_tenants(transport: str = 'grpc', request_type=tenant_service.ListTenantsRequest):
    """list_tenants should invoke the gRPC stub exactly once and wrap the
    response in a ListTenantsPager exposing next_page_token."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_tenants),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tenant_service.ListTenantsResponse(
            next_page_token='next_page_token_value',
        )
        response = client.list_tenants(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.ListTenantsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListTenantsPager)
    assert response.next_page_token == 'next_page_token_value'
def test_list_tenants_from_dict():
    """Exercise list_tenants with a plain dict as the request body."""
    test_list_tenants(request_type=dict)
def test_list_tenants_empty_call():
    """Calling list_tenants with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_tenants),
            '__call__') as call:
        client.list_tenants()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.ListTenantsRequest()
@pytest.mark.asyncio
async def test_list_tenants_async(transport: str = 'grpc_asyncio', request_type=tenant_service.ListTenantsRequest):
    """Async list_tenants should hit the stub once and wrap the response
    in a ListTenantsAsyncPager exposing next_page_token."""
    client = TenantServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request is enough to drive the call.
    request = request_type()
    # Swap the stub method for a mock that yields a fake awaitable.
    with mock.patch.object(
            type(client.transport.list_tenants),
            '__call__') as call:
        fake_response = tenant_service.ListTenantsResponse(
            next_page_token='next_page_token_value',
        )
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(fake_response)
        response = await client.list_tenants(request)
        # Exactly the request we supplied should have reached the stub.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tenant_service.ListTenantsRequest()
    # The surfaced object is an async pager carrying the page token.
    assert isinstance(response, pagers.ListTenantsAsyncPager)
    assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_tenants_async_from_dict():
    """Exercise the async list_tenants path with a plain dict request body."""
    await test_list_tenants_async(request_type=dict)
def test_list_tenants_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tenant_service.ListTenantsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_tenants),
            '__call__') as call:
        call.return_value = tenant_service.ListTenantsResponse()
        client.list_tenants(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_list_tenants_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.ListTenantsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant_service.ListTenantsResponse())
await client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_tenants_flattened():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = tenant_service.ListTenantsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tenants(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_tenants_flattened_error():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tenants(
tenant_service.ListTenantsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_tenants_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = tenant_service.ListTenantsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant_service.ListTenantsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tenants(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_tenants_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tenants(
tenant_service.ListTenantsRequest(),
parent='parent_value',
)
def test_list_tenants_pager():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
tenant.Tenant(),
],
next_page_token='abc',
),
tenant_service.ListTenantsResponse(
tenants=[],
next_page_token='def',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
],
next_page_token='ghi',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_tenants(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, tenant.Tenant)
for i in results)
def test_list_tenants_pages():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
tenant.Tenant(),
],
next_page_token='abc',
),
tenant_service.ListTenantsResponse(
tenants=[],
next_page_token='def',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
],
next_page_token='ghi',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
],
),
RuntimeError,
)
pages = list(client.list_tenants(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tenants_async_pager():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
tenant.Tenant(),
],
next_page_token='abc',
),
tenant_service.ListTenantsResponse(
tenants=[],
next_page_token='def',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
],
next_page_token='ghi',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
],
),
RuntimeError,
)
async_pager = await client.list_tenants(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, tenant.Tenant)
for i in responses)
@pytest.mark.asyncio
async def test_list_tenants_async_pages():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
tenant.Tenant(),
],
next_page_token='abc',
),
tenant_service.ListTenantsResponse(
tenants=[],
next_page_token='def',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
],
next_page_token='ghi',
),
tenant_service.ListTenantsResponse(
tenants=[
tenant.Tenant(),
tenant.Tenant(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_tenants(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TenantServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TenantServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.TenantServiceGrpcTransport,
)
def test_tenant_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TenantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_tenant_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.TenantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'create_tenant',
'get_tenant',
'update_tenant',
'delete_tenant',
'list_tenants',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
@requires_google_auth_gte_1_25_0
def test_tenant_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/jobs',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_tenant_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/jobs',
),
quota_project_id="octopus",
)
def test_tenant_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_tenant_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TenantServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/jobs',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_tenant_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TenantServiceClient()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/jobs',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_tenant_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/jobs',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_tenant_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/jobs',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TenantServiceGrpcTransport, grpc_helpers),
(transports.TenantServiceGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_tenant_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"jobs.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/jobs',
),
scopes=["1", "2"],
default_host="jobs.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.TenantServiceGrpcTransport, transports.TenantServiceGrpcAsyncIOTransport])
def test_tenant_service_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_tenant_service_host_no_port():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='jobs.googleapis.com'),
)
assert client.transport._host == 'jobs.googleapis.com:443'
def test_tenant_service_host_with_port():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='jobs.googleapis.com:8000'),
)
assert client.transport._host == 'jobs.googleapis.com:8000'
def test_tenant_service_grpc_transport_channel():
    """A channel passed explicitly to the transport is used verbatim."""
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.TenantServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): compare against None with `is`, not `==`.
    assert transport._ssl_channel_credentials is None
def test_tenant_service_grpc_asyncio_transport_channel():
    """A channel passed explicitly to the asyncio transport is used verbatim."""
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.TenantServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): compare against None with `is`, not `==`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.TenantServiceGrpcTransport, transports.TenantServiceGrpcAsyncIOTransport])
def test_tenant_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.TenantServiceGrpcTransport, transports.TenantServiceGrpcAsyncIOTransport])
def test_tenant_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_tenant_path():
    """tenant_path interpolates project and tenant into a resource name."""
    project = "squid"
    tenant = "clam"
    expected = f"projects/{project}/tenants/{tenant}"
    actual = TenantServiceClient.tenant_path(project, tenant)
    assert actual == expected
def test_parse_tenant_path():
    """parse_tenant_path is the inverse of tenant_path."""
    fields = {
        "project": "whelk",
        "tenant": "octopus",
    }
    path = TenantServiceClient.tenant_path(**fields)
    # Round-trip: parsing the constructed path recovers the original fields.
    assert TenantServiceClient.parse_tenant_path(path) == fields
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = TenantServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = TenantServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = TenantServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = TenantServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = TenantServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = TenantServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = TenantServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = TenantServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = TenantServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = TenantServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.TenantServiceTransport, '_prep_wrapped_messages') as prep:
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.TenantServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = TenantServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
    """Exiting the client context manager closes the transport's channel."""
    # Map each transport name to the attribute holding its channel.
    # (A distinct local name avoids shadowing the `transports` module.)
    transport_channels = {
        "grpc": "_grpc_channel",
    }

    for transport_name, channel_attr in transport_channels.items():
        client = TenantServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name,
        )
        channel_type = type(getattr(client.transport, channel_attr))
        with mock.patch.object(channel_type, "close") as close:
            with client:
                # close() must not fire while the context is still open.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager closes its transport on exit."""
    # (A distinct local name avoids shadowing the `transports` module.)
    for transport_name in ('grpc',):
        client = TenantServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name,
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
platforms/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from dataclasses import dataclass
import os
import torch
from foundations.hparams import Hparams
import platforms.platform
@dataclass
class Platform(Hparams):
    """Hyperparameters describing the environment a job runs on.

    Manages device selection (CPU/GPU), distributed-training status (always
    single-process here), and filesystem access. Concrete subclasses must
    define where data, datasets, and ImageNet are stored.
    """

    num_workers: int = 0

    _name: str = 'Platform Hyperparameters'
    _description: str = 'Hyperparameters that control the plaform on which the job is run.'
    _num_workers: str = 'The number of worker threads to use for data loading.'

    # GPU index (or comma-separated indices) exposed via CUDA_VISIBLE_DEVICES.
    gpu: str = '7'
    # NOTE(review): this assignment runs once at class-definition (import) time,
    # not per instance; instance methods below re-apply it from self.gpu.
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    # Manage the available devices and the status of distributed training.
    @property
    def device_str(self):
        """The torch device string: 'cuda:<ids>' when GPUs are visible, else 'cpu'."""
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu

        # GPU device.
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            device_ids = ','.join(str(x) for x in range(torch.cuda.device_count()))
            return f'cuda:{device_ids}'

        # CPU device.
        else:
            return 'cpu'

    @property
    def torch_device(self):
        """The torch.device corresponding to device_str."""
        return torch.device(self.device_str)

    @property
    def is_parallel(self):
        """True when more than one GPU is visible (enables data parallelism)."""
        os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu
        return torch.cuda.is_available() and torch.cuda.device_count() > 1

    @property
    def is_distributed(self):
        """Whether this is a multi-process distributed run (always False here)."""
        return False

    @property
    def rank(self):
        """This process's rank in distributed training (always 0 here)."""
        return 0

    @property
    def world_size(self):
        """Total number of distributed processes (always 1 here)."""
        return 1

    @property
    def is_primary_process(self):
        """True for the process responsible for logging/checkpointing."""
        return not self.is_distributed or self.rank == 0

    def barrier(self):
        """Synchronize distributed processes (no-op for single-process runs)."""
        pass

    # Manage the location of important files.
    @property
    @abc.abstractmethod
    def root(self):
        """The root directory where data will be stored."""
        pass

    @property
    @abc.abstractmethod
    def dataset_root(self):
        """The root directory where datasets will be stored."""
        pass

    @property
    @abc.abstractmethod
    def imagenet_root(self):
        """The directory where imagenet will be stored."""
        pass

    # Mediate access to files. Subclasses may override these to target
    # non-local storage backends.
    @staticmethod
    def open(file, mode='r'):
        """Open a file with the built-in open()."""
        return open(file, mode)

    @staticmethod
    def exists(file):
        """Return True if the path exists."""
        return os.path.exists(file)

    @staticmethod
    def makedirs(path):
        """Create a directory tree; raises if the leaf already exists."""
        return os.makedirs(path)

    @staticmethod
    def isdir(path):
        """Return True if the path is a directory."""
        return os.path.isdir(path)

    @staticmethod
    def listdir(path):
        """List the entries of a directory."""
        return os.listdir(path)

    @staticmethod
    def save_model(model, path, *args, **kwargs):
        """Persist a model or state dict with torch.save."""
        return torch.save(model, path, *args, **kwargs)

    @staticmethod
    def load_model(path, *args, **kwargs):
        """Load a model or state dict with torch.load."""
        return torch.load(path, *args, **kwargs)

    # Run jobs. Called by the command line interface.
    def run_job(self, f):
        """Run a function that trains a network with this platform installed.

        Fix: the previous global platform is now restored even when f()
        raises, so one failed job cannot poison subsequent ones.
        """
        old_platform = platforms.platform._PLATFORM
        platforms.platform._PLATFORM = self
        try:
            f()
        finally:
            platforms.platform._PLATFORM = old_platform
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
test_init_final_bak.py
|
# -*- coding: utf-8 -*-
################ Server Ver. 28 (2021. 6. 23.) #####################
import sys, os, ctypes
import asyncio, discord, aiohttp
import random, re, datetime, time, logging
from discord.ext import tasks, commands
from discord.ext.commands import CommandNotFound, MissingRequiredArgument
from gtts import gTTS
from github import Github
import base64
import gspread, boto3
from oauth2client.service_account import ServiceAccountCredentials #정산
from io import StringIO
import urllib.request
from math import ceil, floor
##################### 로깅 ###########################
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.WARNING)
#ilsanglog = logging.getLogger('discord')
#ilsanglog.setLevel(level = logging.WARNING)
#handler = logging.StreamHandler()
#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
#ilsanglog.addHandler(handler)
#####################################################
# Load the Opus codec (required for Discord voice audio) if discord.py
# has not loaded it already.
if not discord.opus.is_loaded():
    discord.opus.load_opus(ctypes.util.find_library('opus'))
    print("opus_loaded")

# --- Module-level shared state for the boss-timer bot ---
# These globals are (re)populated by init() below from .ini files stored in a
# GitHub repo; most are parallel lists indexed by boss number.
basicSetting = []            # general settings (read from test_setting.ini in init())
bossData = []                # per-boss data rows (boss.ini)
fixed_bossData = []          # rows for fixed-schedule bosses (fixed_boss.ini)
bossNum = 0                  # number of regular bosses
fixed_bossNum = 0            # number of fixed-schedule bosses
chkvoicechannel = 0          # status flag -- exact semantics set elsewhere
chkrelogin = 0               # status flag -- exact semantics set elsewhere
chflg = 0                    # status flag -- exact semantics set elsewhere
LoadChk = 0                  # load-completion flag -- exact semantics set elsewhere
bossTime = []                # per-boss spawn times
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []          # per-boss spawn time rendered as text
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []                # per-boss announcement flags
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []            # per-boss flags (set elsewhere; presumably missed-spawn state)
bossMungCnt = []             # per-boss counters paired with bossMungFlag
channel_info = []            # Discord channel bookkeeping
channel_name = []
channel_id = []
channel_voice_name = []
channel_voice_id = []
channel_type = []
FixedBossDateData = []
indexFixedBossname = []
endTime = None
gc = None                    # gspread client (settlement/spreadsheet feature)
credentials = None           # service-account credentials for gspread
regenembed = None
command = None               # command strings loaded from command.ini
kill_Data = None             # kill records (kill_list.ini)
kill_Time = None
item_Data = None             # item records (item_list.ini)
tmp_racing_unit = None
setting_channel_name = None
boss_nick = {}               # per-boss nicknames -- usage not visible in this chunk

# Secrets come from environment variables so they are never committed.
access_token = os.environ["BOT_TOKEN"]                      # Discord bot token
git_access_token = os.environ["GIT_TOKEN"]                  # GitHub personal access token
git_access_repo = os.environ["GIT_REPO"]                    # repo holding the .ini config files
git_access_repo_restart = os.environ["GIT_REPO_RESTART"]    # repo used for restart signaling
try:
    aws_key = os.environ["AWS_KEY"]
    aws_secret_key = os.environ["AWS_SECRET_KEY"]
except:
    # AWS credentials are optional; fall back to empty strings when unset.
    aws_key = ""
    aws_secret_key = ""

# GitHub handles used throughout for reading/writing the config files.
g = Github(git_access_token)
repo = g.get_repo(git_access_repo)
repo_restart = g.get_repo(git_access_repo_restart)
def convertToInitialLetters(text):
    """Extract Hangul initial consonants (choseong) from ``text``.

    Each precomposed Hangul syllable (U+AC00..U+D7A3) is replaced by its
    leading consonant (e.g. '가' -> 'ㄱ'); spaces and ASCII digits are kept
    as-is; every other character is dropped.

    Fixes vs. the original: the 29-entry lookup dict was rebuilt for every
    character and the result was built with quadratic string concatenation;
    this version indexes a constant table and joins once.
    """
    JAMO_START_LETTER = 44032   # first Hangul syllable, '가' (U+AC00)
    JAMO_END_LETTER = 55203     # last Hangul syllable, '힣' (U+D7A3)
    JAMO_CYCLE = 588            # syllables per initial consonant (21 vowels * 28 finals)
    # The 19 initial consonants, indexed by choseong number
    # (same mapping as Hangul Jamo U+1100..U+1112 in the original table).
    CHOSUNG = "ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ"

    parts = []
    for ch in text:
        code = ord(ch)
        if JAMO_START_LETTER <= code <= JAMO_END_LETTER:
            # Hangul syllable: keep only its initial consonant.
            parts.append(CHOSUNG[(code - JAMO_START_LETTER) // JAMO_CYCLE])
        elif ch == ' ' or '0' <= ch <= '9':
            # Spaces and digits pass through unchanged.
            parts.append(ch)
        # Any other character (Latin letters, punctuation, ...) is dropped.
    return ''.join(parts)
def init():
    """Load every configuration file from the GitHub repo and (re)build all
    module-level state: basic settings, command aliases, kill/item lists,
    normal and fixed boss tables, emoji list, and the respawn-summary embed.

    NOTE(review): basicSetting is appended to but never cleared here —
    calling init() twice would duplicate entries; confirm it is only
    called once per process.
    """
    global basicSetting
    global bossData
    global fixed_bossData
    global bossNum
    global fixed_bossNum
    global chkvoicechannel
    global chkrelogin
    global bossTime
    global tmp_bossTime
    global fixed_bossTime
    global bossTimeString
    global bossDateString
    global tmp_bossTimeString
    global tmp_bossDateString
    global bossFlag
    global bossFlag0
    global fixed_bossFlag
    global fixed_bossFlag0
    global bossMungFlag
    global bossMungCnt
    global channel_info
    global channel_name
    global channel_voice_name
    global channel_voice_id
    global channel_id
    global channel_type
    global LoadChk
    global indexFixedBossname
    global FixedBossDateData
    global endTime
    global gc #settlement (Google Sheets)
    global credentials #settlement (Google Sheets)
    global regenembed
    global command
    global kill_Data
    global kill_Time
    global item_Data
    global tmp_racing_unit
    global boss_nick
    command = []
    tmp_bossData = []
    tmp_fixed_bossData = []
    FixedBossDateData = []
    indexFixedBossname = []
    kill_Data = {}
    tmp_kill_Data = []
    item_Data = {}
    tmp_item_Data = []
    f = []
    fb = []
    fk = []
    fc = []
    fi = []
    tmp_racing_unit = []
    boss_nick = {}
    # Each .ini file lives in the GitHub repo; content is base64-encoded.
    inidata = repo.get_contents("test_setting.ini")
    file_data1 = base64.b64decode(inidata.content)
    file_data1 = file_data1.decode('utf-8')
    inputData = file_data1.split('\n')
    command_inidata = repo.get_contents("command.ini")
    file_data4 = base64.b64decode(command_inidata.content)
    file_data4 = file_data4.decode('utf-8')
    command_inputData = file_data4.split('\n')
    boss_inidata = repo.get_contents("boss.ini")
    file_data3 = base64.b64decode(boss_inidata.content)
    file_data3 = file_data3.decode('utf-8')
    boss_inputData = file_data3.split('\n')
    fixed_inidata = repo.get_contents("fixed_boss.ini")
    file_data2 = base64.b64decode(fixed_inidata.content)
    file_data2 = file_data2.decode('utf-8')
    fixed_inputData = file_data2.split('\n')
    kill_inidata = repo.get_contents("kill_list.ini")
    file_data5 = base64.b64decode(kill_inidata.content)
    file_data5 = file_data5.decode('utf-8')
    kill_inputData = file_data5.split('\n')
    item_inidata = repo.get_contents("item_list.ini")
    file_data6 = base64.b64decode(item_inidata.content)
    file_data6 = file_data6.decode('utf-8')
    item_inputData = file_data6.split('\n')
    # Remember the raw fixed_boss.ini lines and where each boss entry starts
    # so FixedBossDateSave() can rewrite the startDate lines in place.
    for i in range(len(fixed_inputData)):
        FixedBossDateData.append(fixed_inputData[i])
    index_fixed = 0
    for value in FixedBossDateData:
        if value.find('bossname') != -1:
            indexFixedBossname.append(index_fixed)
        index_fixed = index_fixed + 1
    # Strip stray CR-only lines left over from CRLF files.
    for i in range(inputData.count('\r')):
        inputData.remove('\r')
    for i in range(command_inputData.count('\r')):
        command_inputData.remove('\r')
    for i in range(boss_inputData.count('\r')):
        boss_inputData.remove('\r')
    for i in range(fixed_inputData.count('\r')):
        fixed_inputData.remove('\r')
    for i in range(kill_inputData.count('\r')):
        kill_inputData.remove('\r')
    for i in range(item_inputData.count('\r')):
        item_inputData.remove('\r')
    # First line of each file is a header, not data.
    del(command_inputData[0])
    del(boss_inputData[0])
    del(fixed_inputData[0])
    del(kill_inputData[0])
    del(item_inputData[0])
    # Obsolete kakaoOnOff keys would break the 6-lines-per-boss assumption.
    for data in boss_inputData:
        if "kakaoOnOff" in data:
            raise Exception("[boss.ini] 파일에서 [kakaoOnOff]를 지워주세요.")
    for data in fixed_inputData:
        if "kakaoOnOff" in data:
            raise Exception("[fixed_boss.ini] 파일에서 [kakaoOnOff]를 지워주세요.")
    ############## bot basic-settings list (fixed byte offsets per line) #####################
    try:
        basicSetting.append(inputData[0][11:]) #basicSetting[0] : timezone offset (hours)
        basicSetting.append(inputData[8][15:]) #basicSetting[1] : before_alert (minutes)
        basicSetting.append(inputData[10][11:]) #basicSetting[2] : mungChk1 (minutes)
        basicSetting.append(inputData[9][16:]) #basicSetting[3] : before_alert1 (minutes)
        basicSetting.append(inputData[14][14:16]) #basicSetting[4] : restart time, hour
        basicSetting.append(inputData[14][17:]) #basicSetting[5] : restart time, minute
        basicSetting.append(inputData[1][15:]) #basicSetting[6] : voice channel ID
        basicSetting.append(inputData[2][14:]) #basicSetting[7] : text channel ID
        basicSetting.append(inputData[3][16:]) #basicSetting[8] : ladder channel ID
        basicSetting.append(inputData[13][14:]) #basicSetting[9] : boss-list output count
        basicSetting.append(inputData[17][11:]) #basicSetting[10] : json key filename
        basicSetting.append(inputData[4][17:]) #basicSetting[11] : settlement channel ID
        basicSetting.append(inputData[16][12:]) #basicSetting[12] : spreadsheet name
        basicSetting.append(inputData[15][16:]) #basicSetting[13] : restart period (days)
        basicSetting.append(inputData[18][12:]) #basicSetting[14] : worksheet name
        basicSetting.append(inputData[19][12:]) #basicSetting[15] : input cell
        basicSetting.append(inputData[20][13:]) #basicSetting[16] : output cell
        basicSetting.append(inputData[12][13:]) #basicSetting[17] : auto-delete after N mung cycles
        basicSetting.append(inputData[5][14:]) #basicSetting[18] : kill channel ID
        basicSetting.append(inputData[6][16:]) #basicSetting[19] : racing channel ID
        basicSetting.append(inputData[7][14:]) #basicSetting[20] : item channel ID
        basicSetting.append(inputData[21][12:]) #basicSetting[21] : voice_use ("1" = use voice)
        basicSetting.append(inputData[11][11:]) #basicSetting[22] : mungChk2 (minutes)
    except:
        raise Exception("[test_setting.ini] 파일 양식을 확인하세요.")
    ############## bot command alias list #####################
    for i in range(len(command_inputData)):
        tmp_command = command_inputData[i][12:].rstrip('\r')
        fc = tmp_command.split(', ')
        command.append(fc)
        fc = []
        #command.append(command_inputData[i][12:].rstrip('\r')) #command[0] ~ [24] : command aliases
    ################## kill list ###########################
    for i in range(len(kill_inputData)):
        # Each line is "<name> <count>"; lines without an int count are skipped.
        tmp_kill_Data.append(kill_inputData[i].rstrip('\r'))
        fk.append(tmp_kill_Data[i][:tmp_kill_Data[i].find(' ')])
        fk.append(tmp_kill_Data[i][tmp_kill_Data[i].find(' ')+1:])
        try:
            kill_Data[fk[0]] = int(fk[1])
        except:
            pass
        fk = []
    for i in range(len(item_inputData)):
        tmp_item_Data.append(item_inputData[i].rstrip('\r'))
        fi.append(tmp_item_Data[i][:tmp_item_Data[i].find(' ')])
        fi.append(tmp_item_Data[i][tmp_item_Data[i].find(' ')+1:])
        try:
            item_Data[fi[0]] = int(fi[1])
        except:
            pass
        fi = []
    # Kill list resets daily at 05:00 (server local time).
    tmp_killtime = datetime.datetime.now().replace(hour=int(5), minute=int(0), second = int(0))
    kill_Time = datetime.datetime.now()
    if tmp_killtime < kill_Time :
        kill_Time = tmp_killtime + datetime.timedelta(days=int(1))
    else:
        kill_Time = tmp_killtime
    for i in range(len(basicSetting)):
        basicSetting[i] = basicSetting[i].strip()
    # Channel IDs become ints; empty strings mean "not configured".
    try:
        if basicSetting[6] != "":
            basicSetting[6] = int(basicSetting[6])
        if basicSetting[7] != "":
            basicSetting[7] = int(basicSetting[7])
        if basicSetting[8] != "":
            basicSetting[8] = int(basicSetting[8])
        if basicSetting[11] != "":
            basicSetting[11] = int(basicSetting[11])
        if basicSetting[18] != "":
            basicSetting[18] = int(basicSetting[18])
        if basicSetting[19] != "":
            basicSetting[19] = int(basicSetting[19])
        if basicSetting[20] != "":
            basicSetting[20] = int(basicSetting[20])
    except ValueError:
        raise Exception("[test_setting.ini] 파일 양식을 확인하세요.")
    # Schedule the next self-restart; period 0 effectively disables it (~3 years out).
    tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
    if int(basicSetting[13]) == 0 :
        endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
        endTime = endTime + datetime.timedelta(days=int(1000))
    else :
        endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
        if endTime < tmp_now :
            endTime = endTime + datetime.timedelta(days=int(basicSetting[13]))
    # Boss files carry exactly 6 lines per boss entry.
    bossNum = int(len(boss_inputData)/6)
    fixed_bossNum = int(len(fixed_inputData)/6)
    for i in range(bossNum):
        tmp_bossData.append(boss_inputData[i*6:i*6+6])
    for i in range(fixed_bossNum):
        tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
    for j in range(bossNum):
        for i in range(len(tmp_bossData[j])):
            tmp_bossData[j][i] = tmp_bossData[j][i].strip()
    for j in range(fixed_bossNum):
        for i in range(len(tmp_fixed_bossData[j])):
            tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
    tmp_boss_name_list : list = []
    tmp_nick : list = []
    ############## normal-boss info list #####################
    for j in range(bossNum):
        tmp_nick = []
        tmp_len = tmp_bossData[j][1].find(':')
        # "bossname = Name, nick1, nick2" — extras become nicknames plus
        # their chosung forms for fuzzy command matching.
        tmp_boss_name_list = tmp_bossData[j][0][11:].split(", ")
        f.append(tmp_boss_name_list[0]) #bossData[0] : boss name
        if len(tmp_boss_name_list) > 1:
            for nick in tmp_boss_name_list[1:]:
                tmp_nick.append(nick)
                tmp_nick.append(convertToInitialLetters(nick))
            boss_nick[tmp_boss_name_list[0]] = tmp_nick
        f.append(tmp_bossData[j][1][10:tmp_len]) #bossData[1] : respawn hours
        f.append(tmp_bossData[j][2][13:]) #bossData[2] : mung mode ("1") / no-input mode ("0")
        f.append(tmp_bossData[j][3][20:]) #bossData[3] : N-minutes-before alert message
        f.append(tmp_bossData[j][4][13:]) #bossData[4] : spawn alert message
        f.append(tmp_bossData[j][1][tmp_len+1:]) #bossData[5] : respawn minutes
        f.append('') #bossData[6] : user-attached message
        f.append(tmp_bossData[j][5][11:]) #bossData[7] : mung-check timer type ("1" -> mungChk1, else mungChk2)
        bossData.append(f)
        f = []
        # Placeholder spawn times a year out mean "not scheduled yet".
        bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
        tmp_bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
        bossTimeString.append('99:99:99')
        bossDateString.append('9999-99-99')
        tmp_bossTimeString.append('99:99:99')
        tmp_bossDateString.append('9999-99-99')
        bossFlag.append(False)
        bossFlag0.append(False)
        bossMungFlag.append(False)
        bossMungCnt.append(0)
    tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
    ############## fixed-boss info list #####################
    for j in range(fixed_bossNum):
        try:
            tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
            tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
            fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : boss name
            fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : hour
            fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : minute
            fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : N-minutes-before alert message
            fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : spawn alert message
            fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : spawn cycle, hours
            fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : spawn cycle, minutes
            fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : start date, year
            fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : start date, month
            fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : start date, day
            fixed_bossData.append(fb)
            fb = []
            fixed_bossFlag.append(False)
            fixed_bossFlag0.append(False)
            # Roll the start date forward by whole cycles until it is in the future.
            fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
            if fixed_bossTime[j] < tmp_fixed_now :
                while fixed_bossTime[j] < tmp_fixed_now :
                    fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
            # Pre-arm alert flags if the next spawn is already inside an alert window.
            if tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])) <= fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[3])):
                fixed_bossFlag0[j] = True
            if fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])):
                fixed_bossFlag[j] = True
                fixed_bossFlag0[j] = True
        except:
            raise Exception(f"[fixed_boss.ini] 파일 {tmp_fixed_bossData[j][0][11:]} 부분 양식을 확인하세요.")
    ################# load racing emoji ######################
    emo_inidata = repo.get_contents("emoji.ini")
    emoji_data1 = base64.b64decode(emo_inidata.content)
    emoji_data1 = emoji_data1.decode('utf-8')
    emo_inputData = emoji_data1.split('\n')
    for i in range(len(emo_inputData)):
        tmp_emo = emo_inputData[i][8:].rstrip('\r')
        if tmp_emo != "":
            tmp_racing_unit.append(tmp_emo)
    ################# build respawn-period summary ######################
    regenData = []
    regenTime = []
    regenbossName = []
    outputTimeHour = []
    outputTimeMin = []
    for i in range(bossNum):
        # "R" suffix marks mung-mode bosses in the summary.
        if bossData[i][2] == "1":
            f.append(bossData[i][0] + "R")
        else:
            f.append(bossData[i][0])
        f.append(bossData[i][1] + bossData[i][5])
        regenData.append(f)
        regenTime.append(bossData[i][1] + bossData[i][5])
        f = []
    regenTime = sorted(list(set(regenTime)))
    # Group boss names by identical respawn period ("HHMM" string).
    for j in range(len(regenTime)):
        for i in range(len(regenData)):
            if regenTime[j] == regenData[i][1] :
                f.append(regenData[i][0])
        regenbossName.append(f)
        try:
            outputTimeHour.append(int(regenTime[j][:2]))
            outputTimeMin.append(int(regenTime[j][2:]))
        except ValueError:
            raise Exception(f"[boss.ini] 파일 {f} gentime을 확인하시기 바랍니다.")
        f = []
    regenembed = discord.Embed(
            title='----- 보스별 리스폰 시간 -----',
            description= ' ')
    for i in range(len(regenTime)):
        if outputTimeMin[i] == 0 :
            regenembed.add_field(name=str(outputTimeHour[i]) + '시간', value= '```'+ ', '.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
        else :
            regenembed.add_field(name=str(outputTimeHour[i]) + '시간' + str(outputTimeMin[i]) + '분', value= '```' + ','.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
    regenembed.set_footer(text = 'R : 멍 보스')
    ##########################################################
    # Google Sheets credentials are only loaded when a key file is configured.
    if basicSetting[10] !="":
        scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'] #settlement
        credentials = ServiceAccountCredentials.from_json_keyfile_name(basicSetting[10], scope) #settlement
# Load all configuration at import time; `channel` holds the active text
# channel id once the setup command runs (empty string = not configured).
init()
channel = ''
# Create an alert audio file from text (AWS Polly when keys exist, gTTS fallback).
async def MakeSound(saveSTR, filename):
    """Synthesize *saveSTR* (Korean) into an audio file named after *filename*.

    With AWS credentials configured, uses Polly voice "Seoyeon" and writes
    ./<filename>.mp3; otherwise falls back to gTTS and writes ./<filename>.wav.

    Bug fix: the output paths previously contained no placeholder, so the
    *filename* argument was ignored and every call overwrote one fixed file.
    """
    if aws_key != "" and aws_secret_key != "":
        polly = boto3.client("polly", aws_access_key_id = aws_key, aws_secret_access_key = aws_secret_key, region_name = "eu-west-1")
        # SSML prosody slows speech slightly for clarity.
        s = '<speak><prosody rate="' + str(95) + '%">' + saveSTR + '</prosody></speak>'
        response = polly.synthesize_speech(
            TextType = "ssml",
            Text=s,
            OutputFormat="mp3",
            VoiceId="Seoyeon")
        stream = response.get("AudioStream")
        with open(f"./{filename}.mp3", "wb") as mp3file:
            data = stream.read()
            mp3file.write(data)
    else:
        tts = gTTS(saveSTR, lang = 'ko')
        # NOTE(review): gTTS emits mp3 data even with a .wav name — confirm
        # downstream players accept it (extension mismatch predates this fix).
        tts.save(f"./{filename}.wav")
# Play an audio file on the given voice connection.
async def PlaySound(voiceclient, filename):
    """Play *filename* through *voiceclient*, waiting for playback to finish.

    No-op unless voice output is enabled (basicSetting[21] == "1"). If the
    client is already playing, waits for the current clip before returning;
    the final stop() guards against a clip that never signals completion.
    """
    if basicSetting[21] != "1":
        return
    # source = discord.FFmpegPCMAudio(filename)
    source = discord.FFmpegOpusAudio(filename)
    try:
        voiceclient.play(source)
    except discord.errors.ClientException:
        # Already playing: wait for the in-flight clip to end.
        while voiceclient.is_playing():
            await asyncio.sleep(1)
    # Wait until our clip finishes, then force-stop the source.
    while voiceclient.is_playing():
        await asyncio.sleep(1)
    voiceclient.stop()
    # source.cleanup()
    return
# Save boss timers into my_bot.db on GitHub.
async def dbSave():
    """Serialize every scheduled boss into my_bot.db in the repo.

    The exact line format (" - name(H.M) : HH:MM:SS @ YYYY-MM-DD (...) * msg")
    is parsed back byte-by-byte by dbLoad() — do not change it.
    """
    global bossData
    global bossNum
    global bossTime
    global bossTimeString
    global bossDateString
    global bossMungFlag
    global bossMungCnt
    # De-duplicate identical time strings by bumping the seconds field, so the
    # per-timestamp grouping below emits each boss exactly once.
    # NOTE(review): `bossTimeString[i] and ...` only tests truthiness of [i];
    # harmless here because [i] == [j] makes the [j] != '99:99:99' check cover both.
    for i in range(bossNum):
        for j in range(bossNum):
            if bossTimeString[i] and bossTimeString[j] != '99:99:99':
                if bossTimeString[i] == bossTimeString[j] and i != j:
                    tmp_time1 = bossTimeString[j][:6]
                    tmp_time2 = (int(bossTimeString[j][6:]) + 1)%100
                    if tmp_time2 < 10 :
                        tmp_time22 = '0' + str(tmp_time2)
                    elif tmp_time2 == 60 :
                        tmp_time22 = '00'
                    else :
                        tmp_time22 = str(tmp_time2)
                    bossTimeString[j] = tmp_time1 + tmp_time22
    datelist1 = bossTime
    datelist = list(set(datelist1))
    information1 = '----- 보스탐 정보 -----\n'
    # Emit bosses in chronological order; bosses in the "mung" window use their
    # tmp_* timestamps, everything unknown ('99:99:99') without mung is skipped.
    for timestring in sorted(datelist):
        for i in range(bossNum):
            if timestring == bossTime[i]:
                if bossTimeString[i] != '99:99:99' or bossMungFlag[i] == True :
                    if bossMungFlag[i] == True :
                        if bossData[i][2] == '0' :
                            information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + tmp_bossTime[i].strftime('%H:%M:%S') + ' @ ' + tmp_bossTime[i].strftime('%Y-%m-%d') + ' (미입력 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
                        else :
                            information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + tmp_bossTime[i].strftime('%H:%M:%S') + ' @ ' + tmp_bossTime[i].strftime('%Y-%m-%d') + ' (멍 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
                    else:
                        if bossData[i][2] == '0' :
                            information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (미입력 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
                        else :
                            information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (멍 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
    try :
        contents = repo.get_contents("my_bot.db")
        repo.update_file(contents.path, "bossDB", information1, contents.sha)
    except Exception as e :
        print ('save error!!')
        print(e.args[1]['message']) # output: This repository is empty.
        errortime = datetime.datetime.now()
        print (errortime)
        pass
# Load boss timers from my_bot.db on GitHub.
async def dbLoad():
    """Restore boss state from my_bot.db and rebuild the fixed-boss tables.

    Parses the fixed-width lines written by dbSave(), fast-forwards each
    timer past 'now' (counting skipped cycles as mung), re-arms alert flags,
    then reloads fixed_boss.ini from scratch. Sets LoadChk to 0 on success,
    1 when the db held no entries.
    """
    global LoadChk
    contents1 = repo.get_contents("my_bot.db")
    file_data = base64.b64decode(contents1.content)
    file_data = file_data.decode('utf-8')
    beforeBossData = file_data.split('\n')
    if len(beforeBossData) > 1:
        for i in range(len(beforeBossData)-1):
            for j in range(bossNum):
                # Line shape: " - name(H.M) : HH:MM:SS @ YYYY-MM-DD (...) * msg"
                startPos = beforeBossData[i+1].find('-')
                endPos = beforeBossData[i+1].find('(')
                if beforeBossData[i+1][startPos+2:endPos] == bossData[j][0] :
                    #if beforeBossData[i+1].find(bossData[j][0]) != -1 :
                    tmp_mungcnt = 0
                    tmp_len = beforeBossData[i+1].find(':')
                    tmp_datelen = beforeBossData[i+1].find('@')
                    tmp_msglen = beforeBossData[i+1].find('*')
                    years1 = beforeBossData[i+1][tmp_datelen+2:tmp_datelen+6]
                    months1 = beforeBossData[i+1][tmp_datelen+7:tmp_datelen+9]
                    days1 = beforeBossData[i+1][tmp_datelen+10:tmp_datelen+12]
                    hours1 = beforeBossData[i+1][tmp_len+2:tmp_len+4]
                    minutes1 = beforeBossData[i+1][tmp_len+5:tmp_len+7]
                    seconds1 = beforeBossData[i+1][tmp_len+8:tmp_len+10]
                    now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                    tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                    tmp_now = tmp_now.replace(year = int(years1), month = int(months1), day = int(days1), hour=int(hours1), minute=int(minutes1), second = int(seconds1))
                    # Pick the mung-check grace window for this boss's timer type.
                    if bossData[j][7] == "1":
                        tmp_now_chk = tmp_now + datetime.timedelta(minutes = int(basicSetting[2]))
                    else:
                        tmp_now_chk = tmp_now + datetime.timedelta(minutes = int(basicSetting[22]))
                    # Fast-forward past 'now' in whole respawn cycles, counting
                    # each skipped cycle as one mung.
                    if tmp_now_chk < now2 :
                        deltaTime = datetime.timedelta(hours = int(bossData[j][1]), minutes = int(bossData[j][5]))
                        while tmp_now_chk < now2 :
                            tmp_now_chk = tmp_now_chk + deltaTime
                            tmp_now = tmp_now + deltaTime
                            tmp_mungcnt = tmp_mungcnt + 1
                    if tmp_now_chk > now2 > tmp_now: # boss is currently in its spawn (mung-check) window
                        bossMungFlag[j] = True
                        tmp_bossTime[j] = tmp_now
                        tmp_bossTimeString[j] = tmp_bossTime[j].strftime('%H:%M:%S')
                        tmp_bossDateString[j] = tmp_bossTime[j].strftime('%Y-%m-%d')
                        bossTimeString[j] = '99:99:99'
                        bossDateString[j] = '9999-99-99'
                        bossTime[j] = tmp_bossTime[j] + datetime.timedelta(days=365)
                    else:
                        tmp_bossTime[j] = bossTime[j] = tmp_now
                        tmp_bossTimeString[j] = bossTimeString[j] = bossTime[j].strftime('%H:%M:%S')
                        tmp_bossDateString[j] = bossDateString[j] = bossTime[j].strftime('%Y-%m-%d')
                    # Re-arm alert flags if the restored time is already inside a window.
                    if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[j] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
                        bossFlag0[j] = True
                    if tmp_bossTime[j] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
                        bossFlag[j] = True
                        bossFlag0[j] = True
                    bossData[j][6] = beforeBossData[i+1][tmp_msglen+2:len(beforeBossData[i+1])]
                    # Recover the stored mung count (1 or 2 digits before ' * ').
                    # NOTE(review): `!= 0` compares a str to int and is always True;
                    # the space check does the real work — left as-is.
                    if beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3] != 0 and beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] == ' ':
                        bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
                    elif beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] != ' ':
                        bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] + beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
                    else:
                        bossMungCnt[j] = 0
        # Rebuild the fixed-boss tables from a fresh copy of fixed_boss.ini
        # (same parsing as init()).
        global FixedBossDateData
        global fixed_bossFlag
        global fixed_bossFlag0
        global fixed_bossTime
        global fixed_bossData
        FixedBossDateData = []
        fixed_bossFlag = []
        fixed_bossFlag0 = []
        fixed_bossTime = []
        fixed_bossData = []
        tmp_fixed_bossData = []
        fb = []
        fixed_inidata = repo.get_contents("fixed_boss.ini")
        file_data2 = base64.b64decode(fixed_inidata.content)
        file_data2 = file_data2.decode('utf-8')
        fixed_inputData = file_data2.split('\n')
        for i in range(len(fixed_inputData)):
            FixedBossDateData.append(fixed_inputData[i])
        del(fixed_inputData[0])
        for i in range(fixed_inputData.count('\r')):
            fixed_inputData.remove('\r')
        fixed_bossNum = int(len(fixed_inputData)/6)
        for i in range(fixed_bossNum):
            tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
        for j in range(fixed_bossNum):
            for i in range(len(tmp_fixed_bossData[j])):
                tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
        tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
        ############## fixed-boss info list #####################
        for j in range(fixed_bossNum):
            try:
                tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
                tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
                fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : boss name
                fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : hour
                fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : minute
                fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : N-minutes-before alert message
                fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : spawn alert message
                fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : spawn cycle, hours
                fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : spawn cycle, minutes
                fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : start date, year
                fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : start date, month
                fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : start date, day
                fixed_bossData.append(fb)
                fb = []
                fixed_bossFlag.append(False)
                fixed_bossFlag0.append(False)
                fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
                if fixed_bossTime[j] < tmp_fixed_now :
                    while fixed_bossTime[j] < tmp_fixed_now :
                        fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
                if tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])) <= fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[3])):
                    fixed_bossFlag0[j] = True
                if fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])):
                    fixed_bossFlag[j] = True
                    fixed_bossFlag0[j] = True
            except:
                raise Exception(f"[fixed_boss.ini] 파일 {tmp_fixed_bossData[j][0]} 부분 양식을 확인하세요.")
        LoadChk = 0
        print ("<불러오기 완료>")
    else:
        LoadChk = 1
        print ("보스타임 정보가 없습니다.")
# Persist fixed-boss start dates.
async def FixedBossDateSave():
    """Write each fixed boss's next spawn date back into fixed_boss.ini.

    The startDate line sits 3 lines below each 'bossname' line recorded in
    indexFixedBossname; the whole file is re-committed with CRLF endings.
    """
    global fixed_bossData
    global fixed_bossTime
    global fixed_bossNum
    global FixedBossDateData
    global indexFixedBossname
    for i in range(fixed_bossNum):
        FixedBossDateData[indexFixedBossname[i] + 3] = 'startDate = ' + fixed_bossTime[i].strftime('%Y-%m-%d') + '\n'
    # Drop each line's trailing character and normalize to CRLF.
    FixedBossDateDataSTR = "".join(entry[:len(entry) - 1] + '\r\n' for entry in FixedBossDateData)
    contents = repo.get_contents("fixed_boss.ini")
    repo.update_file(contents.path, "bossDB", FixedBossDateDataSTR, contents.sha)
# Ladder (raffle) helper.
async def LadderFunc(number, ladderlist, channelVal):
    """Pick *number* random winners from *ladderlist* and post the result
    embed (participants / winners / losers) to *channelVal*.

    Fix: removed `result_ladderSTR`, which was built but never used.
    """
    result_ladder = random.sample(ladderlist, number)
    lose_member = [item for item in ladderlist if item not in result_ladder]
    embed = discord.Embed(title = "🎲 사다리! 묻고 더블로 가!",color=0x00ff00)
    embed.add_field(name = "👥 참가자", value = f"```fix\n{', '.join(ladderlist)}```", inline=False)
    embed.add_field(name = "😍 당첨", value = f"```fix\n{', '.join(result_ladder)}```")
    embed.add_field(name = "😭 낙첨", value = f"```{', '.join(lose_member)}```")
    await channelVal.send(embed=embed, tts=False)
# Reset a data file.
async def init_data_list(filename, first_line : str = "-----------"):
    """Reset the GitHub-hosted *filename* to contain only *first_line*.

    Used e.g. for the daily kill-list wipe; failures are logged and swallowed.
    """
    try :
        contents = repo.get_contents(filename)
        repo.update_file(contents.path, "deleted list " + str(filename), first_line, contents.sha)
        print ('< 데이터 초기화 >')
    except Exception as e :
        print ('save error!!')
        print(e.args[1]['message']) # output: This repository is empty.
        errortime = datetime.datetime.now()
        print (errortime)
        pass
# Save a name->count data file.
async def data_list_Save(filename, first_line : str = "-----------", save_data : dict = None):
    """Serialize *save_data* ("name count" per line) under the *first_line*
    header and commit it to *filename* in the repo.

    Fix: the default for save_data was a shared mutable dict ({}); use the
    None sentinel idiom instead. Failures are logged and swallowed.
    """
    if save_data is None:
        save_data = {}
    lines = [first_line] + [str(key) + ' ' + str(value) for key, value in save_data.items()]
    output_list = '\n'.join(lines) + '\n'
    try :
        contents = repo.get_contents(filename)
        repo.update_file(contents.path, "updated " + str(filename), output_list, contents.sha)
    except Exception as e :
        print ('save error!!')
        print(e.args[1]['message']) # output: This repository is empty.
        errortime = datetime.datetime.now()
        print (errortime)
        pass
# Guild (server) channel inventory.
async def get_guild_channel_info(bot):
    """Return four parallel lists for every guild the bot is in:
    text channel names, text channel ids, voice channel names, voice
    channel ids — ids converted to strings.
    """
    text_names, text_ids = [], []
    voice_names, voice_ids = [], []
    for guild in bot.guilds:
        for ch in guild.text_channels:
            text_names.append(ch.name)
            text_ids.append(str(ch.id))
        for ch in guild.voice_channels:
            voice_names.append(ch.name)
            voice_ids.append(str(ch.id))
    return text_names, text_ids, voice_names, voice_ids
class taskCog(commands.Cog):
    """Cog that owns the background boss-timer loop and its restart command."""
    def __init__(self, bot):
        self.bot = bot
        self.checker = True   # voice-connection health flag (False after a failed reconnect)
        self.main_task.start()  # kick off the one-shot loop that spawns boss_check
    # One-shot task (count=1) whose only job is to launch boss_check in the
    # background once the bot is ready.
    @tasks.loop(seconds=1.0, count=1)
    async def main_task(self):
        boss_task = asyncio.get_event_loop().create_task(self.boss_check())
        await boss_task
    # Delay main_task until the bot's connection/cache is ready.
    @main_task.before_loop
    async def before_tast(self):
        await self.bot.wait_until_ready()
################ 명존쎄 ################
@commands.command(name=command[8][0], aliases=command[8][1:])
async def command_task_list(self, ctx : commands.Context):
if ctx.message.channel.id != basicSetting[7]:
return
for t in asyncio.Task.all_tasks():
# print(t._coro.__name__)
if t._coro.__name__ == f"boss_check":
if t.done():
try:
t.exception()
except asyncio.CancelledError:
continue
continue
t.cancel()
# await ctx.send( '< 보탐봇 명치 맞고 숨 고르기 중! 잠시만요! >', tts=False)
try:
file = discord.File("./명치.JPG")
await ctx.send(file = file)
except:
await ctx.send( '< 보탐봇 명치 맞고 숨 고르기 중! 잠시만요! >', tts=False)
print("명치!")
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
for vc in self.bot.voice_clients:
if vc.guild.id == int(ctx.guild.id):
if vc.is_playing():
vc.stop()
await vc.disconnect(force=True)
if basicSetting[21] != "1":
print("명치복구완료!")
await dbLoad()
await self.bot.get_channel(channel).send( '< 다시 왔습니다!(보이스 미사용) >', tts=False)
self.checker = True
boss_task = asyncio.Task(self.boss_check())
return
async def boss_check(self):
await self.bot.wait_until_ready()
global channel
global endTime
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global endTime
global kill_Time
if chflg == 1 :
if len(self.bot.voice_clients) == 0 :
if basicSetting[21] == "1":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
if self.bot.voice_clients[0].is_connected() :
await self.bot.get_channel(channel).send( '< 다시 왔습니다! >', tts=False)
self.checker = True
print("명치복구완료!")
except:
await self.bot.get_channel(channel).send( '< 음성채널 접속 에러! >', tts=False)
self.checker = False
print("명치복구실패!")
pass
await dbLoad()
while True:
############ 워닝잡자! ############
if log_stream.getvalue().find("Awaiting") != -1:
log_stream.truncate(0)
log_stream.seek(0)
await self.bot.get_channel(channel).send( '< 디코접속에러! 잠깐 나갔다 올께요! >', tts=False)
await dbSave()
break
log_stream.truncate(0)
log_stream.seek(0)
##################################
now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
priv0 = now+datetime.timedelta(minutes=int(basicSetting[3]))
priv = now+datetime.timedelta(minutes=int(basicSetting[1]))
tmp_aftr1 = now+datetime.timedelta(minutes=int(0-int(basicSetting[2])))
tmp_aftr2 = now+datetime.timedelta(minutes=int(0-int(basicSetting[22])))
if channel != '':
################ 보탐봇 재시작 ################
if endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S'):
await dbSave()
await FixedBossDateSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
print("보탐봇재시작!")
endTime = endTime + datetime.timedelta(days = int(basicSetting[13]))
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
await asyncio.sleep(2)
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
############# 음성접속! ###########
if len(self.bot.voice_clients) == 0 and self.checker and basicSetting[21] == "1":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 채널 자동 재접속완료!")
except discord.errors.ClientException as e:
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 자동 접속 부분에서 서버 음성 채널 이미 접속 에러 : {e}")
self.checker = False
pass
except Exception as e:
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 자동 접속 부분에서 서버 음성 채널 타임아웃 에러 : {e}")
self.checker = False
pass
if not self.bot.voice_clients[0].is_connected():
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 채널 자동 복구실패!")
await self.bot.get_channel(channel).send( '< 음성 채널 접속에 실패하였습니다. 잠시 후 음성 채널 접속을 시도해주세요! >')
self.checker = False
pass
################ 킬 목록 초기화 ################
if kill_Time.strftime('%Y-%m-%d ') + kill_Time.strftime('%H:%M') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M'):
kill_Time = kill_Time + datetime.timedelta(days=int(1))
await init_data_list('kill_list.ini', '-----척살명단-----')
################ 고정 보스 확인 ################
for i in range(fixed_bossNum):
if int(basicSetting[3]) == 0:
fixed_bossFlag0[i] = True
if int(basicSetting[1]) == 0:
fixed_bossFlag[i] = True
################ before_alert1 ################
if fixed_bossTime[i] <= priv0 and fixed_bossTime[i] > priv:
if basicSetting[3] != '0':
if fixed_bossFlag0[i] == False:
fixed_bossFlag0[i] = True
await self.bot.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '알림1.mp3')
except:
pass
################ before_alert ################
if fixed_bossTime[i] <= priv and fixed_bossTime[i] > now and fixed_bossFlag0[i] == True :
if basicSetting[1] != '0' :
if fixed_bossFlag[i] == False:
fixed_bossFlag[i] = True
await self.bot.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '알림.mp3')
except:
pass
################ 보스 젠 시간 확인 ################
if fixed_bossTime[i] <= now and fixed_bossFlag[i] == True and fixed_bossFlag0[i] == True :
fixed_bossTime[i] = fixed_bossTime[i]+datetime.timedelta(hours=int(fixed_bossData[i][5]), minutes=int(fixed_bossData[i][6]), seconds = int(0))
fixed_bossFlag0[i] = False
fixed_bossFlag[i] = False
embed = discord.Embed(
description= "```" + fixed_bossData[i][0] + fixed_bossData[i][4] + "```" ,
color=0x00ff00
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '젠.mp3')
except:
pass
################ 일반 보스 확인 ################
for i in range(bossNum):
if int(basicSetting[3]) == 0:
bossFlag0[i] = True
if int(basicSetting[1]) == 0:
bossFlag[i] = True
################ before_alert1 ################
if bossTime[i] <= priv0 and bossTime[i] > priv:
if basicSetting[3] != '0':
if bossFlag0[i] == False:
bossFlag0[i] = True
if bossData[i][6] != '' :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '알림1.mp3')
except:
pass
################ before_alert ################
if bossTime[i] <= priv and bossTime[i] > now and bossFlag0[i] == True:
if basicSetting[1] != '0' :
if bossFlag[i] == False:
bossFlag[i] = True
if bossData[i][6] != '' :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '알림.mp3')
except:
pass
################ 보스 젠 시간 확인 ################
if bossTime[i] <= now and bossFlag0[i] == True and bossFlag[i] == True :
#print ('if ', bossTime[i])
bossMungFlag[i] = True
tmp_bossTime[i] = bossTime[i]
tmp_bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
tmp_bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
bossTime[i] = now+datetime.timedelta(days=365)
if bossData[i][6] != '' :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + '\n<' + bossData[i][6] + '>```' ,
color=0x00ff00
)
else :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + "```" ,
color=0x00ff00
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '젠.mp3')
except:
pass
################ 보스 자동 멍 처리 ################
if bossMungFlag[i] == True:
if bossData[i][7] == "1":
aftr = tmp_aftr1
else:
aftr = tmp_aftr2
if (bossTime[i]+datetime.timedelta(days=-365)) <= aftr:
if basicSetting[2] != '0' and basicSetting[22] != '0' and bossFlag[i] == True and bossFlag0[i] == True and bossMungFlag[i] == True :
if int(basicSetting[17]) <= bossMungCnt[i] and int(basicSetting[17]) != 0:
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if bossData[i][2] == '0':
await self.bot.get_channel(channel).send(f'```자동 미입력 횟수 {basicSetting[17]}회 초과! [{bossData[i][0]}] 삭제!```', tts=False)
print ('자동미입력 횟수초과 <' + bossData[i][0] + ' 삭제완료>')
else:
await self.bot.get_channel(channel).send(f'```자동 멍처리 횟수 {basicSetting[17]}회 초과! [{bossData[i][0]}] 삭제!```', tts=False)
print ('자동멍처리 횟수초과 <' + bossData[i][0] + ' 삭제완료>')
#await dbSave()
else:
################ 미입력 보스 ################
if bossData[i][2] == '0':
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' 미입력 됐습니다.```', tts=False)
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '미입력.mp3')
except:
pass
################ 멍 보스 ################
else :
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' 멍 입니다.```')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '멍.mp3')
except:
pass
await asyncio.sleep(1) # task runs every 60 seconds
self.checker = False
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
for t in asyncio.Task.all_tasks():
if t._coro.__name__ == f"boss_check":
print("-------------")
if t.done():
try:
t.exception()
except asyncio.CancelledError:
continue
continue
t.cancel()
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
boss_task = asyncio.Task(self.boss_check())
class mainCog(commands.Cog):
    def __init__(self, bot):
        # Keep a reference to the running bot so command handlers can reach
        # channels, guilds and voice clients.
        self.bot = bot
################ 보탐봇 입장 ################
    @commands.has_permissions(manage_messages=True)
    @commands.command(name=command[0][0], aliases=command[0][1:])
    async def join_(self, ctx):
        """Register the invoking text channel as the bot's command channel.

        First run (no channel stored in ``basicSetting[7]``): persists the
        channel id into ``test_setting.ini`` on GitHub, then validates every
        optional channel id (voice/ladder/settlement/kill/race/item).  If a
        command channel is already registered elsewhere, asks the caller via
        reaction emoji whether to move it here (10 s timeout keeps the old
        setting).  Requires the Manage Messages permission.
        """
        global basicSetting
        global chflg
        if basicSetting[7] == "":
            channel = ctx.message.channel.id  # id of the channel the message came from
            print ('[ ', basicSetting[7], ' ]')
            print ('] ', ctx.message.channel.name, ' [')
            # Rewrite the "textchannel =" line of test_setting.ini on GitHub.
            inidata_textCH = repo.get_contents("test_setting.ini")
            file_data_textCH = base64.b64decode(inidata_textCH.content)
            file_data_textCH = file_data_textCH.decode('utf-8')
            inputData_textCH = file_data_textCH.split('\n')
            for i in range(len(inputData_textCH)):
                if inputData_textCH[i].startswith("textchannel ="):
                    inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
                    basicSetting[7] = channel
                    #print ('======', inputData_text[i])
            result_textCH = '\n'.join(inputData_textCH)
            #print (result_textCH)
            contents = repo.get_contents("test_setting.ini")
            repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
            await ctx.send(f"< 텍스트채널 [{ctx.message.channel.name}] 접속완료 >\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >", tts=False)
            print('< 텍스트채널 [' + ctx.guild.get_channel(basicSetting[7]).name + '] 접속완료>')
            # Voice is optional: only connect when a channel id is stored and
            # voice use is enabled (basicSetting[21] == "1").
            if basicSetting[6] != "" and basicSetting[21] == "1":
                try:
                    await ctx.guild.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
                    print('< 음성채널 [' + ctx.guild.get_channel(basicSetting[6]).name + '] 접속완료>')
                except:
                    print('< 음성채널 [' + ctx.guild.get_channel(basicSetting[6]).name + '] 접속에러! >')
                    pass
            # Each optional channel id is cleared when it no longer exists in
            # the cached channel_id list, prompting the admin to re-register.
            if basicSetting[8] != "":
                if str(basicSetting[8]) in channel_id:
                    print('< 사다리채널 [' + ctx.guild.get_channel(int(basicSetting[8])).name + '] 접속완료 >')
                else:
                    basicSetting[8] = ""
                    print(f"사다리채널 ID 오류! [{command[28][0]} 사다리] 명령으로 재설정 바랍니다.")
            if basicSetting[11] != "":
                if str(basicSetting[11]) in channel_id:
                    print('< 정산채널 [' + ctx.guild.get_channel(int(basicSetting[11])).name + '] 접속완료>')
                else:
                    basicSetting[11] = ""
                    print(f"정산채널 ID 오류! [{command[28][0]} 정산] 명령으로 재설정 바랍니다.")
            if basicSetting[18] != "":
                if str(basicSetting[18]) in channel_id:
                    print('< 척살채널 [' + ctx.guild.get_channel(int(basicSetting[18])).name + '] 접속완료>')
                else:
                    basicSetting[18] = ""
                    print(f"척살채널 ID 오류! [{command[28][0]} 척살] 명령으로 재설정 바랍니다.")
            if basicSetting[19] != "":
                if str(basicSetting[19]) in channel_id:
                    print('< 경주채널 [' + ctx.guild.get_channel(int(basicSetting[19])).name + '] 접속완료>')
                else:
                    basicSetting[19] = ""
                    print(f"경주채널 ID 오류! [{command[28][0]} 경주] 명령으로 재설정 바랍니다.")
            if basicSetting[20] != "":
                if str(basicSetting[20]) in channel_id:
                    print('< 아이템채널 [' + ctx.guild.get_channel(int(basicSetting[20])).name + '] 접속완료>')
                else:
                    basicSetting[20] = ""
                    print(f"아이템채널 ID 오류! [{command[28][0]} 아이템] 명령으로 재설정 바랍니다.")
            # basicSetting[13] is the scheduled-restart interval in days (0 = off).
            if int(basicSetting[13]) != 0 :
                print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
                print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
            else :
                print('< 보탐봇 재시작 설정안됨 >')
            chflg = 1
        else:
            # A command channel is already registered: find which guild owns it
            # and confirm the move with the caller via reactions.
            curr_guild_info = None
            for guild in self.bot.guilds:
                for text_channel in guild.text_channels:
                    if basicSetting[7] == text_channel.id:
                        curr_guild_info = guild
            emoji_list : list = ["⭕", "❌"]
            guild_error_message = await ctx.send(f"이미 **[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널이 명령어 채널로 설정되어 있습니다.\n해당 채널로 명령어 채널을 변경 하시려면 ⭕ 그대로 사용하시려면 ❌ 를 눌러주세요.\n(10초이내 미입력시 기존 설정 그대로 설정됩니다.)", tts=False)
            for emoji in emoji_list:
                await guild_error_message.add_reaction(emoji)
            def reaction_check(reaction, user):
                # Only accept one of our two emoji, on our message, from the caller.
                return (reaction.message.id == guild_error_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list)
            try:
                reaction, user = await self.bot.wait_for('reaction_add', check = reaction_check, timeout = 10)
            except asyncio.TimeoutError:
                return await ctx.send(f"시간이 초과됐습니다. **[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널에서 사용해주세요!")
            if str(reaction) == "⭕":
                # Moving the command channel: drop any active voice connection
                # and clear the stored voice-channel id.
                if ctx.voice_client is not None:
                    await ctx.voice_client.disconnect(force=True)
                    basicSetting[6] = ""
                basicSetting[7] = int(ctx.message.channel.id)
                print ('[ ', basicSetting[7], ' ]')
                print ('] ', ctx.message.channel.name, ' [')
                inidata_textCH = repo.get_contents("test_setting.ini")
                file_data_textCH = base64.b64decode(inidata_textCH.content)
                file_data_textCH = file_data_textCH.decode('utf-8')
                inputData_textCH = file_data_textCH.split('\n')
                for i in range(len(inputData_textCH)):
                    if inputData_textCH[i].startswith("textchannel ="):
                        inputData_textCH[i] = 'textchannel = ' + str(basicSetting[7]) + '\r'
                result_textCH = '\n'.join(inputData_textCH)
                contents = repo.get_contents("test_setting.ini")
                repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
                return await ctx.send(f"명령어 채널이 **[{ctx.author.guild.name}]** 서버 **[{ctx.message.channel.name}]** 채널로 새로 설정되었습니다.\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >")
            else:
                return await ctx.send(f"명령어 채널 설정이 취소되었습니다.\n**[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널에서 사용해주세요!")
################ 보탐봇 메뉴 출력 ################
    @commands.command(name=command[1][0], aliases=command[1][1:])
    async def menu_(self, ctx):
        """Post the full command reference as an embed.

        Builds one long usage string from the module-level ``command`` alias
        table (the trailing comments on each line are the literal command
        names) and sends it in the registered command channel only.
        """
        if ctx.message.channel.id == basicSetting[7]:
            command_list = ''
            command_list += ','.join(command[2]) + '\n' #!설정확인
            command_list += ','.join(command[3]) + '\n' #!채널확인
            command_list += ','.join(command[4]) + ' [채널명]\n' #!채널이동
            command_list += ','.join(command[5]) + ' ※ 관리자만 실행 가능\n' #!소환
            command_list += ','.join(command[6]) + '\n' #!불러오기
            command_list += ','.join(command[7]) + '\n' #!초기화
            command_list += ','.join(command[8]) + '\n' #!명치
            command_list += ','.join(command[9]) + '\n' #!재시작
            command_list += ','.join(command[10]) + '\n' #!미예약
            command_list += ','.join(command[11]) + ' [인원] [금액]\n' #!분배
            command_list += ','.join(command[12]) + ' [뽑을인원수] [아이디1] [아이디2]...\n' #!사다리
            command_list += ','.join(command[27]) + ' [아이디1] [아이디2]...(최대 12명)\n' #!경주
            command_list += ','.join(command[41]) + ' [추첨인원] (대기시간/초) *(메모)\n' #!럭키박스
            command_list += ','.join(command[35]) + ' [판매금액] (거래소세금)\n' #!수수료
            command_list += ','.join(command[36]) + ' [거래소금액] [실거래금액] (거래소세금)\n' #!페이백
            command_list += ','.join(command[13]) + ' [아이디]\n' #!정산
            command_list += ','.join(command[14]) + ' 또는 ' + ','.join(command[14]) + ' 0000, 00:00\n' #!보스일괄
            command_list += ','.join(command[40]) + ' 또는 ' + ','.join(command[40]) + ' 0000, 00:00\n' #!멍일괄
            command_list += ','.join(command[43]) + f' [00:00:00 : 보스명(엔터) ...]\n※ 보스탐 결과 복붙 가능\nex){command[43][0]} + 12:34:00 : {bossData[0][0]}\n+ 10:56:00 : {bossData[1][0]}\n+ (+1d) 12:12:00 : {bossData[2][0]}...\n' #!컷등록
            command_list += ','.join(command[44]) + f' [00:00:00 : 보스명(엔터) ...]\n※ [00:00:00 보스명] 형태로 여러줄(엔터)로 구분하여 등록\nex){command[44][0]} + 12:34:00 : {bossData[0][0]}\n10:56:00 : {bossData[1][0]}\n+ (+1d) 12:12:00 : {bossData[2][0]}...\n' #!예상등록
            command_list += ','.join(command[45]) + ' [시간(00:00)] [추가시간(숫자)] [보스명1] [보스명2] [보스명3] ...\n' #!추가등록
            command_list += ','.join(command[15]) + '\n' #!q
            command_list += ','.join(command[16]) + ' [할말]\n' #!v
            command_list += ','.join(command[17]) + '\n' #!리젠
            command_list += ','.join(command[18]) + '\n' #!현재시간
            command_list += ','.join(command[24]) + '\n' #!킬초기화
            command_list += ','.join(command[25]) + '\n' #!킬횟수 확인
            command_list += ','.join(command[25]) + ' [아이디]\n' #!킬
            command_list += ','.join(command[26]) + ' [아이디]\n' #!킬삭제
            command_list += ','.join(command[33]) + ' [아이디] 또는 ' + ','.join(command[33]) + ' [아이디] [횟수]\n' #!킬차감
            command_list += ','.join(command[29]) + '\n' #!아이템 목록 초기화
            command_list += ','.join(command[30]) + '\n' #!아이템 목록 확인
            command_list += ','.join(command[30]) + ' [아이템] 또는 ' + ','.join(command[30]) + ' [아이템] [개수]\n' #!아이템 목록 입력
            command_list += ','.join(command[31]) + ' [아이템]\n' #!아이템 목록에서 삭제
            command_list += ','.join(command[32]) + ' [아이템] 또는 ' + ','.join(command[32]) + ' [아이템] [개수]\n' #!아이템 차감
            command_list += ','.join(command[19]) + '\n' #!공지
            command_list += ','.join(command[19]) + ' [공지내용]\n' #!공지
            command_list += ','.join(command[20]) + '\n' #!공지삭제
            command_list += ','.join(command[21]) + ' [할말]\n' #!상태
            command_list += ','.join(command[28]) + ' 사다리, 정산, 척살, 경주, 아이템\n' #!채널설정
            command_list += ','.join(command[42]) + ' 사다리, 정산, 척살, 경주, 아이템\n' #!채널삭제
            command_list += ','.join(command[34]) + ' ※ 관리자만 실행 가능\n\n' #서버나가기
            command_list += ','.join(command[22]) + '\n' #보스탐
            command_list += ','.join(command[23]) + '\n' #!보스탐
            command_list += '[보스명]컷 또는 [보스명]컷 0000, 00:00\n'
            command_list += '[보스명] 컷 또는 [보스명] 컷 0000, 00:00\n'
            command_list += '[보스명]멍 또는 [보스명]멍 0000, 00:00\n'
            command_list += '[보스명]예상 또는 [보스명]예상 0000, 00:00\n'
            command_list += '[보스명]삭제\n'
            command_list += '[보스명]메모 [할말]\n'
            embed = discord.Embed(
                title = "----- 명령어 -----",
                description= '```' + command_list + '```',
                color=0xff00ff
            )
            embed.add_field(
                name="----- 추가기능 -----",
                value= '```- [보스명]컷/멍/예상 [할말] : 보스시간 입력 후 빈칸 두번!! 메모 가능\n- [보스명]컷 명령어는 초성으로 입력가능합니다.\n ex)' + bossData[0][0] + '컷 => ' + convertToInitialLetters(bossData[0][0] +'컷') + ', ' + bossData[0][0] + ' 컷 => ' + convertToInitialLetters(bossData[0][0] +' 컷') + '```'
            )
            await ctx.send( embed=embed, tts=False)
        else:
            return
################ 보탐봇 기본 설정확인 ################
@commands.command(name=command[2][0], aliases=command[2][1:])
async def setting_(self, ctx):
#print (ctx.message.channel.id)
if ctx.message.channel.id == basicSetting[7]:
setting_val = '보탐봇버전 : Server Ver. 28 (2021. 6. 23.)\n'
if basicSetting[6] != "" :
setting_val += '음성채널 : ' + self.bot.get_channel(basicSetting[6]).name + '\n'
setting_val += '텍스트채널 : ' + self.bot.get_channel(basicSetting[7]).name +'\n'
if basicSetting[8] != "" :
setting_val += '사다리채널 : ' + self.bot.get_channel(int(basicSetting[8])).name + '\n'
if basicSetting[11] != "" :
setting_val += '정산채널 : ' + self.bot.get_channel(int(basicSetting[11])).name + '\n'
if basicSetting[18] != "" :
setting_val += '척살채널 : ' + self.bot.get_channel(int(basicSetting[18])).name + '\n'
if basicSetting[19] != "" :
setting_val += '경주채널 : ' + self.bot.get_channel(int(basicSetting[19])).name + '\n'
if basicSetting[20] != "" :
setting_val += '아이템채널 : ' + self.bot.get_channel(int(basicSetting[20])).name + '\n'
setting_val += '보스젠알림시간1 : ' + basicSetting[1] + ' 분 전\n'
setting_val += '보스젠알림시간2 : ' + basicSetting[3] + ' 분 전\n'
setting_val += '보스멍확인시간1 : ' + basicSetting[2] + ' 분 후\n'
setting_val += '보스멍확인시간2 : ' + basicSetting[22] + ' 분 후\n'
if basicSetting[21] == "0":
setting_val += '보이스사용여부 : 사용안함\n'
else:
setting_val += '보이스사용여부 : 사용중\n'
embed = discord.Embed(
title = "----- 설정내용 -----",
description= f'```{setting_val}```',
color=0xff00ff
)
embed.add_field(
name="----- Special Thanks to. -----",
value= '```총무, 옹님, 공부중, 꽃신, 별빛, 크마, D.H.Kim, K.H.Sim, 쿠쿠, 오브로드, D.H.Oh, Bit, 팥빵, 천려, 이파리, 도미, 일깡, B.Park```'
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 서버 채널 확인 ################
    @commands.command(name=command[3][0], aliases=command[3][1:])
    async def chChk_(self, ctx):
        """List every text and voice channel the bot can see, with ids.

        Output is chunked: whenever a chunk grows past ~900 characters a new
        one is started, and each chunk is sent as its own embed so Discord's
        description-length limit is never hit.
        """
        if ctx.message.channel.id == basicSetting[7]:
            channel_name, channel_id, channel_voice_name, channel_voice_id = await get_guild_channel_info(self.bot)
            # ch_information / ch_voice_information are lists of string chunks;
            # cnt / cntV index the chunk currently being filled.
            ch_information = []
            cnt = 0
            ch_information.append("")
            ch_voice_information = []
            cntV = 0
            ch_voice_information.append("")
            for guild in self.bot.guilds:
                ch_information[cnt] = f"{ch_information[cnt]}👑 {guild.name} 👑\n"
                for i in range(len(channel_name)):
                    for text_channel in guild.text_channels:
                        if channel_id[i] == str(text_channel.id):
                            # Start a new chunk before this one overflows an embed.
                            if len(ch_information[cnt]) > 900 :
                                ch_information.append("")
                                cnt += 1
                            ch_information[cnt] = f"{ch_information[cnt]}[{channel_id[i]}] {channel_name[i]}\n"
                ch_voice_information[cntV] = f"{ch_voice_information[cntV]}👑 {guild.name} 👑\n"
                for i in range(len(channel_voice_name)):
                    for voice_channel in guild.voice_channels:
                        if channel_voice_id[i] == str(voice_channel.id):
                            if len(ch_voice_information[cntV]) > 900 :
                                ch_voice_information.append("")
                                cntV += 1
                            ch_voice_information[cntV] = f"{ch_voice_information[cntV]}[{channel_voice_id[i]}] {channel_voice_name[i]}\n"
            ######################
            # Single-chunk case: one embed with two fields.
            if len(ch_information) == 1 and len(ch_voice_information) == 1:
                embed = discord.Embed(
                    title = "----- 채널 정보 -----",
                    description = '',
                    color=0xff00ff
                )
                embed.add_field(
                    name="< 택스트 채널 >",
                    value= '```' + ch_information[0] + '```',
                    inline = False
                )
                embed.add_field(
                    name="< 보이스 채널 >",
                    value= '```' + ch_voice_information[0] + '```',
                    inline = False
                )
                await ctx.send( embed=embed, tts=False)
            else :
                # Multi-chunk case: one embed per chunk, text channels first.
                embed = discord.Embed(
                    title = "----- 채널 정보 -----\n< 택스트 채널 >",
                    description= '```' + ch_information[0] + '```',
                    color=0xff00ff
                )
                await ctx.send( embed=embed, tts=False)
                for i in range(len(ch_information)-1):
                    embed = discord.Embed(
                        title = '',
                        description= '```' + ch_information[i+1] + '```',
                        color=0xff00ff
                    )
                    await ctx.send( embed=embed, tts=False)
                embed = discord.Embed(
                    title = "< 음성 채널 >",
                    description= '```' + ch_voice_information[0] + '```',
                    color=0xff00ff
                )
                await ctx.send( embed=embed, tts=False)
                for i in range(len(ch_voice_information)-1):
                    embed = discord.Embed(
                        title = '',
                        description= '```' + ch_voice_information[i+1] + '```',
                        color=0xff00ff
                    )
                    await ctx.send( embed=embed, tts=False)
        else:
            return
################ 텍스트채널이동 ################
@commands.command(name=command[4][0], aliases=command[4][1:])
async def chMove_(self, ctx):
global basicSetting
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = None
for i in range(len(channel_name)):
if channel_name[i] == msg:
channel = int(channel_id[i])
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('textchannel ='):
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = int(channel)
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
await ctx.send( f"명령어 채널이 < {ctx.message.channel.name} >에서 < {self.bot.get_channel(channel).name} > 로 이동되었습니다.", tts=False)
await self.bot.get_channel(channel).send( f"< {self.bot.get_channel(channel).name} 이동완료 >", tts=False)
else:
return
################ 보탐봇 음성채널 소환 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[5][0], aliases=command[5][1:])
async def connectVoice_(self, ctx):
global basicSetting
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
if ctx.voice_client is None:
if ctx.author.voice:
try:
await ctx.author.voice.channel.connect(reconnect=True, timeout=5)
except:
await ctx.send('음성채널에 접속에 실패하였습니다.', tts=False)
pass
else:
await ctx.send('음성채널에 먼저 들어가주세요.', tts=False)
return
else:
if ctx.voice_client.is_playing():
ctx.voice_client.stop()
await ctx.voice_client.move_to(ctx.author.voice.channel)
voice_channel = ctx.author.voice.channel
print ('< ', basicSetting[6], ' >')
print ('> ', self.bot.get_channel(voice_channel.id).name, ' <')
if basicSetting[6] == "":
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i].startswith('voicechannel ='):
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
elif basicSetting[6] != int(voice_channel.id):
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i].startswith('voicechannel ='):
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
await ctx.send('< 음성채널 [' + self.bot.get_channel(voice_channel.id).name + '] 접속완료>', tts=False)
else:
return
################ my_bot.db에 저장된 보스타임 불러오기 ################
@commands.command(name=command[6][0], aliases=command[6][1:])
async def loadDB_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
await dbLoad()
if LoadChk == 0:
await ctx.send('<불러오기 완료>', tts=False)
else:
await ctx.send('<보스타임 정보가 없습니다.>', tts=False)
else:
return
################ 저장된 정보 초기화 ################
@commands.command(name=command[7][0], aliases=command[7][1:])
async def initVal_(self, ctx):
global basicSetting
global bossData
global fixed_bossData
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global FixedBossDateData
global indexFixedBossname
if ctx.message.channel.id == basicSetting[7]:
basicSetting = []
bossData = []
fixed_bossData = []
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
FixedBossDateData = []
indexFixedBossname = []
init()
await dbSave()
await ctx.send('< 초기화 완료 >', tts=False)
print ("< 초기화 완료 >")
else:
return
################ 보탐봇 재시작 ################
    @commands.command(name=command[9][0], aliases=command[9][1:])
    async def restart_(self, ctx):
        """Force-restart the bot by touching the restart trigger file on GitHub.

        Before restarting: restores timer strings for bosses currently in the
        "mung" state, saves the boss DB plus the kill and item lists, and
        disconnects every voice client.  The restart itself is signalled by
        alternately writing/clearing ``restart.txt`` in the restart repo,
        which an external watcher presumably reacts to — TODO confirm.
        """
        global basicSetting
        global bossTimeString
        global bossDateString
        if ctx.message.channel.id == basicSetting[7]:
            # With auto-mung enabled, mung bosses hold the '99:99:99' sentinel;
            # put their real (tmp_) times back so they survive the restart.
            if basicSetting[2] != '0' and basicSetting[22] != '0':
                for i in range(bossNum):
                    if bossMungFlag[i] == True:
                        bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
                        bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
            await dbSave()
            await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
            await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
            # Cleanly stop audio and leave all voice channels.
            for voice_client in self.bot.voice_clients:
                if voice_client.is_playing():
                    voice_client.stop()
                await voice_client.disconnect(force=True)
            print("보탐봇강제재시작!")
            await asyncio.sleep(2)
            # Toggle restart.txt between non-empty and empty so every commit
            # produces a change the watcher can pick up.
            inidata_restart = repo_restart.get_contents("restart.txt")
            file_data_restart = base64.b64decode(inidata_restart.content)
            file_data_restart = file_data_restart.decode('utf-8')
            inputData_restart = file_data_restart.split('\n')
            if len(inputData_restart) < 3:
                contents12 = repo_restart.get_contents("restart.txt")
                repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
            else:
                contents12 = repo_restart.get_contents("restart.txt")
                repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
        else:
            return
################ 미예약 보스타임 출력 ################
    @commands.command(name=command[10][0], aliases=command[10][1:])
    async def nocheckBoss_(self, ctx):
        """List every boss that currently has no scheduled spawn time.

        A boss counts as unreserved when its time string is the '99:99:99'
        sentinel and it is not in the "mung" state.  Names are joined with
        commas into chunks of at most ~1800 characters and each chunk is
        posted as its own embed.
        """
        if ctx.message.channel.id == basicSetting[7]:
            tmp_boss_information = []
            tmp_cnt = 0
            tmp_boss_information.append('')
            for i in range(bossNum):
                if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
                    # Start a new chunk before the current one exceeds the
                    # embed description limit.
                    if len(tmp_boss_information[tmp_cnt]) > 1800 :
                        tmp_boss_information.append('')
                        tmp_cnt += 1
                    tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
            if len(tmp_boss_information) == 1:
                if len(tmp_boss_information[0]) != 0:
                    # Drop the trailing comma and wrap in a code fence.
                    tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
                else :
                    tmp_boss_information[0] = '``` ```'
                embed = discord.Embed(
                    title = "----- 미예약 보스 -----",
                    description= tmp_boss_information[0],
                    color=0x0000ff
                )
                await ctx.send( embed=embed, tts=False)
            else:
                # Multi-chunk output: only the very last chunk has its
                # trailing comma trimmed.
                if len(tmp_boss_information[0]) != 0:
                    if len(tmp_boss_information) == 1 :
                        tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
                    else:
                        tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
                else :
                    tmp_boss_information[0] = '``` ```'
                embed = discord.Embed(
                    title = "----- 미예약 보스 -----",
                    description= tmp_boss_information[0],
                    color=0x0000ff
                )
                await ctx.send( embed=embed, tts=False)
                for i in range(len(tmp_boss_information)-1):
                    if len(tmp_boss_information[i+1]) != 0:
                        if i == len(tmp_boss_information)-2:
                            tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
                        else:
                            tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
                    else :
                        tmp_boss_information[i+1] = '``` ```'
                    embed = discord.Embed(
                        title = '',
                        description= tmp_boss_information[i+1],
                        color=0x0000ff
                    )
                    await ctx.send( embed=embed, tts=False)
        else:
            return
################ 분배 결과 출력 ################
@commands.command(name=command[11][0], aliases=command[11][1:])
async def bunbae_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
separate_money = []
separate_money = msg.split(" ")
num_sep = floor(int(separate_money[0]))
cal_tax1 = floor(float(separate_money[1])*0.05)
real_money = floor(floor(float(separate_money[1])) - cal_tax1)
cal_tax2 = floor(real_money/num_sep) - floor(float(floor(real_money/num_sep))*0.95)
if num_sep == 0 :
await ctx.send('```분배 인원이 0입니다. 재입력 해주세요.```', tts=False)
else :
embed = discord.Embed(
title = "----- 분배결과! -----",
description= '```1차 세금 : ' + str(cal_tax1) + '\n1차 수령액 : ' + str(real_money) + '\n분배자 거래소등록금액 : ' + str(floor(real_money/num_sep)) + '\n2차 세금 : ' + str(cal_tax2) + '\n인당 실수령액 : ' + str(floor(float(floor(real_money/num_sep))*0.95)) + '```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 사다리 결과 출력 ################
    @commands.command(name=command[12][0], aliases=command[12][1:])
    async def ladder_(self, ctx : commands.Context, *, args : str = None):
        """Run a "ladder" (Amidakuji) draw picking N winners from the given ids.

        Usage: ``!사다리 [뽑을인원수] [아이디1] [아이디2] ...``.  With more
        than 20 participants the draw is delegated to :func:`LadderFunc`;
        otherwise a textual ladder diagram is rendered and posted as a
        spoilered embed together with the winner/loser lists.
        """
        # When a dedicated ladder channel is configured, refuse the command
        # in the main command channel; otherwise allow main or ladder channel.
        if basicSetting[8] != "" and ctx.message.channel.id == basicSetting[7]:
            return
        if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[8]:
            if not args:
                return await ctx.send(f'```명령어 [인원] [아이디1] [아이디2] ... 형태로 입력해주시기 바랍나다.```')
            ladder = args.split()
            try:
                num_cong = int(ladder[0]) # number of winners to draw
                del(ladder[0])
            except ValueError:
                return await ctx.send(f'```뽑을 인원은 숫자로 입력바랍니다\nex)!사다리 1 가 나 다 ...```')
            if num_cong >= len(ladder):
                return await ctx.send(f'```추첨인원이 총 인원과 같거나 많습니다. 재입력 해주세요```')
            if len(ladder) > 20:
                # Large draws skip the ASCII diagram entirely.
                await LadderFunc(num_cong, ladder, ctx)
                return
            input_dict : dict = {}
            ladder_description : list = []
            ladder_data : list = []
            output_list : list = []
            result :dict = {}
            # Number participants 1..N and seed the outcome pool with
            # num_cong "o" (win) marks followed by "x" (lose) marks.
            for i in range(len(ladder)):
                input_dict[f"{i+1}"] = ladder[i]
                if i < num_cong:
                    output_list.append("o")
                else:
                    output_list.append("x")
            # Build the random rung rows; odd and even rows draw from offset
            # pattern sets so rungs never touch vertically.
            for i in range(len(ladder)+1):
                tmp_list = []
                if i%2 != 0:
                    sample_list = ["| |-", "| | "]
                else:
                    sample_list = ["| | ", "|-| "]
                for i in range(len(ladder)//2):
                    value = random.choice(sample_list)
                    tmp_list.append(value)
                ladder_description.append(tmp_list)
            tmp_result = list(input_dict.keys())
            input_data : str = ""
            for i in range(len(tmp_result)):
                if int(tmp_result[i]) < 9:
                    input_data += f"{tmp_result[i]} "
                else:
                    input_data += f"{tmp_result[i]}"
            input_value_data = " ".join(list(input_dict.values()))
            # Join each rung row into a line, fixing up the right edge for
            # odd/even participant counts.
            for i in range(len(ladder_description)):
                if (len(ladder) % 2) != 0:
                    ladder_data.append(f"{''.join(ladder_description[i])}|\n")
                else:
                    ladder_data.append(f"{''.join(ladder_description[i])[:-1]}\n")
            random.shuffle(output_list)
            output_data = list(" ".join(output_list))
            # Trace the ladder bottom-up: every '-' rung swaps the two
            # outcome slots under its neighbouring columns.
            for line in reversed(ladder_data):
                for i, x in enumerate(line):
                    if i % 2 == 1 and x == '-':
                        output_data[i-1], output_data[i+1] = output_data[i+1], output_data[i-1]
            for i in range(output_data.count(" ")):
                output_data.remove(" ")
            for i in range(len(tmp_result)):
                result[tmp_result[i]] = output_data[i]
            result_str : str = ""
            join_member : list = []
            win_member : list = []
            lose_member : list = []
            for x, y in result.items():
                join_member.append(f"{x}:{input_dict[f'{x}']}")
                if y == "o":
                    win_member.append(f"{input_dict[f'{x}']}")
                else :
                    lose_member.append(f"{input_dict[f'{x}']}")
            embed = discord.Embed(title = "🎲 사다리! 묻고 더블로 가!",
                color=0x00ff00
            )
            # The diagram is spoilered (||...||) so results stay hidden until clicked.
            embed.description = f"||```{input_data}\n{''.join(ladder_data)}{' '.join(output_list)}```||"
            embed.add_field(name = "👥 참가자", value = f"```fix\n{', '.join(join_member)}```", inline=False)
            embed.add_field(name = "😍 당첨", value = f"```fix\n{', '.join(win_member)}```")
            embed.add_field(name = "😭 낙첨", value = f"```{', '.join(lose_member)}```")
            return await ctx.send(embed = embed)
        else:
            return
################ 정산확인 ################
@commands.command(name=command[13][0], aliases=command[13][1:])
async def jungsan_(self, ctx):
if basicSetting[11] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[11]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
if basicSetting[10] !="" and basicSetting[12] !="" and basicSetting[14] !="" and basicSetting[15] !="" and basicSetting[16] !="" :
SearchID = msg
gc = gspread.authorize(credentials)
wks = gc.open(basicSetting[12]).worksheet(basicSetting[14])
wks.update_acell(basicSetting[15], SearchID)
result = wks.acell(basicSetting[16]).value
embed = discord.Embed(
description= '```' + SearchID + ' 님이 받을 다이야는 ' + result + ' 다이야 입니다.```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 보스타임 일괄 설정 ################
    @commands.command(name=command[14][0], aliases=command[14][1:])
    async def allBossInput_(self, ctx):
        """Set every *unset* boss timer at once from a single cut time.

        Usage: ``!보스일괄`` (use now) or ``!보스일괄 0000`` / ``00:00``.
        For each boss whose time string is still the '99:99:99' sentinel, the
        given (or current) time is taken as the last cut and rolled forward by
        the boss's respawn interval until the next spawn lies in the future;
        skipped spawns are counted into ``bossMungCnt``.
        """
        global basicSetting
        global bossData
        global fixed_bossData
        global bossTime
        global tmp_bossTime
        global fixed_bossTime
        global bossTimeString
        global bossDateString
        global tmp_bossTimeString
        global tmp_bossDateString
        global bossFlag
        global bossFlag0
        global bossMungFlag
        global bossMungCnt
        if ctx.message.channel.id == basicSetting[7]:
            msg = ctx.message.content[len(ctx.invoked_with)+1:]
            for i in range(bossNum):
                if bossTimeString[i] == '99:99:99':
                    tmp_msg = msg
                    # Parse the optional HH:MM / HHMM argument; anything
                    # shorter than 4 characters means "use the current time".
                    # basicSetting[0] is a server-time hour offset.
                    if len(tmp_msg) > 3 :
                        if tmp_msg.find(':') != -1 :
                            chkpos = tmp_msg.find(':')
                            hours1 = tmp_msg[chkpos-2:chkpos]
                            minutes1 = tmp_msg[chkpos+1:chkpos+3]
                            now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
                        else:
                            chkpos = len(tmp_msg)-2
                            hours1 = tmp_msg[chkpos-2:chkpos]
                            minutes1 = tmp_msg[chkpos:chkpos+2]
                            now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
                    else:
                        now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                        tmp_now = now2
                    bossFlag[i] = False
                    bossFlag0[i] = False
                    bossMungFlag[i] = False
                    bossMungCnt[i] = 1
                    # A clock time later than "now" is interpreted as
                    # yesterday's cut.
                    if tmp_now > now2 :
                        tmp_now = tmp_now + datetime.timedelta(days=int(-1))
                    if tmp_now < now2 :
                        # Roll forward by the respawn interval until the next
                        # spawn is in the future, counting missed spawns.
                        deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
                        while now2 > tmp_now :
                            tmp_now = tmp_now + deltaTime
                            bossMungCnt[i] = bossMungCnt[i] + 1
                        now2 = tmp_now
                        bossMungCnt[i] = bossMungCnt[i] - 1
                    else :
                        now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
                    tmp_bossTime[i] = bossTime[i] = nextTime = now2
                    tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
                    tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
            await dbSave()
            await dbLoad()
            await dbSave()
            await ctx.send('<보스 일괄 입력 완료>', tts=False)
            print ("<보스 일괄 입력 완료>")
        else:
            return
    ################ Bulk-register spawn times for "mung" bosses ################
    @commands.command(name=command[40][0], aliases=command[40][1:])
    async def mungBossInput_(self, ctx):
        """Bulk-register a next-spawn time for every "mung" boss (bossData[i][2] == "1")
        that currently has no timer (bossTimeString[i] == '99:99:99').

        The text after the command is an optional base time: 'HH:MM' or 'HHMM'
        (with or without ':'). When omitted, the current server-offset-adjusted
        time is used. For each matching boss the next spawn is rolled forward by
        the boss's respawn interval until it lies in the future, counting each
        skipped cycle in bossMungCnt. State is persisted via dbSave()/dbLoad().
        """
        global basicSetting
        global bossData
        global fixed_bossData
        global bossTime
        global tmp_bossTime
        global fixed_bossTime
        global bossTimeString
        global bossDateString
        global tmp_bossTimeString
        global tmp_bossDateString
        global bossFlag
        global bossFlag0
        global bossMungFlag
        global bossMungCnt
        # Only react in the configured command channel (basicSetting[7]).
        if ctx.message.channel.id == basicSetting[7]:
            msg = ctx.message.content[len(ctx.invoked_with)+1:]
            for i in range(bossNum):
                # Only mung-type bosses that have no registered timer yet.
                if bossData[i][2] == "1" and bossTimeString[i] == '99:99:99':
                    tmp_msg = msg
                    if len(tmp_msg) > 3 :
                        if tmp_msg.find(':') != -1 :
                            # 'HH:MM' form — split around the colon.
                            chkpos = tmp_msg.find(':')
                            hours1 = tmp_msg[chkpos-2:chkpos]
                            minutes1 = tmp_msg[chkpos+1:chkpos+3]
                            now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
                        else:
                            # 'HHMM' form — last two digits are minutes.
                            chkpos = len(tmp_msg)-2
                            hours1 = tmp_msg[chkpos-2:chkpos]
                            minutes1 = tmp_msg[chkpos:chkpos+2]
                            now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                            tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
                    else:
                        # No usable time given: start from "now".
                        now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
                        tmp_now = now2
                    # Reset per-boss alarm/mung bookkeeping before recomputing.
                    bossFlag[i] = False
                    bossFlag0[i] = False
                    bossMungFlag[i] = False
                    bossMungCnt[i] = 1
                    # A clock time later than "now" is interpreted as yesterday.
                    if tmp_now > now2 :
                        tmp_now = tmp_now + datetime.timedelta(days=int(-1))
                    if tmp_now < now2 :
                        # Roll forward by whole respawn cycles until the spawn
                        # is in the future, counting skipped ("mung") cycles.
                        deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
                        while now2 > tmp_now :
                            tmp_now = tmp_now + deltaTime
                            bossMungCnt[i] = bossMungCnt[i] + 1
                        now2 = tmp_now
                        bossMungCnt[i] = bossMungCnt[i] - 1
                    else :
                        now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
                    tmp_bossTime[i] = bossTime[i] = nextTime = now2
                    tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
                    tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
            # Persist, reload (re-derives runtime state), then persist again.
            await dbSave()
            await dbLoad()
            await dbSave()
            await ctx.send('<멍보스 일괄 입력 완료>', tts=False)
            print ("<멍보스 일괄 입력 완료>")
        else:
            return
################ 가장 근접한 보스타임 출력 ################
@commands.command(name=command[15][0], aliases=command[15][1:])
async def nearTimeBoss_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
checkTime = datetime.datetime.now() + datetime.timedelta(days=1, hours = int(basicSetting[0]))
datelist = []
datelist2 = []
ouput_bossData = []
aa = []
sorted_datelist = []
for i in range(bossNum):
if bossMungFlag[i] != True and bossTimeString[i] != '99:99:99' :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossMungFlag[i] != True :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
aa.append(bossTime[i]) #output_bossData[1] : 시간
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
tmp_sorted_datelist = sorted(datelist)
for i in range(len(tmp_sorted_datelist)):
if checkTime > tmp_sorted_datelist[i]:
sorted_datelist.append(tmp_sorted_datelist[i])
if len(sorted_datelist) == 0:
await ctx.send( '<보스타임 정보가 없습니다.>', tts=False)
else :
result_lefttime = ''
if len(sorted_datelist) > int(basicSetting[9]):
for j in range(int(basicSetting[9])):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
else :
for j in range(len(sorted_datelist)):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
embed = discord.Embed(
description= result_lefttime,
color=0xff0000
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 음성파일 생성 후 재생 ################
@commands.command(name=command[16][0], aliases=command[16][1:])
async def playText_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
msg = ctx.message.content[len(ctx.invoked_with)+1:]
sayMessage = msg
try:
await MakeSound(ctx.message.author.display_name +'님이, ' + sayMessage, './sound/say')
except:
await ctx.send( f"```음성파일 생성에 실패하였습니다.!(amazon polly 사용시 키 값을 확인하세요!)```")
return
await ctx.send("```< " + ctx.author.display_name + " >님이 \"" + sayMessage + "\"```", tts=False)
try:
if aws_key != "" and aws_secret_key != "":
await PlaySound(ctx.voice_client, './sound/say.mp3')
else:
await PlaySound(ctx.voice_client, './sound/say.wav')
except:
await ctx.send( f"```음성파일 재생에 실패하였습니다. 접속에 문제가 있거나 음성채널에 접속 되지 않은 상태입니다.!```")
return
else:
return
################ 리젠시간 출력 ################
@commands.command(name=command[17][0], aliases=command[17][1:])
async def regenTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
await ctx.send(embed=regenembed, tts=False)
else:
return
################ 현재시간 확인 ################
@commands.command(name=command[18][0], aliases=command[18][1:])
async def currentTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
curruntTime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
embed = discord.Embed(
title = '현재시간은 ' + curruntTime.strftime('%H') + '시 ' + curruntTime.strftime('%M') + '분 ' + curruntTime.strftime('%S')+ '초 입니다.',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 공지 등록/확인 ################
@commands.command(name=command[19][0], aliases=command[19][1:])
async def notice_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content.split(" ")
if len(msg) > 1:
sayMessage = " ".join(msg[1:])
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 등록", sayMessage, contents.sha)
await ctx.send( '< 공지 등록완료 >', tts=False)
else:
notice_initdata = repo.get_contents("notice.ini")
notice = base64.b64decode(notice_initdata.content)
notice = notice.decode('utf-8')
if notice != '' :
embed = discord.Embed(
description= str(notice),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '```등록된 공지가 없습니다.```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 공지 삭제 ################
@commands.command(name=command[20][0], aliases=command[20][1:])
async def noticeDel_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 삭제", '', contents.sha)
await ctx.send( '< 공지 삭제완료 >', tts=False)
else:
return
################ 봇 상태메세지 변경 ################
@commands.command(name=command[21][0], aliases=command[21][1:])
async def botStatus_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
sayMessage = msg
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=sayMessage, type=1), afk = False)
await ctx.send( '< 상태메세지 변경완료 >', tts=False)
else:
return
    ################ Print the boss timetable ################
    @commands.command(name=command[22][0], aliases=command[22][1:])
    async def bossTime_(self, ctx):
        """Post the boss timetable sorted by next spawn time, followed by the
        list of bosses with no registered timer, then persist state.

        Row legend: '+' upcoming spawn, '-' overdue ("mung") spawn, '@' fixed
        boss; (멍 n회)/(미 n회) is the missed-cycle count. Output is chunked
        into multiple embeds to stay under Discord's description size limit.
        """
        if ctx.message.channel.id == basicSetting[7]:
            datelist = []
            datelist2 = []
            ouput_bossData = []
            aa = []
            # Collect every boss's relevant spawn instant; fixed bosses only
            # when they spawn within the next 3 hours.
            for i in range(bossNum):
                if bossMungFlag[i] == True :
                    datelist2.append(tmp_bossTime[i])
                else :
                    datelist2.append(bossTime[i])
            for i in range(fixed_bossNum):
                if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
                    datelist2.append(fixed_bossTime[i])
            datelist = list(set(datelist2))
            tmp_boss_information = []
            tmp_cnt = 0
            tmp_time_delta = 0
            tmp_boss_information.append('')
            for i in range(bossNum):
                if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
                    # No timer registered: goes to the "unreserved" list,
                    # chunked at ~1000 chars per embed.
                    if len(tmp_boss_information[tmp_cnt]) > 1000 :
                        tmp_boss_information.append('')
                        tmp_cnt += 1
                    tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
                else :
                    aa.append(bossData[i][0]) #output_bossData[0] : boss name
                    if bossMungFlag[i] == True :
                        aa.append(tmp_bossTime[i]) #output_bossData[1] : time
                        # Day offset relative to "today" shown as (+Nd)/(-Nd).
                        tmp_time_delta = (tmp_bossTime[i].date() - (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).date()).days
                        if tmp_time_delta == 0:
                            aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
                        else:
                            if tmp_time_delta > 0:
                                aa.append(f"(+{tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
                            else:
                                aa.append(f"({tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
                            tmp_time_delta = 0
                        # aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : time (00:00:00) -> without seconds : aa.append(tmp_bossTime[i].strftime('%H:%M'))
                        aa.append('-') #output_bossData[3] : -
                    else :
                        aa.append(bossTime[i]) #output_bossData[1] : time
                        # NOTE(review): this non-mung branch computes the day
                        # offset from tmp_bossTime (and formats tmp_bossTime
                        # below) rather than bossTime — confirm intended.
                        tmp_time_delta = (tmp_bossTime[i].date() - (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).date()).days
                        if tmp_time_delta == 0:
                            aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
                        else:
                            if tmp_time_delta > 0:
                                aa.append(f"(+{tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
                            else:
                                aa.append(f"({tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
                            tmp_time_delta = 0
                        # aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : time (00:00:00) -> without seconds : aa.append(bossTime[i].strftime('%H:%M'))
                        aa.append('+') #output_bossData[3] : +
                    aa.append(bossData[i][2]) #output_bossData[4] : mung/no-input boss flag
                    aa.append(bossMungCnt[i]) #output_bossData[5] : mung/no-input count
                    aa.append(bossData[i][6]) #output_bossData[6] : message
                    ouput_bossData.append(aa)
                    aa = []
            for i in range(fixed_bossNum):
                aa.append(fixed_bossData[i][0]) #output_bossData[0] : boss name
                aa.append(fixed_bossTime[i]) #output_bossData[1] : time
                aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : time (00:00:00) -> without seconds : aa.append(fixed_bossTime[i].strftime('%H:%M'))
                aa.append('@') #output_bossData[3] : @
                aa.append(0) #output_bossData[4] : mung/no-input boss flag
                aa.append(0) #output_bossData[5] : mung/no-input count
                aa.append("") #output_bossData[6] : message
                ouput_bossData.append(aa)
                aa = []
            boss_information = []
            cnt = 0
            boss_information.append('')
            # Walk spawn instants in order; emit one line per matching boss,
            # chunked at ~1800 chars per embed description.
            for timestring in sorted(datelist):
                if len(boss_information[cnt]) > 1800 :
                    boss_information.append('')
                    cnt += 1
                for i in range(len(ouput_bossData)):
                    if timestring == ouput_bossData[i][1]:
                        if ouput_bossData[i][4] == '0' :
                            if ouput_bossData[i][5] == 0 :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
                            else :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
                        else :
                            if ouput_bossData[i][5] == 0 :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
                            else :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
            if len(boss_information) == 1 and len(tmp_boss_information) == 1:
                # Both sections fit in one embed: send a single combined embed.
                if len(boss_information[0]) != 0:
                    boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
                else :
                    boss_information[0] = '``` ```'
                if len(tmp_boss_information[0]) != 0:
                    tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
                else :
                    tmp_boss_information[0] = '``` ```'
                embed = discord.Embed(
                    title = "----- 보스탐 정보 -----",
                    description= boss_information[0],
                    color=0x0000ff
                )
                embed.add_field(
                    name="----- 미예약 보스 -----",
                    value= tmp_boss_information[0],
                    inline = False
                )
                await ctx.send( embed=embed, tts=False)
            else :
                ########################### normal-boss output
                if len(boss_information[0]) != 0:
                    boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
                else :
                    boss_information[0] = '``` ```'
                embed = discord.Embed(
                    title = "----- 보스탐 정보 -----",
                    description= boss_information[0],
                    color=0x0000ff
                )
                await ctx.send( embed=embed, tts=False)
                for i in range(len(boss_information)-1):
                    if len(boss_information[i+1]) != 0:
                        boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
                    else :
                        boss_information[i+1] = '``` ```'
                    embed = discord.Embed(
                        title = '',
                        description= boss_information[i+1],
                        color=0x0000ff
                    )
                    await ctx.send( embed=embed, tts=False)
                ########################### unreserved-boss output
                if len(tmp_boss_information[0]) != 0:
                    if len(tmp_boss_information) == 1 :
                        # Single chunk: drop the trailing comma.
                        tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
                    else:
                        tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
                else :
                    tmp_boss_information[0] = '``` ```'
                embed = discord.Embed(
                    title = "----- 미예약 보스 -----",
                    description= tmp_boss_information[0],
                    color=0x0000ff
                )
                await ctx.send( embed=embed, tts=False)
                for i in range(len(tmp_boss_information)-1):
                    if len(tmp_boss_information[i+1]) != 0:
                        if i == len(tmp_boss_information)-2:
                            # Last chunk: drop the trailing comma.
                            tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
                        else:
                            tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
                    else :
                        tmp_boss_information[i+1] = '``` ```'
                    embed = discord.Embed(
                        title = '',
                        description= tmp_boss_information[i+1],
                        color=0x0000ff
                    )
                    await ctx.send( embed=embed, tts=False)
            # Persist boss timers and the kill/item lists as a side effect.
            await dbSave()
            await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
            await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
        else:
            return
    ################ Print the boss timetable (including fixed bosses) ################
    @commands.command(name=command[23][0], aliases=command[23][1:])
    async def bossTime_fixed_(self, ctx):
        """Like bossTime_ but with a dedicated fixed-boss section first, then
        the regular timetable, then the "no timer yet" list; persists state.

        Spawns on a different calendar day than today are prefixed with
        '[YYYY-MM-DD]'. Output is chunked at ~1800 chars per embed.
        """
        if ctx.message.channel.id == basicSetting[7]:
            datelist = []
            datelist2 = []
            ouput_bossData = []
            aa = []
            fixed_datelist = []
            # Regular bosses: relevant spawn instants (mung uses tmp time).
            for i in range(bossNum):
                if bossMungFlag[i] == True :
                    datelist2.append(tmp_bossTime[i])
                else :
                    datelist2.append(bossTime[i])
            datelist = list(set(datelist2))
            tmp_boss_information = []
            tmp_cnt = 0
            tmp_boss_information.append('')
            for i in range(bossNum):
                if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
                    # No timer registered: goes to the "unreserved" list.
                    if len(tmp_boss_information[tmp_cnt]) > 1800 :
                        tmp_boss_information.append('')
                        tmp_cnt += 1
                    tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
                else :
                    aa.append(bossData[i][0]) #output_bossData[0] : boss name
                    if bossMungFlag[i] == True :
                        aa.append(tmp_bossTime[i]) #output_bossData[1] : time
                        # Prefix the date when the spawn is not today.
                        if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == tmp_bossTime[i].strftime('%Y-%m-%d'):
                            aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
                        else:
                            aa.append(f"[{tmp_bossTime[i].strftime('%Y-%m-%d')}] {tmp_bossTime[i].strftime('%H:%M:%S')}")
                        # aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : time (00:00:00) -> without seconds : aa.append(tmp_bossTime[i].strftime('%H:%M'))
                        aa.append('-') #output_bossData[3] : -
                    else :
                        aa.append(bossTime[i]) #output_bossData[1] : time
                        if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == bossTime[i].strftime('%Y-%m-%d'):
                            aa.append(bossTime[i].strftime('%H:%M:%S'))
                        else:
                            aa.append(f"[{bossTime[i].strftime('%Y-%m-%d')}] {bossTime[i].strftime('%H:%M:%S')}")
                        # aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : time (00:00:00) -> without seconds : aa.append(bossTime[i].strftime('%H:%M'))
                        aa.append('+') #output_bossData[3] : +
                    aa.append(bossData[i][2]) #output_bossData[4] : mung/no-input boss flag
                    aa.append(bossMungCnt[i]) #output_bossData[5] : mung/no-input count
                    aa.append(bossData[i][6]) #output_bossData[6] : message
                    ouput_bossData.append(aa)
                    aa = []
            # Fixed bosses are listed in their own section (deduplicated times).
            for i in range(fixed_bossNum):
                fixed_datelist.append(fixed_bossTime[i])
            fixed_datelist = list(set(fixed_datelist))
            fixedboss_information = []
            cntF = 0
            fixedboss_information.append('')
            for timestring1 in sorted(fixed_datelist):
                if len(fixedboss_information[cntF]) > 1800 :
                    fixedboss_information.append('')
                    cntF += 1
                for i in range(fixed_bossNum):
                    if timestring1 == fixed_bossTime[i]:
                        if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == fixed_bossTime[i].strftime('%Y-%m-%d'):
                            tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M:%S') # without seconds : tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M')
                        else:
                            tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M:%S') # without seconds : tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M')
                        fixedboss_information[cntF] = fixedboss_information[cntF] + tmp_timeSTR + ' : ' + fixed_bossData[i][0] + '\n'
            boss_information = []
            cnt = 0
            boss_information.append('')
            for timestring in sorted(datelist):
                if len(boss_information[cnt]) > 1800 :
                    boss_information.append('')
                    cnt += 1
                for i in range(len(ouput_bossData)):
                    if timestring == ouput_bossData[i][1]:
                        if ouput_bossData[i][4] == '0' :
                            if ouput_bossData[i][5] == 0 :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
                            else :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
                        else :
                            if ouput_bossData[i][5] == 0 :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
                            else :
                                boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
            ########################### fixed-boss output
            if len(fixedboss_information[0]) != 0:
                fixedboss_information[0] = "```diff\n" + fixedboss_information[0] + "\n```"
            else :
                fixedboss_information[0] = '``` ```'
            embed = discord.Embed(
                title = "----- 고 정 보 스 -----",
                description= fixedboss_information[0],
                color=0x0000ff
            )
            await ctx.send( embed=embed, tts=False)
            for i in range(len(fixedboss_information)-1):
                if len(fixedboss_information[i+1]) != 0:
                    fixedboss_information[i+1] = "```diff\n" + fixedboss_information[i+1] + "\n```"
                else :
                    fixedboss_information[i+1] = '``` ```'
                embed = discord.Embed(
                    title = '',
                    description= fixedboss_information[i+1],
                    color=0x0000ff
                )
                await ctx.send( embed=embed, tts=False)
            ########################### normal-boss output
            if len(boss_information[0]) != 0:
                boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
            else :
                boss_information[0] = '``` ```'
            embed = discord.Embed(
                title = "----- 보스탐 정보 -----",
                description= boss_information[0],
                color=0x0000ff
            )
            await ctx.send( embed=embed, tts=False)
            for i in range(len(boss_information)-1):
                if len(boss_information[i+1]) != 0:
                    boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
                else :
                    boss_information[i+1] = '``` ```'
                embed = discord.Embed(
                    title = '',
                    description= boss_information[i+1],
                    color=0x0000ff
                )
                await ctx.send( embed=embed, tts=False)
            ########################### unreserved-boss output
            if len(tmp_boss_information[0]) != 0:
                if len(tmp_boss_information) == 1 :
                    # Single chunk: drop the trailing comma.
                    tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
                else:
                    tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
            else :
                tmp_boss_information[0] = '``` ```'
            embed = discord.Embed(
                title = "----- 미예약 보스 -----",
                description= tmp_boss_information[0],
                color=0x0000ff
            )
            await ctx.send( embed=embed, tts=False)
            for i in range(len(tmp_boss_information)-1):
                if len(tmp_boss_information[i+1]) != 0:
                    if i == len(tmp_boss_information)-2:
                        # Last chunk: drop the trailing comma.
                        tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
                    else:
                        tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
                else :
                    tmp_boss_information[i+1] = '``` ```'
                embed = discord.Embed(
                    title = '',
                    description= tmp_boss_information[i+1],
                    color=0x0000ff
                )
                await ctx.send( embed=embed, tts=False)
            # Persist boss timers and the kill/item lists as a side effect.
            await dbSave()
            await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
            await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
        else:
            return
################ 킬초기화 ################
@commands.command(name=command[24][0], aliases=command[24][1:])
async def killInit_(self, ctx):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
kill_Data = {}
await init_data_list('kill_list.ini', '-----척살명단-----')
return await ctx.send( '< 킬 목록 초기화완료 >', tts=False)
else:
return
################ 킬명단 확인 및 추가################
@commands.command(name=command[25][0], aliases=command[25][1:])
async def killList_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
kill_output = ''
for key, value in kill_Data.items():
kill_output += ':skull_crossbones: ' + str(key) + ' : ' + str(value) + '번 따히!\n'
if kill_output != '' :
embed = discord.Embed(
description= str(kill_output),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '등록된 킬 목록이 없습니다. 분발하세요!',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
if args in kill_Data:
kill_Data[args] += 1
else:
kill_Data[args] = 1
embed = discord.Embed(
description= ':skull_crossbones: ' + args + ' 따히! [' + str(kill_Data[args]) + '번]\n',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
else:
return
################ 킬삭제 ################
@commands.command(name=command[26][0], aliases=command[26][1:])
async def killDel_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
return await ctx.send( '```제대로 된 아이디를 입력해주세요!\n```', tts=False)
if args in kill_Data:
del kill_Data[args]
return await ctx.send( ':angel: ' + args + ' 삭제완료!', tts=False)
else :
return await ctx.send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
else:
return
################ 킬 차감 ################
@commands.command(name=command[33][0], aliases=command[33][1:])
async def killSubtract_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
return await ctx.send(f'{command[33][0]} [아이디] 혹은 {command[33][0]} [아이디] [횟수] 양식에 맞춰 입력해주세요!', tts = False)
input_data = args.split()
if len(input_data) == 1:
kill_name = args
count = 1
elif len(input_data) == 2:
kill_name = input_data[0]
try:
count = int(input_data[1])
except ValueError:
return await ctx.send(f'[횟수]는 숫자로 입력바랍니다')
else:
return await ctx.send(f'{command[33][0]} [아이디] 혹은 {command[33][0]} [아이디] [횟수] 양식에 맞춰 입력해주세요!', tts = False)
if kill_name in kill_Data:
if kill_Data[kill_name] < int(count):
return await ctx.send( f"등록된 킬 횟수[{str(kill_Data[kill_name])}번]보다 차감 횟수[{str(count)}번]가 많습니다. 킬 횟수에 맞게 재입력 바랍니다.", tts=False)
else:
kill_Data[kill_name] -= int(count)
else:
return await ctx.send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
embed = discord.Embed(
description= f':angel: [{kill_name}] [{str(count)}번] 차감 완료! [잔여 : {str(kill_Data[kill_name])}번]\n',
color=0xff00ff
)
if kill_Data[kill_name] == 0:
del kill_Data[kill_name]
return await ctx.send(embed=embed, tts=False)
else:
return
    ################ Racing mini-game ################
    @commands.command(name=command[27][0], aliases=command[27][1:])
    async def race_(self, ctx):
        """Run an animated text race between 2–12 space-separated entrants.

        Each entrant is assigned a random emoji unit and a distinct random
        speed; one Discord message is edited once per second to animate the
        track, then the final ranking is appended.
        """
        # Ignore the generic command channel when a dedicated racing channel
        # (basicSetting[19]) is configured.
        if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
            return
        if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[19]:
            msg = ctx.message.content[len(ctx.invoked_with)+1:]
            race_info = []
            fr = []
            racing_field = []
            str_racing_field = []
            cur_pos = []
            race_val = []
            random_pos = []
            racing_result = []
            output = ':camera: :camera: :camera: 신나는 레이싱! :camera: :camera: :camera:\n'
            #racing_unit = [':giraffe:', ':elephant:', ':tiger2:', ':hippopotamus:', ':crocodile:',':leopard:',':ox:', ':sheep:', ':pig2:',':dromedary_camel:',':dragon:',':rabbit2:'] # animal skins
            #racing_unit = [':red_car:', ':taxi:', ':bus:', ':trolleybus:', ':race_car:', ':police_car:', ':ambulance:', ':fire_engine:', ':minibus:', ':truck:', ':articulated_lorry:', ':tractor:', ':scooter:', ':manual_wheelchair:', ':motor_scooter:', ':auto_rickshaw:', ':blue_car:', ':bike:', ':helicopter:', ':steam_locomotive:'] # vehicle skins
            #random.shuffle(racing_unit)
            racing_member = msg.split(" ")
            racing_unit = []
            # NOTE(review): the next assignment is dead — it is immediately
            # overwritten by ctx.message.guild.emojis on the following line.
            emoji = discord.Emoji
            emoji = ctx.message.guild.emojis
            # Resolve each configured unit name to a custom guild emoji when
            # one exists, otherwise keep the plain :name: shortcode.
            for j in range(len(tmp_racing_unit)):
                racing_unit.append(':' + tmp_racing_unit[j] + ':')
                for i in range(len(emoji)):
                    if emoji[i].name == tmp_racing_unit[j].strip(":"):
                        racing_unit[j] = '<:' + tmp_racing_unit[j] + ':' + str(emoji[i].id) + '>'
            random.shuffle(racing_unit)
            field_size = 60
            # Base number of animation steps; shrinks as entrants grow.
            tmp_race_tab = 35 - len(racing_member)
            if len(racing_member) <= 1:
                await ctx.send('레이스 인원이 2명보다 작습니다.')
                return
            elif len(racing_member) >= 13:
                await ctx.send('레이스 인원이 12명 초과입니다.')
                return
            else :
                # Distinct speed value per entrant (lower = fewer moves = faster finish rank).
                race_val = random.sample(range(tmp_race_tab, tmp_race_tab+len(racing_member)), len(racing_member))
                random.shuffle(race_val)
                # race_info[i] = [name, unit emoji, speed value]; one track row
                # per entrant, with the unit starting at the right edge.
                for i in range(len(racing_member)):
                    fr.append(racing_member[i])
                    fr.append(racing_unit[i])
                    fr.append(race_val[i])
                    race_info.append(fr)
                    fr = []
                    for i in range(field_size):
                        fr.append(" ")
                    racing_field.append(fr)
                    fr = []
                for i in range(len(racing_member)):
                    racing_field[i][0] = "|"
                    racing_field[i][field_size-2] = race_info[i][1]
                    # Names longer than 5 chars are truncated with '..'.
                    if len(race_info[i][0]) > 5:
                        racing_field[i][field_size-1] = "| " + race_info[i][0][:5] + '..'
                    else:
                        racing_field[i][field_size-1] = "| " + race_info[i][0]
                    str_racing_field.append("".join(racing_field[i]))
                    cur_pos.append(field_size-2)
                for i in range(len(racing_member)):
                    output += str_racing_field[i] + '\n'
                # Countdown, editing the same message each second.
                result_race = await ctx.send(output + ':traffic_light: 3초 후 경주가 시작됩니다!')
                await asyncio.sleep(1)
                await result_race.edit(content = output + ':traffic_light: 2초 후 경주가 시작됩니다!')
                await asyncio.sleep(1)
                await result_race.edit(content = output + ':traffic_light: 1초 후 경주가 시작됩니다!')
                await asyncio.sleep(1)
                await result_race.edit(content = output + ':checkered_flag: 경주 시작!')
                # Precompute each entrant's descending track positions, padded
                # with 1s so every list has the same length (slower racers idle
                # at the finish while others catch up).
                for i in range(len(racing_member)):
                    test = random.sample(range(2,field_size-2), race_info[i][2])
                    while len(test) != tmp_race_tab + len(racing_member)-1 :
                        test.append(1)
                    test.append(1)
                    test.sort(reverse=True)
                    random_pos.append(test)
                # Animate: one edit per step, alternating the camera banner.
                for j in range(len(random_pos[0])):
                    if j%2 == 0:
                        output = ':camera: :camera_with_flash: :camera: 신나는 레이싱! :camera_with_flash: :camera: :camera_with_flash:\n'
                    else :
                        output = ':camera_with_flash: :camera: :camera_with_flash: 신나는 레이싱! :camera: :camera_with_flash: :camera:\n'
                    str_racing_field = []
                    for i in range(len(racing_member)):
                        # Swap the unit from its current cell to the next one.
                        temp_pos = cur_pos[i]
                        racing_field[i][random_pos[i][j]], racing_field[i][temp_pos] = racing_field[i][temp_pos], racing_field[i][random_pos[i][j]]
                        cur_pos[i] = random_pos[i][j]
                        str_racing_field.append("".join(racing_field[i]))
                    await asyncio.sleep(1)
                    for i in range(len(racing_member)):
                        output += str_racing_field[i] + '\n'
                    await result_race.edit(content = output + ':checkered_flag: 경주 시작!')
                # Rank: fewer moves (smaller speed value) finished first.
                for i in range(len(racing_field)):
                    fr.append(race_info[i][0])
                    fr.append((race_info[i][2]) - tmp_race_tab + 1)
                    racing_result.append(fr)
                    fr = []
                result = sorted(racing_result, key=lambda x: x[1])
                result_str = ''
                # Map placements 1..10 to medal/number emoji; 11+ get ':x:'.
                for i in range(len(result)):
                    if result[i][1] == 1:
                        result[i][1] = ':first_place:'
                    elif result[i][1] == 2:
                        result[i][1] = ':second_place:'
                    elif result[i][1] == 3:
                        result[i][1] = ':third_place:'
                    elif result[i][1] == 4:
                        result[i][1] = ':four:'
                    elif result[i][1] == 5:
                        result[i][1] = ':five:'
                    elif result[i][1] == 6:
                        result[i][1] = ':six:'
                    elif result[i][1] == 7:
                        result[i][1] = ':seven:'
                    elif result[i][1] == 8:
                        result[i][1] = ':eight:'
                    elif result[i][1] == 9:
                        result[i][1] = ':nine:'
                    elif result[i][1] == 10:
                        result[i][1] = ':keycap_ten:'
                    else:
                        result[i][1] = ':x:'
                    result_str += result[i][1] + " " + result[i][0] + " "
                #print(result)
                await asyncio.sleep(1)
                return await result_race.edit(content = output + ':tada: 경주 종료!\n' + result_str)
        else:
            return
################ 채널설정 ################
@commands.command(name=command[28][0], aliases=command[28][1:])
async def set_channel_(self, ctx):
global basicSetting
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = ctx.message.channel.id #메세지가 들어온 채널 ID
if channel == basicSetting[7] and msg in ["사다리", "정산", "척살", "경주", "아이템"]:
return await ctx.send(f'명령어 채널은 `{msg} 채널`로 `설정`할 수 없습니다.', tts=False)
if msg == '사다리' : #사다리 채널 설정
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('ladderchannel'):
inputData_textCH[i] = 'ladderchannel = ' + str(channel) + '\r'
basicSetting[8] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 사다리채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 사다리채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '정산' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('jungsanchannel'):
inputData_textCH[i] = 'jungsanchannel = ' + str(channel) + '\r'
basicSetting[11] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 정산채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 정산채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '척살' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('killchannel'):
inputData_textCH[i] = 'killchannel = ' + str(channel) + '\r'
basicSetting[18] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 척살채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 척살채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '경주' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('racingchannel'):
inputData_textCH[i] = 'racingchannel = ' + str(channel) + '\r'
basicSetting[19] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 경주채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 경주채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '아이템' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('itemchannel'):
inputData_textCH[i] = 'itemchannel = ' + str(channel) + '\r'
basicSetting[20] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 아이템채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 아이템채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
else :
return await ctx.send(f'```올바른 명령어를 입력해주세요.```', tts=False)
################ 채널삭제 ################
@commands.command(name=command[42][0], aliases=command[42][1:])
async def remove_channel_(self, ctx):
    """Unset one of the optional channels (ladder/settle/kill/racing/item).

    Blanks the matching entry in test_setting.ini on the remote repo and
    clears the cached channel ID in basicSetting. Only accepted in the
    admin channel (basicSetting[7]).

    Refactor: the five branches were byte-identical except for the ini key,
    the basicSetting index and the display label, so they are table-driven now.
    """
    global basicSetting
    if ctx.message.channel.id != basicSetting[7]:
        return
    msg = ctx.message.content[len(ctx.invoked_with)+1:]
    # command argument -> (ini key, basicSetting index, display label)
    channel_table = {
        '사다리': ('ladderchannel', 8, '사다리채널'),
        '정산': ('jungsanchannel', 11, '정산채널'),
        '척살': ('killchannel', 18, '척살채널'),
        '경주': ('racingchannel', 19, '경주채널'),
        '아이템': ('itemchannel', 20, '아이템채널'),
    }
    if msg not in channel_table:
        return await ctx.send(f'```올바른 명령어를 입력해주세요.```', tts=False)
    ini_key, setting_idx, label = channel_table[msg]
    # Fetch the remote settings file and blank out only the matching entry.
    inidata_textCH = repo.get_contents("test_setting.ini")
    file_data_textCH = base64.b64decode(inidata_textCH.content)
    file_data_textCH = file_data_textCH.decode('utf-8')
    inputData_textCH = file_data_textCH.split('\n')
    # Resolve the channel object first so the confirmation can show its name.
    ch_name = ctx.guild.get_channel(int(basicSetting[setting_idx]))
    for i in range(len(inputData_textCH)):
        if inputData_textCH[i].startswith(ini_key):
            inputData_textCH[i] = ini_key + ' = \r'
            basicSetting[setting_idx] = ""
    result_textCH = '\n'.join(inputData_textCH)
    contents = repo.get_contents("test_setting.ini")
    repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
    print(f'< {label} [{ch_name}] 삭제완료 >')
    return await ctx.send(f'< {label} [{ch_name}] 삭제완료 >', tts=False)
################ 아이템초기화 확인 ################
@commands.command(name=command[29][0], aliases=command[29][1:])
async def itemInit_(self, ctx):
    """Wipe the item warehouse and reset item_list.ini to its header."""
    global item_Data
    chan_id = ctx.message.channel.id
    # When a dedicated item channel exists, ignore the admin channel.
    if basicSetting[20] != "" and chan_id == basicSetting[7]:
        return
    # Only the admin channel or the item channel may trigger a reset.
    if chan_id != basicSetting[7] and chan_id != basicSetting[20]:
        return
    item_Data = {}
    await init_data_list('item_list.ini', '-----아이템 목록-----')
    return await ctx.send( '< 아이템 목록 초기화완료 >', tts=False)
################ 아이템 목록 확인 및 추가 ################
@commands.command(name=command[30][0], aliases=command[30][1:])
async def itemList_(self, ctx, *, args : str = None):
    # No args: show the whole warehouse (item_Data) as embeds.
    # With args: "[name]" or "[name] [count]" adds items to the warehouse.
    # When a dedicated item channel is configured, the admin channel is ignored.
    if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
        return
    if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
        global item_Data
        if not args:
            # Sort by item name for a stable listing.
            sorted_item_list = sorted(item_Data.items(), key=lambda x: x[0])
            embed_list : list = []
            embed_index : int = 0
            embed_cnt : int = 0
            embed = discord.Embed(title = '', description = f'`{self.bot.user.name}\'s 창고`', color = 0x00ff00)
            embed_list.append(embed)
            if len(sorted_item_list) > 0 :
                for item_id, count in sorted_item_list:
                    embed_cnt += 1
                    # At most 24 fields per embed; overflow starts a new embed.
                    if embed_cnt > 24 :
                        embed_cnt = 0
                        embed_index += 1
                        tmp_embed = discord.Embed(
                            title = "",
                            description = "",
                            color=0x00ff00
                            )
                        embed_list.append(tmp_embed)
                    embed_list[embed_index].add_field(name = item_id, value = count)
                # Only the last embed carries the total-count footer.
                embed_list[len(embed_list)-1].set_footer(text = f"전체 아이템 종류 : {len(item_Data)}개")
                if len(embed_list) > 1:
                    # Multiple embeds: pace the sends slightly to be gentle on rate limits.
                    for embed_data in embed_list:
                        await asyncio.sleep(0.1)
                        await ctx.send(embed = embed_data)
                    return
                else:
                    return await ctx.send(embed=embed, tts=False)
            else :
                embed.add_field(name = '\u200b\n', value = '창고가 비었습니다.\n\u200b')
                return await ctx.send(embed=embed, tts=False)
        input_data = args.split()
        if len(input_data) == 1:
            # Bare name: count defaults to 1.
            item_name = args
            count = 1
        elif len(input_data) == 2:
            item_name = input_data[0]
            try:
                count = int(input_data[1])
            except ValueError:
                return await ctx.send(f'아이템 [개수]는 숫자로 입력바랍니다')
        else:
            return await ctx.send(f'{command[30][0]} [아이템명] 혹은 {command[30][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
        # NOTE(review): zero or negative counts are accumulated as-is — confirm intended.
        if item_name in item_Data:
            item_Data[item_name] += int(count)
        else:
            item_Data[item_name] = int(count)
        embed = discord.Embed(
            description= f':inbox_tray: **[{item_name}] [{str(count)}개]** 등록 완료! [잔여 : {str(item_Data[item_name])}개]\n',
            color=0xff00ff
            )
        return await ctx.send(embed=embed, tts=False)
    else:
        return
################ 아이템 삭제 ################
@commands.command(name=command[31][0], aliases=command[31][1:])
async def itemDel_(self, ctx, *, args : str = None):
    """Remove an item entry from the warehouse entirely."""
    global item_Data
    chan_id = ctx.message.channel.id
    # A dedicated item channel supersedes the admin channel.
    if basicSetting[20] != "" and chan_id == basicSetting[7]:
        return
    if chan_id != basicSetting[7] and chan_id != basicSetting[20]:
        return
    if not args:
        return await ctx.send( f'{command[31][0]} [아이템명] 양식에 맞춰 입력해주세요!', tts = False)
    if args not in item_Data:
        return await ctx.send( '```아이템 목록에 등록되어 있지 않습니다!\n```', tts=False)
    del item_Data[args]
    embed = discord.Embed(
        description= ':outbox_tray: ' + args + ' 삭제완료!',
        color=0xff00ff
        )
    return await ctx.send(embed=embed, tts=False)
################ 아이템 차감 ################
@commands.command(name=command[32][0], aliases=command[32][1:])
async def itemSubtract_(self, ctx, *, args : str = None):
    """Subtract a count from a warehouse item; drop the entry at zero."""
    global item_Data
    chan_id = ctx.message.channel.id
    # A dedicated item channel supersedes the admin channel.
    if basicSetting[20] != "" and chan_id == basicSetting[7]:
        return
    if chan_id != basicSetting[7] and chan_id != basicSetting[20]:
        return
    if not args:
        return await ctx.send(f'{command[32][0]} [아이템명] 혹은 {command[32][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
    tokens = args.split()
    if len(tokens) == 1:
        # Bare name: subtract one.
        item_name, count = args, 1
    elif len(tokens) == 2:
        item_name = tokens[0]
        try:
            count = int(tokens[1])
        except ValueError:
            return await ctx.send(f'아이템 [개수]는 숫자로 입력바랍니다')
    else:
        return await ctx.send(f'{command[32][0]} [아이템명] 혹은 {command[32][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
    if item_name not in item_Data:
        return await ctx.send( '```아이템 목록에 등록되어 있지 않습니다!\n```', tts=False)
    if item_Data[item_name] < int(count):
        return await ctx.send( f"등록된 아이템 개수[{str(item_Data[item_name])}개]보다 차감 개수[{str(count)}개]가 많습니다. 등록 개수에 맞게 재입력 바랍니다.", tts=False)
    item_Data[item_name] -= int(count)
    embed = discord.Embed(
        description= f':outbox_tray: **[{item_name}] [{str(count)}개]** 차감 완료! [잔여 : {str(item_Data[item_name])}개]\n',
        color=0xff00ff
        )
    # Fully depleted entries are removed after the message is built.
    if item_Data[item_name] == 0:
        del item_Data[item_name]
    return await ctx.send(embed=embed, tts=False)
################ 서버 나가기 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[34][0], aliases=command[34][1:])
async def leaveGuild_(self, ctx):
    """List the guilds the bot is in and leave the one picked by number.

    Bug fix: the selected number was never range-checked. "0" or a negative
    number hit Python's negative indexing and made the bot leave the WRONG
    guild, and an out-of-range number raised an uncaught IndexError. Only
    1..len(guilds) is accepted now; anything else is silently ignored, the
    same way a non-numeric reply was before.
    """
    if ctx.message.channel.id != basicSetting[7]:
        return
    guild_list : str = ""
    for i, guild in enumerate(self.bot.guilds):
        guild_list += f"`{i+1}.` {guild}\n"
    embed = discord.Embed(
        title = "----- 서버 목록 -----",
        description = guild_list,
        color=0x00ff00
        )
    await ctx.send(embed = embed)
    try:
        await ctx.send(f"```떠나고 싶은 서버의 [숫자]를 입력하여 선택해 주세요```")
        message_result : discord.Message = await self.bot.wait_for("message", timeout = 10, check=(lambda message: message.channel == ctx.message.channel and message.author == ctx.message.author))
    except asyncio.TimeoutError:
        return await ctx.send(f"```서버 선택 시간이 초과됐습니다! 필요시 명령어를 재입력해 주세요```")
    try:
        selected = int(message_result.content)
    except ValueError:
        return
    # Reject 0, negatives and out-of-range picks (see docstring).
    if not 1 <= selected <= len(self.bot.guilds):
        return
    target = self.bot.guilds[selected-1]
    guild_name = target.name
    await self.bot.get_guild(target.id).leave()
    return await ctx.send(f"```[{guild_name}] 서버에서 떠났습니다.!```")
################ 수수료 계산기 ################
@commands.command(name=command[35][0], aliases=command[35][1:])
async def tax_check(self, ctx, *, args : str = None):
    """Show exchange-fee math (settled prices and the reverse-computed listing price).

    Bug fix: the argument-count check used "> 3", so a third argument was
    accepted and silently ignored. The documented format takes at most two
    ([판매금액] plus optional tax %), so anything beyond two is rejected now.
    """
    if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
        return
    if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[20]:
        return
    if not args:
        return await ctx.send(f"**{command[35][0]} [판매금액] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
    input_money_data : list = args.split()
    len_input_money_data = len(input_money_data)
    try:
        for i in range(len_input_money_data):
            input_money_data[i] = int(input_money_data[i])
    except ValueError:
        return await ctx.send(f"**[판매금액] (거래소세금)**은 숫자로 입력 해주세요.")
    # Format allows at most 2 args (was "> 3": off-by-one ignoring extras).
    if len_input_money_data < 1 or len_input_money_data > 2:
        return await ctx.send(f"**{command[35][0]} [판매금액] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
    elif len_input_money_data == 2:
        tax = input_money_data[1]
    else:
        tax = 5  # default exchange tax when omitted
    # First/second resale settlements, and the listing price reverse-computed
    # so the seller still receives the requested amount after tax.
    price_first_tax = int(input_money_data[0] * ((100-tax)/100))
    price_second_tax = int(price_first_tax * ((100-tax)/100))
    price_rev_tax = int((input_money_data[0] * 100)/(100-tax)+0.5)
    embed = discord.Embed(
        title = f"🧮 수수료 계산결과 (세율 {tax}% 기준) ",
        description = f"",
        color=0x00ff00
        )
    embed.add_field(name = "⚖️ 수수료 지원", value = f"```등록가 : {price_rev_tax}\n수령가 : {input_money_data[0]}\n세 금 : {price_rev_tax-input_money_data[0]}```")
    embed.add_field(name = "⚖️ 1차 거래", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_first_tax}\n세 금 : {input_money_data[0]-price_first_tax}```")
    embed.add_field(name = "⚖️ 2차 거래", value = f"```등록가 : {price_first_tax}\n정산가 : {price_second_tax}\n세 금 : {price_first_tax-price_second_tax}```")
    return await ctx.send(embed = embed)
################ 페이백 계산기 ################
@commands.command(name=command[36][0], aliases=command[36][1:])
async def payback_check(self, ctx, *, args : str = None):
    """Compute the payback between exchange price and actual price, two ways.

    Bug fixes: (1) the argument-count check used "> 4" although the format
    takes at most three args ([거래소가격] [실거래가] plus optional tax %),
    so a fourth argument was silently ignored; (2) the non-numeric error
    message showed the OTHER command's format ([판매금액] ...).
    """
    if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
        return
    if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[20]:
        return
    if not args:
        return await ctx.send(f"**{command[36][0]} [거래소가격] [실거래가] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
    input_money_data : list = args.split()
    len_input_money_data = len(input_money_data)
    try:
        for i in range(len_input_money_data):
            input_money_data[i] = int(input_money_data[i])
    except ValueError:
        return await ctx.send(f"**[거래소가격] [실거래가] (거래소세금)**은 숫자로 입력 해주세요.")
    # Format allows at most 3 args (was "> 4": off-by-one ignoring extras).
    if len_input_money_data < 2 or len_input_money_data > 3:
        return await ctx.send(f"**{command[36][0]} [거래소가격] [실거래가] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
    elif len_input_money_data == 3:
        tax = input_money_data[2]
    else:
        tax = 5  # default exchange tax when omitted
    # After-tax settlements for both prices.
    price_reg_tax = int(input_money_data[0] * ((100-tax)/100))
    price_real_tax = int(input_money_data[1] * ((100-tax)/100))
    # Method 1: both sides after tax. Method 2: actual price taken pre-tax.
    reault_payback = price_reg_tax - price_real_tax
    reault_payback1= price_reg_tax - input_money_data[1]
    embed = discord.Embed(
        title = f"🧮 페이백 계산결과1 (세율 {tax}% 기준) ",
        description = f"**```fix\n{reault_payback}```**",
        color=0x00ff00
        )
    embed.add_field(name = "⚖️ 거래소", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_reg_tax}\n세 금 : {input_money_data[0]-price_reg_tax}```")
    embed.add_field(name = "🕵️ 실거래", value = f"```등록가 : {input_money_data[1]}\n정산가 : {price_real_tax}\n세 금 : {input_money_data[1]-price_real_tax}```")
    await ctx.send(embed = embed)
    embed2 = discord.Embed(
        title = f"🧮 페이백 계산결과2 (세율 {tax}% 기준) ",
        description = f"**```fix\n{reault_payback1}```**",
        color=0x00ff00
        )
    embed2.add_field(name = "⚖️ 거래소", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_reg_tax}\n세 금 : {input_money_data[0]-price_reg_tax}```")
    embed2.add_field(name = "🕵️ 실거래", value = f"```내판가 : {input_money_data[1]}```")
    return await ctx.send(embed = embed2)
@commands.command(name=command[37][0], aliases=command[37][1:])
async def command_rock_paper_scissors_game(self, ctx : commands.Context):
    """Rock-paper-scissors via emoji reactions, timed by basicSetting[5] seconds.

    Cleanup: the `reaction_result is None` branch was unreachable (wait_for
    either returns a reaction or raises TimeoutError) and has been removed;
    the three win conditions are folded into a beats-map.
    """
    if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
        return
    if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[19]:
        return
    message_rock_paper_scissors : discord.message.Message = await ctx.send("안내면 진거 가위바위..")
    reaction_emoji : list = ["✌️", "✊", "✋"]
    for emoji in reaction_emoji:
        await message_rock_paper_scissors.add_reaction(emoji)
    def reaction_check(reaction, user):
        # Only accept one of our three emojis, on our message, from the invoker.
        return (reaction.message.id == message_rock_paper_scissors.id) and (user.id == ctx.author.id) and (str(reaction) in reaction_emoji)
    try:
        reaction_result, user = await self.bot.wait_for('reaction_add', check = reaction_check, timeout = int(basicSetting[5]))
    except asyncio.TimeoutError:
        return await ctx.send(f"시간이 초과됐습니다. ")
    bot_result : str = random.choice(reaction_emoji)
    # Each hand mapped to the hand it defeats.
    beats = {"✌️": "✋", "✊": "✌️", "✋": "✊"}
    player_choice = str(reaction_result)
    if player_choice == bot_result:
        result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n🤔비겼다!"
    elif beats[player_choice] == bot_result:
        result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n👍짝짝짝"
    else:
        result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n🤪저런.."
    return await ctx.send(result_rock_paper_scissors)
################ 보이스사용 ################
@commands.command(name=command[38][0], aliases=command[38][1:])
async def command_voice_use(self, ctx : commands.Context):
    """Persist voice_use = 1 and try to join the configured voice channel.

    Bug fix: when the connect attempt failed, `self.bot.voice_clients` could
    be empty, and indexing `voice_clients[0]` raised an uncaught IndexError.
    The list is checked before indexing now.
    """
    if ctx.message.channel.id != basicSetting[7]:
        return
    # Flip the voice_use entry in the remote settings file to 1.
    inidata_voice_use = repo.get_contents("test_setting.ini")
    file_data_voice_use = base64.b64decode(inidata_voice_use.content)
    file_data_voice_use = file_data_voice_use.decode('utf-8')
    inputData_voice_use = file_data_voice_use.split('\n')
    for i in range(len(inputData_voice_use)):
        if inputData_voice_use[i].startswith("voice_use ="):
            inputData_voice_use[i] = f"voice_use = 1\r"
            basicSetting[21] = "1"
    result_voice_use = '\n'.join(inputData_voice_use)
    contents = repo.get_contents("test_setting.ini")
    repo.update_file(contents.path, "test_setting", result_voice_use, contents.sha)
    if basicSetting[6] != "":
        try:
            await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
        except:
            await ctx.send( '< 음성채널 접속 에러! >', tts=False)
            pass
        # Guard against an empty client list after a failed connect.
        if self.bot.voice_clients and self.bot.voice_clients[0].is_connected() :
            print("보이스 사용 설정 완료!")
            return await ctx.send(f"```보이스를 사용하도록 설정하였습니다.!```")
    return await ctx.send(f"```보이스 사용 설정이 완료 되었습니다!\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >```")
################ 보이스미사용 ################
@commands.command(name=command[39][0], aliases=command[39][1:])
async def command_voice_not_use(self, ctx : commands.Context):
    """Disconnect this guild's voice client and persist voice_use = 0."""
    if ctx.message.channel.id != basicSetting[7]:
        return
    # Find the voice client attached to this guild, stop playback, force-drop.
    for vc in self.bot.voice_clients:
        if vc.guild.id != int(ctx.guild.id):
            continue
        if vc.is_playing():
            vc.stop()
        await vc.disconnect(force=True)
    # Flip the voice_use entry in the remote settings file to 0.
    raw_setting = repo.get_contents("test_setting.ini")
    setting_lines = base64.b64decode(raw_setting.content).decode('utf-8').split('\n')
    for idx in range(len(setting_lines)):
        if setting_lines[idx].startswith("voice_use ="):
            setting_lines[idx] = f"voice_use = 0\r"
            basicSetting[21] = "0"
    contents = repo.get_contents("test_setting.ini")
    repo.update_file(contents.path, "test_setting", '\n'.join(setting_lines), contents.sha)
    return await ctx.send(f"```보이스를 사용하지 않도록 설정하였습니다.!```")
################ 럭키박스 ################
@commands.command(name=command[41][0], aliases=command[41][1:])
async def command_randombox_game(self, ctx : commands.Context, *, args : str = None):
    # Giveaway command: gathers entrants via the ✅ reaction for a countdown
    # period, then randomly draws the requested number of winners.
    # Usage: <command> [winners] (wait seconds) *(memo)
    # A dedicated racing channel supersedes the admin channel.
    if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
        return
    if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[19]:
        return
    if not args:
        return await ctx.send(f'```명령어 [추첨인원] (대기시간/초) *(메모) 형태로 입력해주시기 바랍나다.```')
    memo_data : str = ""
    waiting_time : int = 30
    # Everything after '*' is a free-text memo; the part before is split into args.
    if args.find("*") == -1:
        input_game_data = args.split()
    else:
        input_game_data = args[:args.find("*")-1].split()
        memo_data = args[args.find("*")+1:]
    try:
        num_cong = int(input_game_data[0]) # number of winners to draw
        if num_cong <= 0:
            return await ctx.send(f'```추첨인원이 0보다 작거나 같습니다. 재입력 해주세요```')
    except ValueError:
        return await ctx.send('```추첨인원은 숫자로 입력 바랍니다\nex)!럭키박스 1```')
    if len(input_game_data) >= 2:
        waiting_time : int = 30
        try:
            waiting_time = int(input_game_data[1]) # countdown length in seconds
            if waiting_time <= 0 :
                return await ctx.send(f'```대기시간이 0보다 작거나 같습니다. 재입력 해주세요```')
        except ValueError:
            return await ctx.send(f'```대기시간(초)는 숫자로 입력 바랍니다\nex)!럭키박스 1 60```')
    reaction_emoji : list = ["✅", "❌"]
    embed = discord.Embed(title = f"📦 럭키박스! 묻고 더블로 가! (잔여시간 : {waiting_time}초)", description = f"참가를 원하시면 ✅를 클릭해주세요!", timestamp =datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=int(basicSetting[0])))),
        color=0x00ff00
        )
    if memo_data != "":
        embed.add_field(name = "📜 메모", value = f"```{memo_data}```", inline=False)
    game_message : discord.message.Message = await ctx.send(embed = embed)
    for emoji in reaction_emoji:
        await game_message.add_reaction(emoji)
    cache_msg = await ctx.fetch_message(game_message.id)
    # Tick once per second: refresh the remaining-time title, re-fetch the
    # message, and watch ❌ for a cancellation by the command author.
    for i in range(waiting_time):
        embed.title = f"📦 럭키박스! 묻고 더블로 가! (잔여시간 : {waiting_time - i}초)"
        await game_message.edit(embed=embed)
        cache_msg = await ctx.fetch_message(game_message.id)
        if cache_msg.reactions[1].count >= 2:
            tmp_users = await cache_msg.reactions[1].users().flatten()
            for user in tmp_users:
                # Only the author's ❌ cancels the game.
                if user.id == ctx.author.id:
                    embed.title = f"😫 럭키박스! 취소! 😱"
                    embed.description = ""
                    await game_message.edit(embed=embed)
                    return await ctx.send(f"```게임이 취소되었습니다.!```")
        await asyncio.sleep(1)
    # ✅ count of 1 means only the bot's own seed reaction: nobody entered.
    if cache_msg.reactions[0].count == 1:
        embed.title = f"😫 럭키박스! 추첨 실패! 😱"
        embed.description = ""
        await game_message.edit(embed=embed)
        return await ctx.send(f"```참여자가 없어 게임이 취소되었습니다.!```")
    # Winner count must be strictly below the entrant count (bot excluded).
    if num_cong >= cache_msg.reactions[0].count-1:
        embed.title = f"😫 럭키박스! 추첨 실패! 😱"
        embed.description = ""
        await game_message.edit(embed=embed)
        return await ctx.send(f'```추첨인원이 참여인원과 같거나 많습니다. 재입력 해주세요```')
    participant_users = await cache_msg.reactions[0].users().flatten()
    # Drop the bot itself from the entrant list.
    # NOTE(review): if the bot were ever absent from the list, index 0 (a real
    # entrant) would be deleted — appears impossible since the bot seeds ✅
    # itself, but worth confirming.
    del_index : int = 0
    for i, user in enumerate(participant_users):
        if self.bot.user.id == user.id:
            del_index = i
    del participant_users[del_index]
    user_name_list : list = []
    for user in participant_users:
        user_name_list.append(user.mention)
    # Repeated shuffles/samples for show; only the final sample is used.
    for _ in range(num_cong + 5):
        random.shuffle(user_name_list)
    result_users = None
    for _ in range(num_cong + 5):
        result_users = random.sample(user_name_list, num_cong)
    lose_user = list(set(user_name_list)-set(result_users))
    embed.title = f"🎉 럭키박스! 결과발표! 🎉"
    embed.description = ""
    embed.add_field(name = f"👥 참가자 ({len(user_name_list)}명)", value = f"{', '.join(user_name_list)}", inline=False)
    embed.add_field(name = f"😍 당첨 ({num_cong}명)", value = f"{', '.join(result_users)}")
    if len(lose_user) != 0:
        embed.add_field(name = f"😭 낙첨 ({len(lose_user)}명)", value = f"{', '.join(lose_user)}")
    return await game_message.edit(embed=embed)
################ 컷등록 ################
@commands.command(name=command[43][0], aliases=command[43][1:])
async def multi_boss_cut(self, ctx, *, args : str = None):
    """Bulk "cut" registration: parse "HH:MM(:SS) : boss" lines and schedule spawns.

    Each input time is treated as the last kill time; the boss period is added
    to get the next spawn. A future time is assumed to be from yesterday.
    Lines starting with '@' are skipped.

    Bug fix: minute/second validation used "> 60", so a value of exactly 60
    passed validation and then crashed datetime.replace with an uncaught
    ValueError. The range is now 0-59 (negatives rejected too).
    """
    if ctx.message.channel.id != basicSetting[7]:
        return
    if not args:
        return await ctx.send('```보스타임 정보를 입력해주세요```', tts=False)
    boss_data_list : list = args.split("\n")
    boss_data_dict : dict = {}
    result_boss_name : list = []
    for boss_data in boss_data_list:
        # Split "time : name" — the name is everything after the last ": ".
        tmp_boss_name = boss_data[boss_data.rfind(": ")+1:].strip()
        if tmp_boss_name.find(" ") != -1:
            tmp_boss_name = tmp_boss_name[:tmp_boss_name.find(" ")].strip()
        tmp_boss_time = boss_data[:boss_data.rfind(" : ")].strip()
        try:
            if list(tmp_boss_time).count(":") > 1:
                # HH:MM:SS form
                tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
                tmp_minute = int(tmp_boss_time[tmp_boss_time.find(":")+1:tmp_boss_time.rfind(":")])
                tmp_second = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
            else:
                # HH:MM form — seconds default to 0
                tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
                tmp_minute = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
                tmp_second = 0
            # Minutes/seconds must be 0-59 (the old "> 60" let 60 through).
            if tmp_hour > 23 or tmp_hour < 0 or tmp_minute > 59 or tmp_minute < 0 or tmp_second > 59 or tmp_second < 0:
                return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요.")
        except:
            return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요. ")
        if "@" != boss_data[0]:
            boss_data_dict[tmp_boss_name] = {"hour" : tmp_hour, "minute" : tmp_minute, "second" : tmp_second}
    for i in range(bossNum):
        if bossData[i][0] in boss_data_dict:
            curr_now = datetime.datetime.now()
            now2 = datetime.datetime.now()
            tmp_now = datetime.datetime.now()
            tmp_now = tmp_now.replace(hour=int(boss_data_dict[bossData[i][0]]["hour"]), minute=int(boss_data_dict[bossData[i][0]]["minute"]), second=int(boss_data_dict[bossData[i][0]]["second"]))
            bossFlag[i] = False
            bossFlag0[i] = False
            bossMungFlag[i] = False
            bossMungCnt[i] = 0
            # A future cut time must have been yesterday.
            if tmp_now > now2 :
                tmp_now = tmp_now + datetime.timedelta(days=int(-1))
            if tmp_now < now2 :
                # Roll forward in whole spawn periods, counting missed spawns.
                deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
                while now2 > tmp_now :
                    tmp_now = tmp_now + deltaTime
                    bossMungCnt[i] = bossMungCnt[i] + 1
                now2 = tmp_now
                bossMungCnt[i] = bossMungCnt[i] - 1
            else :
                now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
            tmp_bossTime[i] = bossTime[i] = nextTime = now2
            tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
            tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
            # Pre-arm the alert flags if the spawn is already inside an alert window.
            if curr_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[3])):
                bossFlag0[i] = True
            if tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[1])):
                bossFlag[i] = True
                bossFlag0[i] = True
            result_boss_name.append(bossData[i][0])
    return await ctx.send(f"```[{', '.join(result_boss_name)}] 보스 [컷등록]이 완료되었습니다. [{command[22][0]}]으로 등록시간을 확인해보세요```", tts=False)
################ 예상등록 ################
@commands.command(name=command[44][0], aliases=command[44][1:])
async def multi_boss_predict(self, ctx, *, args : str = None):
    """Bulk "predicted" registration: each "HH:MM(:SS) : boss" line IS the next spawn.

    Unlike the cut command, no period is added — a time already in the past is
    pushed to tomorrow. Lines starting with '@' are skipped.

    Bug fix: minute/second validation used "> 60", so a value of exactly 60
    passed validation and then crashed datetime.replace with an uncaught
    ValueError. The range is now 0-59 (negatives rejected too).
    """
    if ctx.message.channel.id != basicSetting[7]:
        return
    if not args:
        return await ctx.send('```보스타임 정보를 입력해주세요```', tts=False)
    boss_data_list : list = args.split("\n")
    boss_data_dict : dict = {}
    result_boss_name : list = []
    for boss_data in boss_data_list:
        # Split "time : name" — the name is everything after the last ": ".
        tmp_boss_name = boss_data[boss_data.rfind(": ")+1:].strip()
        if tmp_boss_name.find(" ") != -1:
            tmp_boss_name = tmp_boss_name[:tmp_boss_name.find(" ")].strip()
        tmp_boss_time = boss_data[:boss_data.rfind(" : ")].strip()
        try:
            if list(tmp_boss_time).count(":") > 1:
                # HH:MM:SS form
                tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
                tmp_minute = int(tmp_boss_time[tmp_boss_time.find(":")+1:tmp_boss_time.rfind(":")])
                tmp_second = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
            else:
                # HH:MM form — seconds default to 0
                tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
                tmp_minute = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
                tmp_second = 0
            # Minutes/seconds must be 0-59 (the old "> 60" let 60 through).
            if tmp_hour > 23 or tmp_hour < 0 or tmp_minute > 59 or tmp_minute < 0 or tmp_second > 59 or tmp_second < 0:
                return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요. ")
        except:
            return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요. ")
        if "@" != boss_data[0]:
            boss_data_dict[tmp_boss_name] = {"hour" : tmp_hour, "minute" : tmp_minute, "second" : tmp_second}
    for i in range(bossNum):
        if bossData[i][0] in boss_data_dict:
            now2 = datetime.datetime.now()
            tmp_now = datetime.datetime.now()
            tmp_now = tmp_now.replace(hour=int(boss_data_dict[bossData[i][0]]["hour"]), minute=int(boss_data_dict[bossData[i][0]]["minute"]), second=int(boss_data_dict[bossData[i][0]]["second"]))
            bossFlag[i] = False
            bossFlag0[i] = False
            bossMungFlag[i] = False
            bossMungCnt[i] = 0
            # A time already in the past means the spawn is tomorrow.
            if tmp_now < now2 :
                tmp_now = tmp_now + datetime.timedelta(days=int(1))
            tmp_bossTime[i] = bossTime[i] = nextTime = tmp_now
            tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
            tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
            # Pre-arm the alert flags if the spawn is already inside an alert window.
            if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
                bossFlag0[i] = True
            if tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
                bossFlag[i] = True
                bossFlag0[i] = True
            result_boss_name.append(bossData[i][0])
    return await ctx.send(f"```[{', '.join(result_boss_name)}] 보스 [예상등록]이 완료되었습니다. [{command[22][0]}]으로 등록시간을 확인해보세요```", tts=False)
################ 추가등록 ################
@commands.command(name=command[45][0], aliases=command[45][1:])
async def multi_boss_delta_add(self, ctx, *, args : str = None):
    """Register several bosses at (given time + N hours), rolling forward by period if past.

    Format: <command> [HH:MM] [hours to add] [boss1] [boss2] ...

    Bug fix: minute validation used "> 60" and had no negative check, so
    inputs like "12:60" or "12:-1" passed validation and then crashed
    datetime.replace with an uncaught ValueError. Range is now 0-59.
    """
    if ctx.message.channel.id != basicSetting[7]:
        return
    if not args:
        return await ctx.send(f"```[{command[45][0]}] [시간(00:00)] [추가시간(숫자)] [보스명1] [보스명2] [보스명3] ... 양식으로 입력해주세요```", tts=False)
    input_data_list : list = []
    input_data_list = args.split()
    result_boss_name : list = []
    if len(input_data_list) < 3:
        return await ctx.send(f"```[{command[45][0]}] [시간(00:00)] [추가시간(숫자)] [보스명1] [보스명2] [보스명3] ... 양식으로 입력해주세요```", tts=False)
    try:
        input_hour = int(input_data_list[0][:input_data_list[0].find(":")])
        input_minute = int(input_data_list[0][input_data_list[0].find(":")+1:])
        input_delta_time = int(input_data_list[1])
    except:
        return await ctx.send(f"시간 및 추가시간은 숫자로 입력해주세요. ")
    boss_name_list : list = input_data_list[2:]
    # Minutes must be 0-59 (the old "> 60" let 60 through; negatives too).
    if input_hour > 23 or input_hour < 0 or input_minute > 59 or input_minute < 0:
        return await ctx.send(f"올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요.")
    for i in range(bossNum):
        if bossData[i][0] in boss_name_list:
            curr_now = datetime.datetime.now()
            now2 = datetime.datetime.now()
            tmp_now = datetime.datetime.now()
            # Base time plus the requested extra hours.
            tmp_now = tmp_now.replace(hour=int(input_hour), minute=int(input_minute), second=0) + datetime.timedelta(hours=int(input_delta_time))
            bossFlag[i] = False
            bossFlag0[i] = False
            bossMungFlag[i] = False
            bossMungCnt[i] = 0
            if tmp_now < now2 :
                # Past target: roll forward in whole spawn periods, counting misses.
                deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
                while now2 > tmp_now :
                    tmp_now = tmp_now + deltaTime
                    bossMungCnt[i] = bossMungCnt[i] + 1
                now2 = tmp_now
                bossMungCnt[i] = bossMungCnt[i] - 1
            else :
                now2 = tmp_now
            tmp_bossTime[i] = bossTime[i] = nextTime = now2
            tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
            tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
            # Pre-arm the alert flags if the spawn is already inside an alert window.
            if curr_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[3])):
                bossFlag0[i] = True
            if tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[1])):
                bossFlag[i] = True
                bossFlag0[i] = True
            result_boss_name.append(bossData[i][0])
    return await ctx.send(f"```[{', '.join(list(result_boss_name))}] 보스 [추가등록]이 완료되었습니다. [{command[27][0]}]으로 등록시간을 확인해보세요```", tts=False)
################ sound playback commands ################
@commands.command(name='!오빠')
async def brother1_(self, ctx):
    """Play the '오빠' sound clip (only when voice mode is enabled)."""
    if basicSetting[21] == "1":
        return await PlaySound(ctx.voice_client, './sound/오빠.mp3')
    return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
@commands.command(name='!언니')
async def sister_(self, ctx):
    """Play the '언니' sound clip (only when voice mode is enabled)."""
    if basicSetting[21] == "1":
        return await PlaySound(ctx.voice_client, './sound/언니.mp3')
    return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
@commands.command(name='!형')
async def brother2_(self, ctx):
    """Play the '형' sound clip (only when voice mode is enabled)."""
    if basicSetting[21] == "1":
        return await PlaySound(ctx.voice_client, './sound/형.mp3')
    return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
@commands.command(name='!TJ', aliases=['!tj'])
async def TJ_(self, ctx):
    """Play one of the eight TJ sound clips at random (voice mode only)."""
    if basicSetting[21] != "1":
        return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
    clip_no = random.randrange(1, 9)  # picks TJ1.mp3 .. TJ8.mp3
    return await PlaySound(ctx.voice_client, f'./sound/TJ{clip_no}.mp3')
class IlsangDistributionBot(commands.AutoShardedBot):
def __init__(self):
    # Empty-string prefix means every message is a command candidate;
    # the built-in help command is disabled.
    super().__init__(command_prefix=[""], help_command=None)
def run(self):
    # Start the bot with the stored Discord token; reconnect on drops.
    super().run(access_token, reconnect=True)
async def on_ready(self):
    # Runs once after login: loads stored settings, validates every configured
    # channel ID against the live guild, joins the voice channel when enabled,
    # and sets the bot's presence. Invalid optional-channel IDs are cleared.
    global basicSetting
    global channel
    global channel_info
    global channel_name
    global channel_id
    global channel_voice_name
    global channel_voice_id
    global channel_type
    global chkvoicechannel
    global chflg
    global endTime
    global setting_channel_name
    print("Logged in as ") # print the bot's name and ID to the console
    print(self.user.name)
    print(self.user.id)
    print("===========")
    # Collect the guild's text/voice channel names and IDs, then load saved data.
    channel_name, channel_id, channel_voice_name, channel_voice_id = await get_guild_channel_info(self)
    await dbLoad()
    if str(basicSetting[7]) in channel_id:
        # Admin text channel is valid — finish initialisation.
        channel = basicSetting[7]
        setting_channel_name = self.get_channel(basicSetting[7]).name
        now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
        print('< 접속시간 [' + now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S') + '] >')
        print('< 텍스트채널 [' + self.get_channel(basicSetting[7]).name + '] 접속완료>')
        # voice_use == "1": try joining the configured voice channel.
        if basicSetting[21] == "1" and str(basicSetting[6]) in channel_voice_id:
            try:
                await self.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
                print('< 음성채널 [' + self.get_channel(basicSetting[6]).name + '] 접속완료 >')
            except:
                print('< 음성채널 [' + self.get_channel(basicSetting[6]).name + '] 접속에러 >')
                pass
        elif basicSetting[21] == "1" and str(basicSetting[6]) not in channel_voice_id:
            # Voice enabled but the saved voice-channel ID no longer exists.
            print(f"설정된 음성채널 값이 없거나 잘못 됐습니다. 음성채널 접속 후 **[{command[5][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
            await self.get_channel(int(basicSetting[7])).send(f"설정된 음성채널 값이 없거나 잘못 됐습니다. 음성채널 접속 후 **[{command[5][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
        # Optional channels: keep each saved ID if it still exists, otherwise
        # clear the setting and ask for re-configuration via the console.
        if basicSetting[8] != "":
            if str(basicSetting[8]) in channel_id:
                print('< 사다리채널 [' + self.get_channel(int(basicSetting[8])).name + '] 접속완료 >')
            else:
                basicSetting[8] = ""
                print(f"사다리채널 ID 오류! [{command[28][0]} 사다리] 명령으로 재설정 바랍니다.")
        if basicSetting[11] != "":
            if str(basicSetting[11]) in channel_id:
                print('< 정산채널 [' + self.get_channel(int(basicSetting[11])).name + '] 접속완료>')
            else:
                basicSetting[11] = ""
                print(f"정산채널 ID 오류! [{command[28][0]} 정산] 명령으로 재설정 바랍니다.")
        if basicSetting[18] != "":
            if str(basicSetting[18]) in channel_id:
                print('< 척살채널 [' + self.get_channel(int(basicSetting[18])).name + '] 접속완료>')
            else:
                basicSetting[18] = ""
                print(f"척살채널 ID 오류! [{command[28][0]} 척살] 명령으로 재설정 바랍니다.")
        if basicSetting[19] != "":
            if str(basicSetting[19]) in channel_id:
                print('< 경주채널 [' + self.get_channel(int(basicSetting[19])).name + '] 접속완료>')
            else:
                basicSetting[19] = ""
                print(f"경주채널 ID 오류! [{command[28][0]} 경주] 명령으로 재설정 바랍니다.")
        if basicSetting[20] != "":
            if str(basicSetting[20]) in channel_id:
                print('< 아이템채널 [' + self.get_channel(int(basicSetting[20])).name + '] 접속완료>')
            else:
                basicSetting[20] = ""
                print(f"아이템채널 ID 오류! [{command[28][0]} 아이템] 명령으로 재설정 바랍니다.")
        # basicSetting[13]: restart interval in days; 0 disables scheduled restarts.
        if int(basicSetting[13]) != 0 :
            print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
            print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
        else :
            print('< 보탐봇 재시작 설정안됨 >')
        chflg = 1
    else:
        # No valid admin channel: clear channel settings and wait for setup.
        basicSetting[6] = ""
        basicSetting[7] = ""
        print(f"설정된 채널 값이 없거나 잘못 됐습니다. **[{command[0][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
    # Discord shows what "game" a user is playing; use that slot to surface
    # the bot's status/help command name.
    await self.change_presence(status=discord.Status.online, activity=discord.Game(name=command[1][0], type=1), afk=False)
async def on_message(self, msg):
await self.wait_until_ready()
if msg.author.bot: #만약 메시지를 보낸사람이 봇일 경우에는
return None #동작하지 않고 무시합니다.
ori_msg = msg
global channel
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chflg
global LoadChk
global indexFixedBossname
global FixedBossDateData
global gc #정산
global credentials #정산
global regenembed
global command
global kill_Data
id = msg.author.id #id라는 변수에는 메시지를 보낸사람의 ID를 담습니다.
if chflg == 1 :
if self.get_channel(basicSetting[7]).id == msg.channel.id:
channel = basicSetting[7]
message = msg
for command_str in ["컷", "멍", "예상", "삭제", "메모", "카톡켬", "카톡끔"]:
if command_str in message.content:
tmp_msg : str = ""
for key, value in boss_nick.items():
if message.content[:message.content.find(command_str)].strip() in value:
message.content = message.content.replace(message.content[:message.content.find(command_str)], key)
hello = message.content
for i in range(bossNum):
################ 보스 컷처리 ################
if message.content.startswith(bossData[i][0] +'컷') or message.content.startswith(convertToInitialLetters(bossData[i][0] +'컷')) or message.content.startswith(bossData[i][0] +' 컷') or message.content.startswith(convertToInitialLetters(bossData[i][0] +' 컷')):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
curr_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_msg = bossData[i][0] +'컷'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if curr_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
################ 보스 멍 처리 ################
if message.content.startswith(bossData[i][0] +'멍') or message.content.startswith(bossData[i][0] +' 멍'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'멍'
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if len(hello) > len(tmp_msg) + 3 :
temptime = tmp_now
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossMungCnt[i] = 0
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
if temptime > tmp_now :
temptime = temptime + datetime.timedelta(days=int(-1))
if temptime < tmp_now :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while temptime < tmp_now :
temptime = temptime + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = temptime
tmp_bossTimeString[i] = bossTimeString[i] = temptime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = temptime.strftime('%Y-%m-%d')
if tmp_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
if tmp_bossTime[i] < tmp_now :
nextTime = tmp_bossTime[i] + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if tmp_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
await self.get_channel(channel).send('```' + bossData[i][0] + '탐이 아직 안됐습니다. 다음 ' + bossData[i][0] + '탐 [' + tmp_bossTimeString[i] + '] 입니다```', tts=False)
################ 예상 보스 타임 입력 ################
if message.content.startswith(bossData[i][0] +'예상') or message.content.startswith(bossData[i][0] +' 예상'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'예상'
if len(hello) > len(tmp_msg) + 4 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now < now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(1))
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_now
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
await self.get_channel(channel).send('```' + bossData[i][0] +' 예상 시간을 입력해주세요.```', tts=False)
################ 보스타임 삭제 ################
if message.content == bossData[i][0] +'삭제' or message.content == bossData[i][0] +' 삭제':
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
await self.get_channel(channel).send('<' + bossData[i][0] + ' 삭제완료>', tts=False)
await dbSave()
print ('<' + bossData[i][0] + ' 삭제완료>')
################ 보스별 메모 ################
if message.content.startswith(bossData[i][0] +'메모 '):
tmp_msg = bossData[i][0] +'메모 '
bossData[i][6] = hello[len(tmp_msg):]
await self.get_channel(channel).send('< ' + bossData[i][0] + ' [ ' + bossData[i][6] + ' ] 메모등록 완료>', tts=False)
if message.content.startswith(bossData[i][0] +'메모삭제'):
bossData[i][6] = ''
await self.get_channel(channel).send('< ' + bossData[i][0] + ' 메모삭제 완료>', tts=False)
await self.process_commands(ori_msg)
async def on_command_error(self, ctx : commands.Context, error : commands.CommandError):
if isinstance(error, CommandNotFound):
return
elif isinstance(error, MissingRequiredArgument):
return
elif isinstance(error, discord.ext.commands.MissingPermissions):
return await ctx.send(f"**[{ctx.message.content.split()[0]}]** 명령을 사용할 권한이 없습니다.!")
elif isinstance(error, discord.ext.commands.CheckFailure):
return await ctx.send(f"**[{ctx.message.content.split()[0]}]** 명령을 사용할 권한이 없습니다.!")
raise error
    async def close(self):
        # Shut the Discord connection down cleanly, then log completion
        # to the console (runtime string intentionally left in Korean).
        await super().close()
        print("일상디코봇 종료 완료.")
# Instantiate the bot and register its cogs before starting the event loop.
ilsang_distribution_bot : IlsangDistributionBot = IlsangDistributionBot()
ilsang_distribution_bot.add_cog(mainCog(ilsang_distribution_bot))
ilsang_distribution_bot.add_cog(taskCog(ilsang_distribution_bot))
# NOTE(review): run() is called without an explicit token here -- presumably
# the token (BOT_TOKEN env var) is supplied inside the overridden run();
# verify against the class definition above this chunk.
ilsang_distribution_bot.run()
|
[] |
[] |
[
"BOT_TOKEN",
"GIT_REPO_RESTART",
"GIT_REPO",
"AWS_KEY",
"GIT_TOKEN",
"AWS_SECRET_KEY"
] |
[]
|
["BOT_TOKEN", "GIT_REPO_RESTART", "GIT_REPO", "AWS_KEY", "GIT_TOKEN", "AWS_SECRET_KEY"]
|
python
| 6 | 0 | |
src/cmd/go/go_test.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main_test
import (
"bytes"
"fmt"
"go/format"
"internal/race"
"internal/testenv"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
)
// Capability flags for the test host; init below narrows them per GOOS/GOARCH.
var (
	canRun = true // whether we can run go or ./testgo
	canRace = false // whether we can run the race detector
	canCgo = false // whether we can use cgo
	exeSuffix string // ".exe" on Windows
	skipExternal = false // skip external tests
)
// init disables or restricts the harness on platforms where the go
// tool cannot be exercised or is too slow for the full external suite.
func init() {
	switch runtime.GOOS {
	case "android", "nacl":
		canRun = false
	case "darwin":
		switch runtime.GOARCH {
		case "arm", "arm64":
			canRun = false
		}
	case "linux":
		switch runtime.GOARCH {
		case "arm":
			// many linux/arm machines are too slow to run
			// the full set of external tests.
			skipExternal = true
		case "mips", "mipsle", "mips64", "mips64le":
			// Also slow.
			skipExternal = true
			if testenv.Builder() != "" {
				// On the builders, skip the cmd/go
				// tests. They're too slow and already
				// covered by other ports. There's
				// nothing os/arch specific in the
				// tests.
				canRun = false
			}
		}
	case "freebsd":
		switch runtime.GOARCH {
		case "arm":
			// many freebsd/arm machines are too slow to run
			// the full set of external tests.
			skipExternal = true
			canRun = false
		}
	case "windows":
		exeSuffix = ".exe"
	}
}
// testGOROOT is the GOROOT to use when running testgo, a cmd/go binary
// build from this process's current GOROOT, but run from a different
// (temp) directory.
var testGOROOT string

// testCC is the C compiler reported by "go env CC", captured in TestMain.
var testCC string
// The TestMain function creates a go command for testing purposes and
// deletes it after the tests have been run.
func TestMain(m *testing.M) {
	if canRun {
		// Build the testgo binary with the "testgo" build tag,
		// matching the race mode of the test binary itself.
		args := []string{"build", "-tags", "testgo", "-o", "testgo" + exeSuffix}
		if race.Enabled {
			args = append(args, "-race")
		}
		out, err := exec.Command("go", args...).CombinedOutput()
		if err != nil {
			fmt.Fprintf(os.Stderr, "building testgo failed: %v\n%s", err, out)
			os.Exit(2)
		}
		// Record the ambient GOROOT and CC for use by individual tests.
		out, err = exec.Command("go", "env", "GOROOT").CombinedOutput()
		if err != nil {
			fmt.Fprintf(os.Stderr, "could not find testing GOROOT: %v\n%s", err, out)
			os.Exit(2)
		}
		testGOROOT = strings.TrimSpace(string(out))
		out, err = exec.Command("go", "env", "CC").CombinedOutput()
		if err != nil {
			fmt.Fprintf(os.Stderr, "could not find testing CC: %v\n%s", err, out)
			os.Exit(2)
		}
		testCC = strings.TrimSpace(string(out))
		// Probe the freshly built binary; a failure here disables all runs.
		if out, err := exec.Command("./testgo"+exeSuffix, "env", "CGO_ENABLED").Output(); err != nil {
			fmt.Fprintf(os.Stderr, "running testgo failed: %v\n", err)
			canRun = false
		} else {
			canCgo, err = strconv.ParseBool(strings.TrimSpace(string(out)))
			if err != nil {
				fmt.Fprintf(os.Stderr, "can't parse go env CGO_ENABLED output: %v\n", strings.TrimSpace(string(out)))
			}
		}
		switch runtime.GOOS {
		case "linux", "darwin", "freebsd", "windows":
			// The race detector doesn't work on Alpine Linux:
			// golang.org/issue/14481
			canRace = canCgo && runtime.GOARCH == "amd64" && !isAlpineLinux()
		}
	}
	// Don't let these environment variables confuse the test.
	os.Unsetenv("GOBIN")
	os.Unsetenv("GOPATH")
	os.Unsetenv("GIT_ALLOW_PROTOCOL")
	if home, ccacheDir := os.Getenv("HOME"), os.Getenv("CCACHE_DIR"); home != "" && ccacheDir == "" {
		// On some systems the default C compiler is ccache.
		// Setting HOME to a non-existent directory will break
		// those systems. Set CCACHE_DIR to cope. Issue 17668.
		os.Setenv("CCACHE_DIR", filepath.Join(home, ".ccache"))
	}
	os.Setenv("HOME", "/test-go-home-does-not-exist")
	r := m.Run()
	if canRun {
		os.Remove("testgo" + exeSuffix)
	}
	os.Exit(r)
}
// isAlpineLinux reports whether the test host is Alpine Linux, detected
// via the /etc/alpine-release file that Alpine installs.
func isAlpineLinux() bool {
	if runtime.GOOS != "linux" {
		return false
	}
	fi, err := os.Lstat("/etc/alpine-release")
	if err != nil {
		return false
	}
	return fi.Mode().IsRegular()
}
// The length of an mtime tick on this system. This is an estimate of
// how long we need to sleep to ensure that the mtime of two files is
// different.
// We used to try to be clever but that didn't always work (see golang.org/issue/12205).
var mtimeTick time.Duration = 1 * time.Second
// Manage a single run of the testgo binary.
type testgoData struct {
	t *testing.T // test being driven
	temps []string // extra temp files/dirs registered via creatingTemp
	wd string // original working directory, set on first cd
	env []string // environment for the testgo process (lazy copy of os.Environ)
	tempdir string // root temp directory created by makeTempdir
	ran bool // whether doRun has executed the binary yet
	inParallel bool // whether parallel() has been called
	stdout, stderr bytes.Buffer // captured output of the last run
}
// testgo sets up for a test that runs testgo.
func testgo(t *testing.T) *testgoData {
	testenv.MustHaveGoBuild(t)

	if skipExternal {
		// Bug fix: t.Skip does not interpret printf verbs (it is
		// Println-style), so the %s placeholders were printed
		// literally. t.Skipf is the formatting variant.
		t.Skipf("skipping external tests on %s/%s", runtime.GOOS, runtime.GOARCH)
	}

	return &testgoData{t: t}
}
// must aborts the test immediately when err is non-nil.
func (tg *testgoData) must(err error) {
	if err == nil {
		return
	}
	tg.t.Fatal(err)
}

// check records a non-fatal test failure when err is non-nil.
func (tg *testgoData) check(err error) {
	if err == nil {
		return
	}
	tg.t.Error(err)
}
// parallel runs the test in parallel by calling t.Parallel.
func (tg *testgoData) parallel() {
	if tg.ran {
		tg.t.Fatal("internal testsuite error: call to parallel after run")
	}
	if tg.wd != "" {
		tg.t.Fatal("internal testsuite error: call to parallel after cd")
	}
	// Reject relative testdata paths in GOROOT/GOPATH/GOBIN: they are
	// resolved against the working directory, which a parallel test
	// cannot rely on.
	for _, e := range tg.env {
		if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
			val := e[strings.Index(e, "=")+1:]
			if strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata") {
				tg.t.Fatalf("internal testsuite error: call to parallel with testdata in environment (%s)", e)
			}
		}
	}
	tg.inParallel = true
	tg.t.Parallel()
}
// pwd returns the current directory, failing the test if it cannot be
// determined.
func (tg *testgoData) pwd() string {
	dir, err := os.Getwd()
	if err != nil {
		tg.t.Fatalf("could not get working directory: %v", err)
	}
	return dir
}
// cd changes the current directory to the named directory. Note that
// using this means that the test must not be run in parallel with any
// other tests.
func (tg *testgoData) cd(dir string) {
	if tg.inParallel {
		tg.t.Fatal("internal testsuite error: changing directory when running in parallel")
	}
	// Remember the original directory on first use so cleanup can restore it.
	if tg.wd == "" {
		tg.wd = tg.pwd()
	}
	abs, err := filepath.Abs(dir)
	tg.must(os.Chdir(dir))
	// Keep PWD in sync when the absolute path could be computed; a
	// failed Abs is tolerated and simply leaves PWD untouched.
	if err == nil {
		tg.setenv("PWD", abs)
	}
}
// sleep sleeps for one tick, where a tick is a conservative estimate
// of how long it takes for a file modification to get a different
// mtime.
func (tg *testgoData) sleep() {
	time.Sleep(mtimeTick)
}
// setenv sets an environment variable to use when running the test go
// command.
func (tg *testgoData) setenv(name, val string) {
	// See parallel: relative testdata paths are unsafe once
	// t.Parallel has been called.
	if tg.inParallel && (name == "GOROOT" || name == "GOPATH" || name == "GOBIN") && (strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata")) {
		tg.t.Fatalf("internal testsuite error: call to setenv with testdata (%s=%s) after parallel", name, val)
	}
	tg.unsetenv(name)
	tg.env = append(tg.env, name+"="+val)
}
// unsetenv removes an environment variable.
func (tg *testgoData) unsetenv(name string) {
	// Lazily snapshot the process environment on first modification.
	if tg.env == nil {
		tg.env = append([]string(nil), os.Environ()...)
	}
	for i, v := range tg.env {
		if strings.HasPrefix(v, name+"=") {
			tg.env = append(tg.env[:i], tg.env[i+1:]...)
			break
		}
	}
}
// goTool returns the path of the testgo binary to invoke, absolute when
// the test has changed directory.
func (tg *testgoData) goTool() string {
	if tg.wd != "" {
		return filepath.Join(tg.wd, "testgo"+exeSuffix)
	}
	return "./testgo" + exeSuffix
}
// doRun runs the test go command, recording stdout and stderr and
// returning exit status.
func (tg *testgoData) doRun(args []string) error {
	if !canRun {
		panic("testgoData.doRun called but canRun false")
	}
	// See parallel: relative testdata arguments are unsafe in
	// parallel tests.
	if tg.inParallel {
		for _, arg := range args {
			if strings.HasPrefix(arg, "testdata") || strings.HasPrefix(arg, "./testdata") {
				tg.t.Fatal("internal testsuite error: parallel run using testdata")
			}
		}
	}
	// Supply the captured GOROOT unless the test already set one.
	hasGoroot := false
	for _, v := range tg.env {
		if strings.HasPrefix(v, "GOROOT=") {
			hasGoroot = true
			break
		}
	}
	prog := tg.goTool()
	if !hasGoroot {
		tg.setenv("GOROOT", testGOROOT)
	}
	tg.t.Logf("running testgo %v", args)
	cmd := exec.Command(prog, args...)
	tg.stdout.Reset()
	tg.stderr.Reset()
	cmd.Stdout = &tg.stdout
	cmd.Stderr = &tg.stderr
	cmd.Env = tg.env
	status := cmd.Run()
	if tg.stdout.Len() > 0 {
		tg.t.Log("standard output:")
		tg.t.Log(tg.stdout.String())
	}
	if tg.stderr.Len() > 0 {
		tg.t.Log("standard error:")
		tg.t.Log(tg.stderr.String())
	}
	tg.ran = true
	return status
}
// run runs the test go command, and expects it to succeed.
func (tg *testgoData) run(args ...string) {
	err := tg.doRun(args)
	if err != nil {
		tg.t.Logf("go %v failed unexpectedly: %v", args, err)
		tg.t.FailNow()
	}
}

// runFail runs the test go command, and expects it to fail.
func (tg *testgoData) runFail(args ...string) {
	err := tg.doRun(args)
	if err == nil {
		tg.t.Fatal("testgo succeeded unexpectedly")
	}
	tg.t.Log("testgo failed as expected:", err)
}
// runGit runs a git command, and expects it to succeed.
func (tg *testgoData) runGit(dir string, args ...string) {
	cmd := exec.Command("git", args...)
	tg.stdout.Reset()
	tg.stderr.Reset()
	cmd.Stdout = &tg.stdout
	cmd.Stderr = &tg.stderr
	cmd.Dir = dir
	// Run git under the same environment overrides as testgo itself.
	cmd.Env = tg.env
	status := cmd.Run()
	if tg.stdout.Len() > 0 {
		tg.t.Log("git standard output:")
		tg.t.Log(tg.stdout.String())
	}
	if tg.stderr.Len() > 0 {
		tg.t.Log("git standard error:")
		tg.t.Log(tg.stderr.String())
	}
	if status != nil {
		tg.t.Logf("git %v failed unexpectedly: %v", args, status)
		tg.t.FailNow()
	}
}
// getStdout returns standard output of the testgo run as a string.
func (tg *testgoData) getStdout() string {
	if tg.ran {
		return tg.stdout.String()
	}
	tg.t.Fatal("internal testsuite error: stdout called before run")
	return "" // unreachable: Fatal stops the test
}
// getStderr returns standard error of the testgo run as a string.
func (tg *testgoData) getStderr() string {
	if !tg.ran {
		// Bug fix: the message previously said "stdout" (copied from
		// getStdout) even though this accessor reports stderr access.
		tg.t.Fatal("internal testsuite error: stderr called before run")
	}
	return tg.stderr.String()
}
// doGrepMatch reports whether the regular expression match occurs on
// any single line of buffer b, mirroring the behavior of grep.
func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool {
	if !tg.ran {
		tg.t.Fatal("internal testsuite error: grep called before run")
	}
	re := regexp.MustCompile(match)
	lines := bytes.Split(b.Bytes(), []byte{'\n'})
	for _, line := range lines {
		if re.Match(line) {
			return true
		}
	}
	return false
}
// doGrep fails the test when the regular expression match cannot be
// found in buffer b. name identifies the stream being searched
// ("output" or "error"); msg is logged on failure.
func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) {
	if tg.doGrepMatch(match, b) {
		return
	}
	tg.t.Log(msg)
	tg.t.Logf("pattern %v not found in standard %s", match, name)
	tg.t.FailNow()
}

// grepStdout fails, logging msg, unless match occurs in the run's
// standard output.
func (tg *testgoData) grepStdout(match, msg string) {
	tg.doGrep(match, &tg.stdout, "output", msg)
}

// grepStderr fails, logging msg, unless match occurs in the run's
// standard error.
func (tg *testgoData) grepStderr(match, msg string) {
	tg.doGrep(match, &tg.stderr, "error", msg)
}

// grepBoth fails, logging msg, unless match occurs in standard output
// or standard error.
func (tg *testgoData) grepBoth(match, msg string) {
	found := tg.doGrepMatch(match, &tg.stdout) || tg.doGrepMatch(match, &tg.stderr)
	if !found {
		tg.t.Log(msg)
		tg.t.Logf("pattern %v not found in standard output or standard error", match)
		tg.t.FailNow()
	}
}
// doGrepNot fails the test when the regular expression match IS found
// in buffer b. The name and msg arguments are as for doGrep.
func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) {
	if !tg.doGrepMatch(match, b) {
		return
	}
	tg.t.Log(msg)
	tg.t.Logf("pattern %v found unexpectedly in standard %s", match, name)
	tg.t.FailNow()
}

// grepStdoutNot fails, logging msg, if match occurs in the run's
// standard output.
func (tg *testgoData) grepStdoutNot(match, msg string) {
	tg.doGrepNot(match, &tg.stdout, "output", msg)
}

// grepStderrNot fails, logging msg, if match occurs in the run's
// standard error.
func (tg *testgoData) grepStderrNot(match, msg string) {
	tg.doGrepNot(match, &tg.stderr, "error", msg)
}

// grepBothNot fails, logging msg, if match occurs in standard output
// or standard error.
func (tg *testgoData) grepBothNot(match, msg string) {
	if tg.doGrepMatch(match, &tg.stdout) || tg.doGrepMatch(match, &tg.stderr) {
		tg.t.Log(msg)
		tg.t.Fatalf("pattern %v found unexpectedly in standard output or standard error", match)
	}
}
// doGrepCount counts how many lines of buffer b match the regexp.
func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int {
	if !tg.ran {
		tg.t.Fatal("internal testsuite error: doGrepCount called before run")
	}
	re := regexp.MustCompile(match)
	count := 0
	for _, line := range bytes.Split(b.Bytes(), []byte{'\n'}) {
		if re.Match(line) {
			count++
		}
	}
	return count
}

// grepCountBoth returns the number of times a regexp is seen in both
// standard output and standard error.
func (tg *testgoData) grepCountBoth(match string) int {
	total := tg.doGrepCount(match, &tg.stdout)
	total += tg.doGrepCount(match, &tg.stderr)
	return total
}
// creatingTemp records that the test plans to create a temporary file
// or directory. If the file or directory exists already, it will be
// removed. When the test completes, the file or directory will be
// removed if it exists.
func (tg *testgoData) creatingTemp(path string) {
	if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) {
		tg.t.Fatalf("internal testsuite error: creatingTemp(%q) with absolute path not in temporary directory", path)
	}
	// If we have changed the working directory, make sure we have
	// an absolute path, because we are going to change directory
	// back before we remove the temporary.
	if tg.wd != "" && !filepath.IsAbs(path) {
		path = filepath.Join(tg.pwd(), path)
	}
	// Clear any leftover from a previous run, then register for cleanup.
	tg.must(os.RemoveAll(path))
	tg.temps = append(tg.temps, path)
}
// makeTempdir makes a temporary directory for a run of testgo. If the
// temporary directory was already created, this does nothing.
func (tg *testgoData) makeTempdir() {
	if tg.tempdir != "" {
		return
	}
	dir, err := ioutil.TempDir("", "gotest")
	tg.must(err)
	tg.tempdir = dir
}
// tempFile adds a temporary file for a run of testgo. Go source files
// (*.go) are gofmt-formatted before being written when they parse.
func (tg *testgoData) tempFile(path, contents string) {
	tg.makeTempdir()
	tg.must(os.MkdirAll(filepath.Join(tg.tempdir, filepath.Dir(path)), 0755))
	// Renamed from "bytes": that name shadowed the bytes package,
	// which this file uses elsewhere.
	data := []byte(contents)
	if strings.HasSuffix(path, ".go") {
		formatted, err := format.Source(data)
		if err == nil {
			// Formatting failures are tolerated deliberately: some
			// tests write files with invalid Go syntax on purpose.
			data = formatted
		}
	}
	tg.must(ioutil.WriteFile(filepath.Join(tg.tempdir, path), data, 0644))
}
// tempDir adds a temporary directory for a run of testgo.
func (tg *testgoData) tempDir(path string) {
	tg.makeTempdir()
	err := os.MkdirAll(filepath.Join(tg.tempdir, path), 0755)
	if err != nil && !os.IsExist(err) {
		tg.t.Fatal(err)
	}
}
// path returns the absolute pathname to file with the temporary
// directory.
func (tg *testgoData) path(name string) string {
	if tg.tempdir == "" {
		tg.t.Fatalf("internal testsuite error: path(%q) with no tempdir", name)
	}
	if name != "." {
		return filepath.Join(tg.tempdir, name)
	}
	return tg.tempdir
}
// mustExist fails if path does not exist.
func (tg *testgoData) mustExist(path string) {
	_, err := os.Stat(path)
	if err == nil {
		return
	}
	if os.IsNotExist(err) {
		tg.t.Fatalf("%s does not exist but should", path)
	}
	tg.t.Fatalf("%s stat failed: %v", path, err)
}
// mustNotExist fails if path exists.
func (tg *testgoData) mustNotExist(path string) {
	_, err := os.Stat(path)
	if err == nil || !os.IsNotExist(err) {
		tg.t.Fatalf("%s exists but should not (%v)", path, err)
	}
}
// wantExecutable fails with msg if path is not executable.
func (tg *testgoData) wantExecutable(path, msg string) {
	st, err := os.Stat(path)
	if err != nil {
		if !os.IsNotExist(err) {
			tg.t.Log(err)
		}
		tg.t.Fatal(msg)
	}
	// Windows has no executable bit, so only check the mode elsewhere.
	if runtime.GOOS != "windows" && st.Mode()&0111 == 0 {
		tg.t.Fatalf("binary %s exists but is not executable", path)
	}
}
// wantArchive fails if path is not an archive.
func (tg *testgoData) wantArchive(path string) {
	f, err := os.Open(path)
	if err != nil {
		tg.t.Fatal(err)
	}
	buf := make([]byte, 100)
	// Read errors (e.g. a file shorter than 100 bytes) are deliberately
	// ignored: a partial read still fails the magic-number check below.
	io.ReadFull(f, buf)
	f.Close()
	// Toolchain archive files begin with the "!<arch>\n" magic header.
	if !bytes.HasPrefix(buf, []byte("!<arch>\n")) {
		tg.t.Fatalf("file %s exists but is not an archive", path)
	}
}
// isStale reports whether pkg is stale, and why
func (tg *testgoData) isStale(pkg string) (bool, string) {
	tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg)
	v := strings.TrimSpace(tg.getStdout())
	// The template prints "<stale>:<reason>"; split on the first colon
	// only, since the reason text may itself contain colons.
	f := strings.SplitN(v, ":", 2)
	if len(f) == 2 {
		switch f[0] {
		case "true":
			return true, f[1]
		case "false":
			return false, f[1]
		}
	}
	tg.t.Fatalf("unexpected output checking staleness of package %v: %v", pkg, v)
	panic("unreachable")
}
// wantStale fails with msg if pkg is not stale.
func (tg *testgoData) wantStale(pkg, reason, msg string) {
	stale, why := tg.isStale(pkg)
	if !stale {
		tg.t.Fatal(msg)
	}
	// An empty reason asserts "stale for no particular reason", so any
	// non-empty explanation is wrong; otherwise the explanation must
	// mention the expected reason. (Parentheses added for clarity;
	// precedence is unchanged.)
	if (reason == "" && why != "") || !strings.Contains(why, reason) {
		tg.t.Errorf("wrong reason for Stale=true: %q, want %q", why, reason)
	}
}

// wantNotStale fails with msg if pkg is stale.
func (tg *testgoData) wantNotStale(pkg, reason, msg string) {
	stale, why := tg.isStale(pkg)
	if stale {
		tg.t.Fatal(msg)
	}
	if (reason == "" && why != "") || !strings.Contains(why, reason) {
		tg.t.Errorf("wrong reason for Stale=false: %q, want %q", why, reason)
	}
}
// cleanup cleans up a test that runs testgo.
func (tg *testgoData) cleanup() {
	// Restore the working directory first: registered temp paths may
	// have been recorded relative to it.
	if tg.wd != "" {
		if err := os.Chdir(tg.wd); err != nil {
			// We are unlikely to be able to continue.
			fmt.Fprintln(os.Stderr, "could not restore working directory, crashing:", err)
			os.Exit(2)
		}
	}
	for _, path := range tg.temps {
		tg.check(os.RemoveAll(path))
	}
	if tg.tempdir != "" {
		tg.check(os.RemoveAll(tg.tempdir))
	}
}
// failSSH puts an ssh executable in the PATH that always fails.
// This is to stub out uses of ssh by go get.
func (tg *testgoData) failSSH() {
	wd, err := os.Getwd()
	if err != nil {
		tg.t.Fatal(err)
	}
	fail := filepath.Join(wd, "testdata/failssh")
	newPath := fmt.Sprintf("%v%c%v", fail, filepath.ListSeparator, os.Getenv("PATH"))
	tg.setenv("PATH", newPath)
}
// TestFileLineInErrorMessages checks that "go run" reports compile
// errors with a file:line prefix.
func TestFileLineInErrorMessages(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("err.go", `package main; import "bar"`)
	path := tg.path("err.go")
	tg.runFail("run", path)
	// Grep for the shorter of the absolute and relative spellings of
	// the path.
	shortPath := path
	if rel, err := filepath.Rel(tg.pwd(), path); err == nil && len(rel) < len(path) {
		shortPath = rel
	}
	tg.grepStderr("^"+regexp.QuoteMeta(shortPath)+":", "missing file:line in error message")
}
// TestProgramNameInCrashMessages checks that a forced linker crash
// names the link tool in its error output.
func TestProgramNameInCrashMessages(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("triv.go", `package main; func main() {}`)
	tg.runFail("build", "-ldflags", "-crash_for_testing", tg.path("triv.go"))
	tg.grepStderr(`[/\\]tool[/\\].*[/\\]link`, "missing linker name in error message")
}
// TestBrokenTestsWithoutTestFunctionsAllFail checks that every broken
// package under testdata/src/badtest fails rather than passing silently.
func TestBrokenTestsWithoutTestFunctionsAllFail(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.runFail("test", "./testdata/src/badtest/...")
	tg.grepBothNot("^ok", "test passed unexpectedly")
	tg.grepBoth("FAIL.*badtest/badexec", "test did not run everything")
	tg.grepBoth("FAIL.*badtest/badsyntax", "test did not run everything")
	tg.grepBoth("FAIL.*badtest/badvar", "test did not run everything")
}
// TestGoBuildDashAInDevBranch checks that "go build -a" in a
// development branch rebuilds the runtime.
func TestGoBuildDashAInDevBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("don't rebuild the standard library in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("install", "math") // should be up to date already but just in case
	tg.setenv("TESTGO_IS_GO_RELEASE", "0")
	tg.run("build", "-v", "-a", "math")
	tg.grepStderr("runtime", "testgo build -a math in dev branch DID NOT build runtime, but should have")
	// Everything is out of date. Rebuild to leave things in a better state.
	tg.run("install", "std")
}
// TestGoBuildDashAInReleaseBranch checks that on a release branch a
// newer runtime.a does not make dependent packages look stale.
func TestGoBuildDashAInReleaseBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("don't rebuild the standard library in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("install", "math", "net/http") // should be up to date already but just in case
	tg.setenv("TESTGO_IS_GO_RELEASE", "1")
	tg.run("install", "-v", "-a", "math")
	tg.grepStderr("runtime", "testgo build -a math in release branch DID NOT build runtime, but should have")
	// Now runtime.a is updated (newer mtime), so everything would look stale if not for being a release.
	tg.run("build", "-v", "net/http")
	tg.grepStderrNot("strconv", "testgo build -v net/http in release branch with newer runtime.a DID build strconv but should not have")
	tg.grepStderrNot("golang.org/x/net/http2/hpack", "testgo build -v net/http in release branch with newer runtime.a DID build .../golang.org/x/net/http2/hpack but should not have")
	tg.grepStderrNot("net/http", "testgo build -v net/http in release branch with newer runtime.a DID build net/http but should not have")
	// Everything is out of date. Rebuild to leave things in a better state.
	tg.run("install", "std")
}
// TestNewReleaseRebuildsStalePackagesInGOPATH checks the staleness rules for
// release toolchains: ordinary mtime/content changes to runtime sources must
// not make GOPATH packages stale, but a change to zversion.go (the marker for
// switching releases) must.
func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
	if testing.Short() {
		t.Skip("don't rebuild the standard library in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()

	// addNL appends a newline to the named file (changing both its mtime and
	// its content) and returns a function that restores the original bytes.
	addNL := func(name string) (restore func()) {
		data, err := ioutil.ReadFile(name)
		if err != nil {
			t.Fatal(err)
		}
		old := data
		data = append(data, '\n')
		if err := ioutil.WriteFile(name, append(data, '\n'), 0666); err != nil {
			t.Fatal(err)
		}
		tg.sleep()
		return func() {
			if err := ioutil.WriteFile(name, old, 0666); err != nil {
				t.Fatal(err)
			}
		}
	}

	tg.setenv("TESTGO_IS_GO_RELEASE", "1")

	tg.tempFile("d1/src/p1/p1.go", `package p1`)
	tg.setenv("GOPATH", tg.path("d1"))
	tg.run("install", "-a", "p1")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly")
	tg.sleep()

	// Changing mtime and content of runtime/internal/sys/sys.go
	// should have no effect: we're in a release, which doesn't rebuild
	// for general mtime or content changes.
	sys := runtime.GOROOT() + "/src/runtime/internal/sys/sys.go"
	restore := addNL(sys)
	defer restore()
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating runtime/internal/sys/sys.go")
	restore()
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after restoring runtime/internal/sys/sys.go")

	// But changing runtime/internal/sys/zversion.go should have an effect:
	// that's how we tell when we flip from one release to another.
	zversion := runtime.GOROOT() + "/src/runtime/internal/sys/zversion.go"
	restore = addNL(zversion)
	defer restore()
	tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing to new release")
	restore()
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
	addNL(zversion)
	tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing again to new release")
	tg.run("install", "p1")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")

	// Restore to "old" release.
	restore()
	tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing to old release after new build")
	tg.run("install", "p1")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")

	// Everything is out of date. Rebuild to leave things in a better state.
	tg.run("install", "std")
}
// TestGoListStandard checks that every package under GOROOT/src is reported
// as standard, and that expanding "std cmd" includes vendored code.
func TestGoListStandard(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.cd(runtime.GOROOT() + "/src")
	tg.run("list", "-f", "{{if not .Standard}}{{.ImportPath}}{{end}}", "./...")
	stdout := tg.getStdout()
	for _, line := range strings.Split(stdout, "\n") {
		if strings.HasPrefix(line, "_/") && strings.HasSuffix(line, "/src") {
			// $GOROOT/src shows up if there are any .go files there.
			// We don't care.
			continue
		}
		if line == "" {
			continue
		}
		t.Errorf("package in GOROOT not listed as standard: %v", line)
	}

	// Similarly, expanding std should include some of our vendored code.
	tg.run("list", "std", "cmd")
	tg.grepStdout("golang.org/x/net/http2/hpack", "list std cmd did not mention vendored hpack")
	tg.grepStdout("golang.org/x/arch/x86/x86asm", "list std cmd did not mention vendored x86asm")
}
// TestGoInstallCleansUpAfterGoBuild checks that 'go install' (no arguments)
// removes the binary left in the source directory by a prior 'go build',
// but 'go install mycmd' (explicit argument) leaves existing binaries alone.
func TestGoInstallCleansUpAfterGoBuild(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.tempFile("src/mycmd/main.go", `package main; func main(){}`)
	tg.setenv("GOPATH", tg.path("."))
	tg.cd(tg.path("src/mycmd"))

	doesNotExist := func(file, msg string) {
		if _, err := os.Stat(file); err == nil {
			t.Fatal(msg)
		} else if !os.IsNotExist(err) {
			t.Fatal(msg, "error:", err)
		}
	}

	tg.run("build")
	tg.wantExecutable("mycmd"+exeSuffix, "testgo build did not write command binary")
	tg.run("install")
	doesNotExist("mycmd"+exeSuffix, "testgo install did not remove command binary")
	tg.run("build")
	tg.wantExecutable("mycmd"+exeSuffix, "testgo build did not write command binary (second time)")
	// Running install with arguments does not remove the target,
	// even in the same directory.
	tg.run("install", "mycmd")
	tg.wantExecutable("mycmd"+exeSuffix, "testgo install mycmd removed command binary when run in mycmd")
	tg.run("build")
	tg.wantExecutable("mycmd"+exeSuffix, "testgo build did not write command binary (third time)")
	// And especially not outside the directory.
	tg.cd(tg.path("."))
	if data, err := ioutil.ReadFile("src/mycmd/mycmd" + exeSuffix); err != nil {
		t.Fatal("could not read file:", err)
	} else {
		if err := ioutil.WriteFile("mycmd"+exeSuffix, data, 0555); err != nil {
			t.Fatal("could not write file:", err)
		}
	}
	tg.run("install", "mycmd")
	tg.wantExecutable("src/mycmd/mycmd"+exeSuffix, "testgo install mycmd removed command binary from its source dir when run outside mycmd")
	tg.wantExecutable("mycmd"+exeSuffix, "testgo install mycmd removed command binary from current dir when run outside mycmd")
}
// TestGoInstallRebuildsStalePackagesInOtherGOPATH checks that editing a
// dependency living in a second GOPATH entry marks both it and its dependents
// stale, and that reinstalling the dependent clears both.
func TestGoInstallRebuildsStalePackagesInOtherGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("d1/src/p1/p1.go", `package p1
		import "p2"
		func F() { p2.F() }`)
	tg.tempFile("d2/src/p2/p2.go", `package p2
		func F() {}`)
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", tg.path("d1")+sep+tg.path("d2"))
	tg.run("install", "p1")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly")
	tg.wantNotStale("p2", "", "./testgo list claims p2 is stale, incorrectly")
	tg.sleep()
	// Append to p2.go so it gets a newer mtime and different content.
	if f, err := os.OpenFile(tg.path("d2/src/p2/p2.go"), os.O_WRONLY|os.O_APPEND, 0); err != nil {
		t.Fatal(err)
	} else if _, err = f.WriteString(`func G() {}`); err != nil {
		t.Fatal(err)
	} else {
		tg.must(f.Close())
	}
	tg.wantStale("p2", "newer source file", "./testgo list claims p2 is NOT stale, incorrectly")
	tg.wantStale("p1", "stale dependency", "./testgo list claims p1 is NOT stale, incorrectly")

	tg.run("install", "p1")
	tg.wantNotStale("p2", "", "./testgo list claims p2 is stale after reinstall, incorrectly")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after reinstall, incorrectly")
}
// TestGoInstallDetectsRemovedFiles checks that removing a source file that
// was part of the build makes the package stale, while removing a file that
// was excluded by build tags does not.
func TestGoInstallDetectsRemovedFiles(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/mypkg/x.go", `package mypkg`)
	tg.tempFile("src/mypkg/y.go", `package mypkg`)
	tg.tempFile("src/mypkg/z.go", `// +build missingtag

		package mypkg`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("install", "mypkg")
	tg.wantNotStale("mypkg", "", "./testgo list mypkg claims mypkg is stale, incorrectly")
	// z.go was not part of the build; removing it is okay.
	tg.must(os.Remove(tg.path("src/mypkg/z.go")))
	tg.wantNotStale("mypkg", "", "./testgo list mypkg claims mypkg is stale after removing z.go; should not be stale")
	// y.go was part of the package; removing it should be detected.
	tg.must(os.Remove(tg.path("src/mypkg/y.go")))
	tg.wantStale("mypkg", "build ID mismatch", "./testgo list mypkg claims mypkg is NOT stale after removing y.go; should be stale")
}
// TestWildcardMatchesSyntaxErrorDirs checks that the ./... wildcard still
// matches directories containing files with syntax errors, so list/build/
// install all fail rather than silently skipping them.
func TestWildcardMatchesSyntaxErrorDirs(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.tempFile("src/mypkg/x.go", `package mypkg`)
	tg.tempFile("src/mypkg/y.go", `pkg mypackage`) // deliberate syntax error
	tg.setenv("GOPATH", tg.path("."))
	tg.cd(tg.path("src/mypkg"))
	tg.runFail("list", "./...")
	tg.runFail("build", "./...")
	tg.runFail("install", "./...")
}
// TestGoListWithTags checks that 'go list -tags' includes packages whose
// files are guarded by the given build tag.
func TestGoListWithTags(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("src/mypkg/x.go", "// +build thetag\n\npackage mypkg\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.cd(tg.path("./src"))
	tg.run("list", "-tags=thetag", "./my...")
	tg.grepStdout("mypkg", "did not find mypkg")
}
// TestGoInstallErrorOnCrossCompileToBin checks that cross-compiled installs
// succeed in general, but fail when GOBIN is set (GOBIN cannot hold binaries
// for another GOOS/GOARCH), while tool installs (cmd/pack) still work.
func TestGoInstallErrorOnCrossCompileToBin(t *testing.T) {
	if testing.Short() {
		t.Skip("don't install into GOROOT in short mode")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("src/mycmd/x.go", `package main
		func main() {}`)
	tg.setenv("GOPATH", tg.path("."))
	tg.cd(tg.path("src/mycmd"))

	tg.run("build", "mycmd")

	// Pick a GOARCH different from the host so the install is a cross-compile.
	goarch := "386"
	if runtime.GOARCH == "386" {
		goarch = "amd64"
	}
	tg.setenv("GOOS", "linux")
	tg.setenv("GOARCH", goarch)
	tg.run("install", "mycmd")
	tg.setenv("GOBIN", tg.path("."))
	tg.runFail("install", "mycmd")
	tg.run("install", "cmd/pack")
}
func TestGoInstallDetectsRemovedFilesInPackageMain(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/mycmd/x.go", `package main
func main() {}`)
tg.tempFile("src/mycmd/y.go", `package main`)
tg.tempFile("src/mycmd/z.go", `// +build missingtag
package main`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "mycmd")
tg.wantNotStale("mycmd", "", "./testgo list mypkg claims mycmd is stale, incorrectly")
// z.go was not part of the build; removing it is okay.
tg.must(os.Remove(tg.path("src/mycmd/z.go")))
tg.wantNotStale("mycmd", "", "./testgo list mycmd claims mycmd is stale after removing z.go; should not be stale")
// y.go was part of the package; removing it should be detected.
tg.must(os.Remove(tg.path("src/mycmd/y.go")))
tg.wantStale("mycmd", "build ID mismatch", "./testgo list mycmd claims mycmd is NOT stale after removing y.go; should be stale")
}
// testLocalRun executes the built binary at exepath and checks that its
// output matches the given regexp; local names the testdata variant being
// exercised (for the failure message).
func testLocalRun(tg *testgoData, exepath, local, match string) {
	out, err := exec.Command(exepath).Output()
	if err != nil {
		tg.t.Fatalf("error running %v: %v", exepath, err)
	}
	if !regexp.MustCompile(match).Match(out) {
		tg.t.Log(string(out))
		tg.t.Errorf("testdata/%s/easy.go did not generate expected output", local)
	}
}
// testLocalEasy builds testdata/<local>/easy.go (a simple relative import)
// and checks that running it prints easysub.Hello output.
func testLocalEasy(tg *testgoData, local string) {
	exepath := "./easy" + exeSuffix
	tg.creatingTemp(exepath)
	tg.run("build", "-o", exepath, filepath.Join("testdata", local, "easy.go"))
	testLocalRun(tg, exepath, local, `(?m)^easysub\.Hello`)
}
// testLocalEasySub builds testdata/<local>/easysub/main.go and checks
// that running it prints easysub.Hello output.
func testLocalEasySub(tg *testgoData, local string) {
	exepath := "./easysub" + exeSuffix
	tg.creatingTemp(exepath)
	tg.run("build", "-o", exepath, filepath.Join("testdata", local, "easysub", "main.go"))
	testLocalRun(tg, exepath, local, `(?m)^easysub\.Hello`)
}
// testLocalHard builds testdata/<local>/hard.go (a more involved relative
// import chain) and checks that running it prints sub.Hello output.
func testLocalHard(tg *testgoData, local string) {
	exepath := "./hard" + exeSuffix
	tg.creatingTemp(exepath)
	tg.run("build", "-o", exepath, filepath.Join("testdata", local, "hard.go"))
	testLocalRun(tg, exepath, local, `(?m)^sub\.Hello`)
}
// testLocalInstall checks that 'go install' of a file using relative
// imports fails (there is no install location for such a command).
func testLocalInstall(tg *testgoData, local string) {
	tg.runFail("install", filepath.Join("testdata", local, "easy.go"))
}
// TestLocalImportsEasy exercises the easy relative-import case in testdata/local.
func TestLocalImportsEasy(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	testLocalEasy(tg, "local")
}
// TestLocalImportsEasySub exercises the easysub relative-import case in testdata/local.
func TestLocalImportsEasySub(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	testLocalEasySub(tg, "local")
}
// TestLocalImportsHard exercises the hard relative-import case in testdata/local.
func TestLocalImportsHard(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	testLocalHard(tg, "local")
}
// TestLocalImportsGoInstallShouldFail checks that installing a file with
// relative imports from testdata/local fails.
func TestLocalImportsGoInstallShouldFail(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	testLocalInstall(tg, "local")
}
// badDirName is a directory name full of shell- and path-hostile characters;
// the TestBadImports* tests copy testdata/local under this name to check that
// the go command handles such paths. (Invalid on Windows; see copyBad.)
const badDirName = `#$%:, &()*;<=>?\^{}`
// copyBad copies the testdata/local tree into a temp directory whose name is
// badDirName and cds there. Skips on Windows, where badDirName is not a
// legal directory name.
func copyBad(tg *testgoData) {
	if runtime.GOOS == "windows" {
		tg.t.Skipf("skipping test because %q is an invalid directory name", badDirName)
	}

	tg.must(filepath.Walk("testdata/local",
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}
			var data []byte
			data, err = ioutil.ReadFile(path)
			if err != nil {
				return err
			}
			// Recreate the file under the bad directory name.
			newpath := strings.Replace(path, "local", badDirName, 1)
			tg.tempFile(newpath, string(data))
			return nil
		}))
	tg.cd(tg.path("."))
}
// TestBadImportsEasy exercises the easy relative-import case from a
// directory with a hostile name.
func TestBadImportsEasy(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	copyBad(tg)
	testLocalEasy(tg, badDirName)
}
// TestBadImportsEasySub exercises the easysub relative-import case from a
// directory with a hostile name.
func TestBadImportsEasySub(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	copyBad(tg)
	testLocalEasySub(tg, badDirName)
}
// TestBadImportsHard exercises the hard relative-import case from a
// directory with a hostile name.
func TestBadImportsHard(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	copyBad(tg)
	testLocalHard(tg, badDirName)
}
// TestBadImportsGoInstallShouldFail checks that installing a relative-import
// file fails even from a directory with a hostile name.
func TestBadImportsGoInstallShouldFail(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	copyBad(tg)
	testLocalInstall(tg, badDirName)
}
// TestInternalPackagesInGOROOTAreRespected checks that importing an internal
// package in GOROOT from outside its tree fails with the expected error.
func TestInternalPackagesInGOROOTAreRespected(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("build", "-v", "./testdata/testinternal")
	tg.grepBoth(`testinternal(\/|\\)p\.go\:3\:8\: use of internal package not allowed`, "wrong error message for testdata/testinternal")
}
// TestInternalPackagesOutsideGOROOTAreRespected checks that importing an
// internal package outside GOROOT from outside its tree fails with the
// expected error.
func TestInternalPackagesOutsideGOROOTAreRespected(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("build", "-v", "./testdata/testinternal2")
	// Fixed typo in the failure message: "wrote" -> "wrong" (matches the
	// sibling GOROOT test above).
	tg.grepBoth(`testinternal2(\/|\\)p\.go\:3\:8\: use of internal package not allowed`, "wrong error message for testdata/testinternal2")
}
// TestRunInternal checks that 'go run' enforces internal-package
// restrictions: good.go runs, bad.go fails with the internal-package error.
func TestRunInternal(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	dir := filepath.Join(tg.pwd(), "testdata")
	tg.setenv("GOPATH", dir)
	tg.run("run", filepath.Join(dir, "src/run/good.go"))
	tg.runFail("run", filepath.Join(dir, "src/run/bad.go"))
	tg.grepStderr(`testdata(\/|\\)src(\/|\\)run(\/|\\)bad\.go\:3\:8\: use of internal package not allowed`, "unexpected error for run/bad.go")
}
// testMove checks the behavior of 'go get -u' when a repository's custom
// import path no longer matches its checkout: after corrupting the VCS
// metadata (or swapping in a different repo for svn), plain -u must fail
// with the custom-import-path error, and -f -u must fail for a different
// reason (cert validation or not-found).
//
// vcs is the version control system ("git", "hg", "svn"); url is the package
// to get; base is the string to corrupt in the VCS config; config is the
// path (under src) of the VCS config file to edit.
func testMove(t *testing.T, vcs, url, base, config string) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "-d", url)
	tg.run("get", "-d", "-u", url)
	switch vcs {
	case "svn":
		// SVN doesn't believe in text files so we can't just edit the config.
		// Check out a different repo into the wrong place.
		tg.must(os.RemoveAll(tg.path("src/code.google.com/p/rsc-svn")))
		tg.run("get", "-d", "-u", "code.google.com/p/rsc-svn2/trunk")
		tg.must(os.Rename(tg.path("src/code.google.com/p/rsc-svn2"), tg.path("src/code.google.com/p/rsc-svn")))
	default:
		// Corrupt the remote recorded in the VCS config so the checkout no
		// longer matches the custom import path.
		path := tg.path(filepath.Join("src", config))
		data, err := ioutil.ReadFile(path)
		tg.must(err)
		data = bytes.Replace(data, []byte(base), []byte(base+"XXX"), -1)
		tg.must(ioutil.WriteFile(path, data, 0644))
	}
	if vcs == "git" {
		// git will ask for a username and password when we
		// run go get -d -f -u. An empty username and
		// password will work. Prevent asking by setting
		// GIT_ASKPASS.
		tg.creatingTemp("sink" + exeSuffix)
		tg.tempFile("src/sink/sink.go", `package main; func main() {}`)
		tg.run("build", "-o", "sink"+exeSuffix, "sink")
		tg.setenv("GIT_ASKPASS", filepath.Join(tg.pwd(), "sink"+exeSuffix))
	}
	tg.runFail("get", "-d", "-u", url)
	tg.grepStderr("is a custom import path for", "go get -d -u "+url+" failed for wrong reason")
	tg.runFail("get", "-d", "-f", "-u", url)
	tg.grepStderr("validating server certificate|[nN]ot [fF]ound", "go get -d -f -u "+url+" failed for wrong reason")
}
// TestInternalPackageErrorsAreHandled checks that 'go list' does not crash
// on the testinternal3 fixture.
func TestInternalPackageErrorsAreHandled(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("list", "./testdata/testinternal3")
}
// TestInternalCache checks that the internal-package violation in the
// testinternal4 fixture is reported when building p.
func TestInternalCache(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/testinternal4"))
	tg.runFail("build", "p")
	tg.grepStderr("internal", "did not fail to build p")
}
// TestMoveGit runs the custom-import-path move test against a git repository.
func TestMoveGit(t *testing.T) {
	testMove(t, "git", "rsc.io/pdf", "pdf", "rsc.io/pdf/.git/config")
}
// TestMoveHG runs the custom-import-path move test against a Mercurial repository.
func TestMoveHG(t *testing.T) {
	testMove(t, "hg", "vcs-test.golang.org/go/custom-hg-hello", "custom-hg-hello", "vcs-test.golang.org/go/custom-hg-hello/.hg/hgrc")
}
// TODO(rsc): Set up a test case on SourceForge (?) for svn.
// func testMoveSVN(t *testing.T) {
// testMove(t, "svn", "code.google.com/p/rsc-svn/trunk", "-", "-")
// }
// TestImportCommandMatch checks that a correct import comment builds cleanly.
func TestImportCommandMatch(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
	tg.run("build", "./testdata/importcom/works.go")
}
// TestImportCommentMismatch checks that importing a package from the wrong
// path, per its import comment, fails with a message naming the expected path.
func TestImportCommentMismatch(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
	tg.runFail("build", "./testdata/importcom/wrongplace.go")
	tg.grepStderr(`wrongplace expects import "my/x"`, "go build did not mention incorrect import")
}
// TestImportCommentSyntaxError checks that a malformed import comment is
// reported as a parse failure.
func TestImportCommentSyntaxError(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
	tg.runFail("build", "./testdata/importcom/bad.go")
	tg.grepStderr("cannot parse import comment", "go build did not mention syntax error")
}
// TestImportCommentConflict checks that conflicting import comments in one
// package are reported.
func TestImportCommentConflict(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
	tg.runFail("build", "./testdata/importcom/conflict.go")
	tg.grepStderr("found import comments", "go build did not mention comment conflict")
}
// cmd/go: custom import path checking should not apply to Go packages without import comment.
// TestIssue10952 fetches a repo, switches its origin to an https URL, and
// checks that 'go get -u' still succeeds (no import comment to enforce).
func TestIssue10952(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("skipping because git binary not found")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	const importPath = "github.com/zombiezen/go-get-issue-10952"
	tg.run("get", "-d", "-u", importPath)
	repoDir := tg.path("src/" + importPath)
	tg.runGit(repoDir, "remote", "set-url", "origin", "https://"+importPath+".git")
	tg.run("get", "-d", "-u", importPath)
}
// TestIssue16471 checks that 'go get -u' detects when a package with a
// custom import path is checked out from the wrong upstream repository.
func TestIssue16471(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("skipping because git binary not found")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	// Clone the wrong repo into the rsc.io import path by hand.
	tg.must(os.MkdirAll(tg.path("src/rsc.io/go-get-issue-10952"), 0755))
	tg.runGit(tg.path("src/rsc.io"), "clone", "https://github.com/zombiezen/go-get-issue-10952")
	tg.runFail("get", "-u", "rsc.io/go-get-issue-10952")
	tg.grepStderr("rsc.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/zombiezen/go-get-issue-10952", "did not detect updated import path")
}
// Test git clone URL that uses SCP-like syntax and custom import path checking.
func TestIssue11457(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("skipping because git binary not found")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	const importPath = "rsc.io/go-get-issue-11457"
	tg.run("get", "-d", "-u", importPath)
	repoDir := tg.path("src/" + importPath)
	// Switch origin to SCP-like syntax ([email protected]:path).
	tg.runGit(repoDir, "remote", "set-url", "origin", "[email protected]:rsc/go-get-issue-11457")

	// At this time, custom import path checking compares remotes verbatim (rather than
	// just the host and path, skipping scheme and user), so we expect go get -u to fail.
	// However, the goal of this test is to verify that gitRemoteRepo correctly parsed
	// the SCP-like syntax, and we expect it to appear in the error message.
	tg.runFail("get", "-d", "-u", importPath)
	want := " is checked out from ssh://[email protected]/rsc/go-get-issue-11457"
	if !strings.HasSuffix(strings.TrimSpace(tg.getStderr()), want) {
		t.Error("expected clone URL to appear in stderr")
	}
}
// TestGetGitDefaultBranch checks that 'go get' and 'go get -u' leave a git
// checkout on the repository's own default branch rather than forcing master.
func TestGetGitDefaultBranch(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("skipping because git binary not found")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))

	// This repo has two branches, master and another-branch.
	// The another-branch is the default that you get from 'git clone'.
	// The go get command variants should not override this.
	const importPath = "github.com/rsc/go-get-default-branch"

	tg.run("get", "-d", importPath)
	repoDir := tg.path("src/" + importPath)
	tg.runGit(repoDir, "branch", "--contains", "HEAD")
	tg.grepStdout(`\* another-branch`, "not on correct default branch")

	tg.run("get", "-d", "-u", importPath)
	tg.runGit(repoDir, "branch", "--contains", "HEAD")
	tg.grepStdout(`\* another-branch`, "not on correct default branch")
}
// TestAccidentalGitCheckout checks that 'go get -u' refuses to place a git
// checkout inside a tree managed by a different VCS (svn).
func TestAccidentalGitCheckout(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("skipping because git binary not found")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("get", "-u", "vcs-test.golang.org/go/test1-svn-git")
	tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")

	tg.runFail("get", "-u", "vcs-test.golang.org/go/test2-svn-git/test2main")
	tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")
}
// TestErrorMessageForSyntaxErrorInTestGoFileSaysFAIL checks that a syntax
// error in a _test.go file still produces FAIL output.
func TestErrorMessageForSyntaxErrorInTestGoFileSaysFAIL(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("test", "syntaxerror")
	tg.grepStderr("FAIL", "go test did not say FAIL")
}
// TestWildcardsDoNotLookInUselessDirectories checks that '...' reports the
// broken package (badpkg), while a narrower pattern that excludes it succeeds.
func TestWildcardsDoNotLookInUselessDirectories(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("list", "...")
	tg.grepBoth("badpkg", "go list ... failure does not mention badpkg")
	tg.run("list", "m...")
}
// TestRelativeImportsGoTest checks that 'go test' works on a package using
// relative imports.
func TestRelativeImportsGoTest(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "./testdata/testimport")
}
// TestRelativeImportsGoTestDashI checks that 'go test -i' works on a package
// using relative imports.
func TestRelativeImportsGoTestDashI(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-i", "./testdata/testimport")
}
// TestRelativeImportsInCommandLinePackage checks that 'go test' on an
// explicit list of files with relative imports works.
func TestRelativeImportsInCommandLinePackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	files, err := filepath.Glob("./testdata/testimport/*.go")
	tg.must(err)
	tg.run(append([]string{"test"}, files...)...)
}
// TestNonCanonicalImportPaths checks that a non-canonical import path
// (e.g. trailing slash) is reported along with the import chain leading to it.
func TestNonCanonicalImportPaths(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("build", "canonical/d")
	tg.grepStderr("package canonical/d", "did not report canonical/d")
	tg.grepStderr("imports canonical/b", "did not report canonical/b")
	tg.grepStderr("imports canonical/a/: non-canonical", "did not report canonical/a/")
}
// TestVersionControlErrorMessageIncludesCorrectDirectory checks that a
// 'go get -u' VCS error names the directory where the problem was found.
func TestVersionControlErrorMessageIncludesCorrectDirectory(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/shadow/root1"))
	tg.runFail("get", "-u", "foo")

	// TODO(iant): We should not have to use strconv.Quote here.
	// The code in vcs.go should be changed so that it is not required.
	quoted := strconv.Quote(filepath.Join("testdata", "shadow", "root1", "src", "foo"))
	quoted = quoted[1 : len(quoted)-1]

	tg.grepStderr(regexp.QuoteMeta(quoted), "go get -u error does not mention shadow/root1/src/foo")
}
// TestInstallFailsWithNoBuildableFiles checks that installing a cgo-only
// package with CGO_ENABLED=0 reports that build constraints excluded all files.
func TestInstallFailsWithNoBuildableFiles(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("CGO_ENABLED", "0")
	tg.runFail("install", "cgotest")
	tg.grepStderr("build constraints exclude all Go files", "go install cgotest did not report 'build constraints exclude all Go files'")
}
// TestRelativeGOBINFail checks that 'go install' rejects a relative GOBIN.
func TestRelativeGOBINFail(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("triv.go", `package main; func main() {}`)
	tg.setenv("GOBIN", ".")
	tg.runFail("install")
	tg.grepStderr("cannot install, GOBIN must be an absolute path", "go install must fail if $GOBIN is a relative path")
}
// Test that without $GOBIN set, binaries get installed
// into the GOPATH bin directory.
func TestInstallIntoGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.creatingTemp("testdata/bin/go-cmd-test" + exeSuffix)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("install", "go-cmd-test")
	tg.wantExecutable("testdata/bin/go-cmd-test"+exeSuffix, "go install go-cmd-test did not write to testdata/bin/go-cmd-test")
}
// Issue 12407: 'go build -o /dev/null' (or the platform equivalent) must work.
func TestBuildOutputToDevNull(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("build", "-o", os.DevNull, "go-cmd-test")
}
// TestPackageMainTestImportsArchiveNotBinary checks that testing a main
// package links against its archive, not the installed binary, and that the
// installed command is considered up to date afterwards.
func TestPackageMainTestImportsArchiveNotBinary(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	gobin := filepath.Join(tg.pwd(), "testdata", "bin")
	tg.creatingTemp(gobin)
	tg.setenv("GOBIN", gobin)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	// Touch the source so the package is definitely newer than any old install.
	tg.must(os.Chtimes("./testdata/src/main_test/m.go", time.Now(), time.Now()))
	tg.sleep()
	tg.run("test", "main_test")
	tg.run("install", "main_test")
	tg.wantNotStale("main_test", "", "after go install, main listed as stale")
	tg.run("test", "main_test")
}
// The runtime version string takes one of two forms:
// "go1.X[.Y]" for Go releases, and "devel +hash" at tip.
// Determine whether we are in a released copy by
// inspecting the version.
// isGoRelease reports whether this toolchain is a released Go version.
var isGoRelease = strings.HasPrefix(runtime.Version(), "go1")
// Issue 12690: a trailing slash in GOROOT must not make std packages stale.
func TestPackageNotStaleWithTrailingSlash(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()

	// Make sure the packages below are not stale.
	tg.run("install", "runtime", "os", "io")

	goroot := runtime.GOROOT()
	tg.setenv("GOROOT", goroot+"/")

	want := ""
	if isGoRelease {
		want = "standard package in Go release distribution"
	}

	tg.wantNotStale("runtime", want, "with trailing slash in GOROOT, runtime listed as stale")
	tg.wantNotStale("os", want, "with trailing slash in GOROOT, os listed as stale")
	tg.wantNotStale("io", want, "with trailing slash in GOROOT, io listed as stale")
}
// With $GOBIN set, binaries get installed to $GOBIN.
func TestInstallIntoGOBIN(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	gobin := filepath.Join(tg.pwd(), "testdata", "bin1")
	tg.creatingTemp(gobin)
	tg.setenv("GOBIN", gobin)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("install", "go-cmd-test")
	tg.wantExecutable("testdata/bin1/go-cmd-test"+exeSuffix, "go install go-cmd-test did not write to testdata/bin1/go-cmd-test")
}
// Issue 11065: with GOBIN set to the package's own directory, 'go install'
// run from that directory writes the executable there.
func TestInstallToCurrentDirectoryCreatesExecutable(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	pkg := filepath.Join(tg.pwd(), "testdata", "src", "go-cmd-test")
	tg.creatingTemp(filepath.Join(pkg, "go-cmd-test"+exeSuffix))
	tg.setenv("GOBIN", pkg)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.cd(pkg)
	tg.run("install")
	tg.wantExecutable("go-cmd-test"+exeSuffix, "go install did not write to current directory")
}
// Without $GOBIN set, installing a program outside $GOPATH should fail
// (there is nowhere to install it).
func TestInstallWithoutDestinationFails(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("install", "testdata/src/go-cmd-test/helloworld.go")
	tg.grepStderr("no install location for .go files listed on command line", "wrong error")
}
// With $GOBIN set, should install there.
func TestInstallToGOBINCommandLinePackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	gobin := filepath.Join(tg.pwd(), "testdata", "bin1")
	tg.creatingTemp(gobin)
	tg.setenv("GOBIN", gobin)
	tg.run("install", "testdata/src/go-cmd-test/helloworld.go")
	tg.wantExecutable("testdata/bin1/helloworld"+exeSuffix, "go install testdata/src/go-cmd-test/helloworld.go did not write testdata/bin1/helloworld")
}
// TestGoGetNonPkg checks that 'go get' of a repository root with no Go files
// reports "no Go files" each time it is attempted.
func TestGoGetNonPkg(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("gobin")
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GOBIN", tg.path("gobin"))
	tg.runFail("get", "-d", "golang.org/x/tools")
	tg.grepStderr("golang.org/x/tools: no Go files", "missing error")
	tg.runFail("get", "-d", "-u", "golang.org/x/tools")
	tg.grepStderr("golang.org/x/tools: no Go files", "missing error")
	// Repeat the plain -d get: the error must persist once the repo is on disk.
	tg.runFail("get", "-d", "golang.org/x/tools")
	tg.grepStderr("golang.org/x/tools: no Go files", "missing error")
}
// TestGoGetTestOnlyPkg checks that 'go get' works on a package whose only
// Go files are tests, both without and with -t.
func TestGoGetTestOnlyPkg(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("gopath")
	tg.setenv("GOPATH", tg.path("gopath"))
	tg.run("get", "golang.org/x/tour/content")
	tg.run("get", "-t", "golang.org/x/tour/content")
}
// TestInstalls checks install destinations: cmd/fix always goes to
// $GOROOT/pkg/tool (even with GOBIN set), while GOPATH programs go to
// $GOBIN when set and $GOPATH/bin otherwise.
func TestInstalls(t *testing.T) {
	if testing.Short() {
		t.Skip("don't install into GOROOT in short mode")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("gobin")
	tg.setenv("GOPATH", tg.path("."))
	goroot := runtime.GOROOT()
	tg.setenv("GOROOT", goroot)

	// cmd/fix installs into tool
	tg.run("env", "GOOS")
	goos := strings.TrimSpace(tg.getStdout())
	tg.setenv("GOOS", goos)
	tg.run("env", "GOARCH")
	goarch := strings.TrimSpace(tg.getStdout())
	tg.setenv("GOARCH", goarch)
	fixbin := filepath.Join(goroot, "pkg", "tool", goos+"_"+goarch, "fix") + exeSuffix
	tg.must(os.RemoveAll(fixbin))
	tg.run("install", "cmd/fix")
	tg.wantExecutable(fixbin, "did not install cmd/fix to $GOROOT/pkg/tool")
	tg.must(os.Remove(fixbin))
	tg.setenv("GOBIN", tg.path("gobin"))
	tg.run("install", "cmd/fix")
	tg.wantExecutable(fixbin, "did not install cmd/fix to $GOROOT/pkg/tool with $GOBIN set")
	tg.unsetenv("GOBIN")

	// gopath program installs into GOBIN
	tg.tempFile("src/progname/p.go", `package main; func main() {}`)
	tg.setenv("GOBIN", tg.path("gobin"))
	tg.run("install", "progname")
	tg.unsetenv("GOBIN")
	tg.wantExecutable(tg.path("gobin/progname")+exeSuffix, "did not install progname to $GOBIN/progname")

	// gopath program installs into GOPATH/bin
	tg.run("install", "progname")
	tg.wantExecutable(tg.path("bin/progname")+exeSuffix, "did not install progname to $GOPATH/bin/progname")
}
// TestRejectRelativeDotPathInGOPATHCommandLinePackage checks that GOPATH="."
// is rejected when building a command-line package.
func TestRejectRelativeDotPathInGOPATHCommandLinePackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", ".")
	tg.runFail("build", "testdata/src/go-cmd-test/helloworld.go")
	tg.grepStderr("GOPATH entry is relative", "expected an error message rejecting relative GOPATH entries")
}
// TestRejectRelativePathsInGOPATH checks that a relative entry anywhere in a
// multi-entry GOPATH is rejected.
func TestRejectRelativePathsInGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", sep+filepath.Join(tg.pwd(), "testdata")+sep+".")
	tg.runFail("build", "go-cmd-test")
	tg.grepStderr("GOPATH entry is relative", "expected an error message rejecting relative GOPATH entries")
}
// TestRejectRelativePathsInGOPATHCommandLinePackage checks that a relative
// GOPATH ("testdata") is rejected when building a command-line package.
func TestRejectRelativePathsInGOPATHCommandLinePackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", "testdata")
	tg.runFail("build", "testdata/src/go-cmd-test/helloworld.go")
	tg.grepStderr("GOPATH entry is relative", "expected an error message rejecting relative GOPATH entries")
}
// Issue 4104: a package listed multiple times on the command line must be
// tested only once.
func TestGoTestWithPackageListedMultipleTimes(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("test", "errors", "errors", "errors", "errors", "errors")
	// A single "ok" line means the package ran once; any newline in the
	// trimmed output indicates multiple runs.
	if strings.Contains(strings.TrimSpace(tg.getStdout()), "\n") {
		t.Error("go test errors errors errors errors errors tested the same package multiple times")
	}
}
// TestGoListHasAConsistentOrder checks that two runs of 'go list std'
// produce identical output.
func TestGoListHasAConsistentOrder(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("list", "std")
	first := tg.getStdout()
	tg.run("list", "std")
	if first != tg.getStdout() {
		t.Error("go list std ordering is inconsistent")
	}
}
// TestGoListStdDoesNotIncludeCommands checks that 'go list std' excludes
// cmd/ packages.
func TestGoListStdDoesNotIncludeCommands(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("list", "std")
	tg.grepStdoutNot("cmd/", "go list std shows commands")
}
// TestGoListCmdOnlyShowsCommands checks that every line of 'go list cmd'
// output mentions cmd/.
func TestGoListCmdOnlyShowsCommands(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("list", "cmd")
	out := strings.TrimSpace(tg.getStdout())
	for _, line := range strings.Split(out, "\n") {
		if !strings.Contains(line, "cmd/") {
			t.Error("go list cmd shows non-commands")
			break
		}
	}
}
// TestGoListDedupsPackages checks that listing the same package via its
// import path and a matching file pattern produces a single entry.
func TestGoListDedupsPackages(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("list", "xtestonly", "./testdata/src/xtestonly/...")
	got := strings.TrimSpace(tg.getStdout())
	const want = "xtestonly"
	if got != want {
		t.Errorf("got %q; want %q", got, want)
	}
}
// Issue 4096. Validate the output of unsuccessful go install foo/quxx.
func TestUnsuccessfulGoInstallShouldMentionMissingPackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.runFail("install", "foo/quxx")
	// Exactly one "cannot find package" line is expected.
	if tg.grepCountBoth(`cannot find package "foo/quxx" in any of`) != 1 {
		t.Error(`go install foo/quxx expected error: .*cannot find package "foo/quxx" in any of`)
	}
}
// TestGOROOTSearchFailureReporting checks that the missing-package error
// lists the GOROOT search location with the "(from $GOROOT)" annotation.
func TestGOROOTSearchFailureReporting(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.runFail("install", "foo/quxx")
	if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("foo", "quxx"))+` \(from \$GOROOT\)$`) != 1 {
		t.Error(`go install foo/quxx expected error: .*foo/quxx (from $GOROOT)`)
	}
}
// TestMultipleGOPATHEntriesReportedSeparately checks that the missing-package
// error lists the search location of every GOPATH entry.
func TestMultipleGOPATHEntriesReportedSeparately(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
	tg.runFail("install", "foo/quxx")
	// Expect one line per GOPATH entry (a and b).
	if tg.grepCountBoth(`testdata[/\\].[/\\]src[/\\]foo[/\\]quxx`) != 2 {
		t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)\n.*testdata/b/src/foo/quxx`)
	}
}
// Test (from $GOPATH) annotation is reported for the first GOPATH entry,
func TestMentionGOPATHInFirstGOPATHEntry(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
	tg.runFail("install", "foo/quxx")
	// Only the first entry's line carries the "(from $GOPATH)" suffix.
	if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "a", "src", "foo", "quxx"))+` \(from \$GOPATH\)$`) != 1 {
		t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)`)
	}
}
// but not on the second.
func TestMentionGOPATHNotOnSecondEntry(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
	tg.runFail("install", "foo/quxx")
	// The second entry's line must end right after the path, with no
	// "(from $GOPATH)" annotation (the `$` anchor enforces this).
	if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "b", "src", "foo", "quxx"))+`$`) != 1 {
		t.Error(`go install foo/quxx expected error: .*testdata/b/src/foo/quxx`)
	}
}
func homeEnvName() string {
switch runtime.GOOS {
case "windows":
return "USERPROFILE"
case "plan9":
return "home"
default:
return "HOME"
}
}
// TestDefaultGOPATH verifies that with GOPATH unset the default is $HOME/go,
// and that the default is suppressed when GOROOT is the same directory
// (with or without a trailing separator).
func TestDefaultGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("home/go")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.run("env", "GOPATH")
	tg.grepStdout(regexp.QuoteMeta(tg.path("home/go")), "want GOPATH=$HOME/go")
	tg.setenv("GOROOT", tg.path("home/go"))
	tg.run("env", "GOPATH")
	tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go")
	// Trailing slash on GOROOT must not defeat the equality check.
	tg.setenv("GOROOT", tg.path("home/go")+"/")
	tg.run("env", "GOPATH")
	tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/")
}
// TestDefaultGOPATHGet verifies go get's behavior around the default GOPATH:
// it warns when it creates $HOME/go, stays quiet when the directory already
// exists, and errors if $HOME/go is a regular file.
func TestDefaultGOPATHGet(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", "")
	tg.tempDir("home")
	tg.setenv(homeEnvName(), tg.path("home"))
	// warn for creating directory
	tg.run("get", "-v", "github.com/golang/example/hello")
	tg.grepStderr("created GOPATH="+regexp.QuoteMeta(tg.path("home/go"))+"; see 'go help gopath'", "did not create GOPATH")
	// no warning if directory already exists
	tg.must(os.RemoveAll(tg.path("home/go")))
	tg.tempDir("home/go")
	tg.run("get", "github.com/golang/example/hello")
	tg.grepStderrNot(".", "expected no output on standard error")
	// error if $HOME/go is a file
	tg.must(os.RemoveAll(tg.path("home/go")))
	tg.tempFile("home/go", "")
	tg.runFail("get", "github.com/golang/example/hello")
	tg.grepStderr(`mkdir .*[/\\]go: .*(not a directory|cannot find the path)`, "expected error because $HOME/go is a file")
}
// TestDefaultGOPATHPrintedSearchList verifies that a failed install with
// GOPATH unset mentions the default $HOME/go location in its search list.
func TestDefaultGOPATHPrintedSearchList(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", "")
	tg.tempDir("home")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.runFail("install", "github.com/golang/example/hello")
	tg.grepStderr(regexp.QuoteMeta(tg.path("home/go/src/github.com/golang/example/hello"))+`.*from \$GOPATH`, "expected default GOPATH")
}
// Issue 4186. go get cannot be used to download packages to $GOROOT.
// Test that without GOPATH set, go get should fail.
//
// Exercises two failure modes: GOPATH explicitly equal to GOROOT (with and
// without trailing separators), and GOROOT equal to $HOME/go so the default
// GOPATH is suppressed, leaving GOPATH effectively unset.
func TestGoGetIntoGOROOT(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	// Fails because GOROOT=GOPATH
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GOROOT", tg.path("."))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
	// Fails because GOROOT=GOPATH after cleaning.
	tg.setenv("GOPATH", tg.path(".")+"/")
	tg.setenv("GOROOT", tg.path("."))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GOROOT", tg.path(".")+"/")
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
	// Fails because GOROOT=$HOME/go so default GOPATH unset.
	tg.tempDir("home/go")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.setenv("GOPATH", "")
	tg.setenv("GOROOT", tg.path("home/go"))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
	// Same, but with trailing separators on HOME / GOROOT.
	tg.setenv(homeEnvName(), tg.path("home")+"/")
	tg.setenv("GOPATH", "")
	tg.setenv("GOROOT", tg.path("home/go"))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.setenv("GOPATH", "")
	tg.setenv("GOROOT", tg.path("home/go")+"/")
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
}
// Issue 3941. TestLdflagsArgumentsWithSpacesIssue3941 verifies that a quoted
// -ldflags -X value containing a space reaches the linked binary intact.
func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("main.go", `package main
var extern string
func main() {
	println(extern)
}`)
	tg.run("run", "-ldflags", `-X "main.extern=hello world"`, tg.path("main.go"))
	// println writes to stderr, so the injected value appears there.
	tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`)
}
// TestGoTestCpuprofileLeavesBinaryBehind verifies that "go test -cpuprofile"
// keeps the compiled test binary in the current directory for later analysis.
func TestGoTestCpuprofileLeavesBinaryBehind(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	tg.run("test", "-cpuprofile", "errors.prof", "errors")
	tg.wantExecutable("errors.test"+exeSuffix, "go test -cpuprofile did not create errors.test")
}
// TestGoTestCpuprofileDashOControlsBinaryLocation verifies that with
// -cpuprofile, the -o flag chooses the name of the retained test binary.
func TestGoTestCpuprofileDashOControlsBinaryLocation(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	tg.run("test", "-cpuprofile", "errors.prof", "-o", "myerrors.test"+exeSuffix, "errors")
	tg.wantExecutable("myerrors.test"+exeSuffix, "go test -cpuprofile -o myerrors.test did not create myerrors.test")
}
// TestGoTestMutexprofileLeavesBinaryBehind verifies that "go test
// -mutexprofile" keeps the compiled test binary in the current directory.
func TestGoTestMutexprofileLeavesBinaryBehind(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	tg.run("test", "-mutexprofile", "errors.prof", "errors")
	tg.wantExecutable("errors.test"+exeSuffix, "go test -mutexprofile did not create errors.test")
}
// TestGoTestMutexprofileDashOControlsBinaryLocation verifies that with
// -mutexprofile, the -o flag chooses the name of the retained test binary.
func TestGoTestMutexprofileDashOControlsBinaryLocation(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	tg.run("test", "-mutexprofile", "errors.prof", "-o", "myerrors.test"+exeSuffix, "errors")
	tg.wantExecutable("myerrors.test"+exeSuffix, "go test -mutexprofile -o myerrors.test did not create myerrors.test")
}
// TestGoTestDashCDashOControlsBinaryLocation verifies that "go test -c -o"
// writes the compiled (but not run) test binary to the requested path.
func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.run("test", "-c", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
	tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -c -o myerrors.test did not create myerrors.test")
}
// TestGoTestDashOWritesBinary verifies that "go test -o" (without -c)
// still writes the test binary to the requested path.
func TestGoTestDashOWritesBinary(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.run("test", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
	tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
// TestGoTestDashIDashOWritesBinary verifies that "go test -i -o" installs
// dependencies and writes the binary without actually running the tests.
func TestGoTestDashIDashOWritesBinary(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.run("test", "-v", "-i", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
	// -i means install deps only: no PASS/FAIL output should appear.
	tg.grepBothNot("PASS|FAIL", "test should not have run")
	tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
// Issue 4568. go list must resolve the package Root correctly when the
// current directory is reached through a symlink.
func TestSymlinksList(t *testing.T) {
	testenv.MustHaveSymlink(t)
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.tempDir("src")
	// src/dir1 is a symlink back to the temp root.
	tg.must(os.Symlink(tg.path("."), tg.path("src/dir1")))
	tg.tempFile("src/dir1/p.go", "package p")
	tg.setenv("GOPATH", tg.path("."))
	tg.cd(tg.path("src"))
	tg.run("list", "-f", "{{.Root}}", "dir1")
	if strings.TrimSpace(tg.getStdout()) != tg.path(".") {
		t.Error("confused by symlinks")
	}
}
// Issue 14054. Vendor-directory resolution must work when the package
// directory is entered through a symlink.
func TestSymlinksVendor(t *testing.T) {
	testenv.MustHaveSymlink(t)
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.tempDir("gopath/src/dir1/vendor/v")
	tg.tempFile("gopath/src/dir1/p.go", "package main\nimport _ `v`\nfunc main(){}")
	tg.tempFile("gopath/src/dir1/vendor/v/v.go", "package v")
	// symdir1 is a symlink to the real package directory inside GOPATH.
	tg.must(os.Symlink(tg.path("gopath/src/dir1"), tg.path("symdir1")))
	tg.setenv("GOPATH", tg.path("gopath"))
	tg.cd(tg.path("symdir1"))
	tg.run("list", "-f", "{{.Root}}", ".")
	if strings.TrimSpace(tg.getStdout()) != tg.path("gopath") {
		t.Error("list confused by symlinks")
	}
	// All of these should succeed, not die in vendor-handling code.
	tg.run("run", "p.go")
	tg.run("build")
	tg.run("install")
}
// Issue 15201. A vendor directory that is itself a symlink into a tree
// containing a symlink cycle must not break "go list ./...".
func TestSymlinksVendor15201(t *testing.T) {
	testenv.MustHaveSymlink(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("gopath/src/x/y/_vendor/src/x")
	// _vendor/src/x/y points back up at x/y, creating a loop.
	tg.must(os.Symlink("../../..", tg.path("gopath/src/x/y/_vendor/src/x/y")))
	tg.tempFile("gopath/src/x/y/w/w.go", "package w\nimport \"x/y/z\"\n")
	tg.must(os.Symlink("../_vendor/src", tg.path("gopath/src/x/y/w/vendor")))
	tg.tempFile("gopath/src/x/y/z/z.go", "package z\n")
	tg.setenv("GOPATH", tg.path("gopath/src/x/y/_vendor")+string(filepath.ListSeparator)+tg.path("gopath"))
	tg.cd(tg.path("gopath/src"))
	tg.run("list", "./...")
}
// TestSymlinksInternal verifies that internal-package resolution works when
// the importing directory is entered through a symlink.
func TestSymlinksInternal(t *testing.T) {
	testenv.MustHaveSymlink(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("gopath/src/dir1/internal/v")
	tg.tempFile("gopath/src/dir1/p.go", "package main\nimport _ `dir1/internal/v`\nfunc main(){}")
	tg.tempFile("gopath/src/dir1/internal/v/v.go", "package v")
	tg.must(os.Symlink(tg.path("gopath/src/dir1"), tg.path("symdir1")))
	tg.setenv("GOPATH", tg.path("gopath"))
	tg.cd(tg.path("symdir1"))
	tg.run("list", "-f", "{{.Root}}", ".")
	if strings.TrimSpace(tg.getStdout()) != tg.path("gopath") {
		t.Error("list confused by symlinks")
	}
	// All of these should succeed, not die in internal-handling code.
	tg.run("run", "p.go")
	tg.run("build")
	tg.run("install")
}
// Issue 4515. Build tags passed via -tags must apply both to explicitly
// named packages and to "..." wildcard expansion for install and list.
func TestInstallWithTags(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("bin")
	tg.tempFile("src/example/a/main.go", `package main
func main() {}`)
	// example/b builds only when the "mytag" build tag is set.
	tg.tempFile("src/example/b/main.go", `// +build mytag

package main
func main() {}`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("install", "-tags", "mytag", "example/a", "example/b")
	tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/a example/b did not install binaries")
	tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/a example/b did not install binaries")
	tg.must(os.Remove(tg.path("bin/a" + exeSuffix)))
	tg.must(os.Remove(tg.path("bin/b" + exeSuffix)))
	// Same again via the wildcard form.
	tg.run("install", "-tags", "mytag", "example/...")
	tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/... did not install binaries")
	tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/... did not install binaries")
	tg.run("list", "-tags", "mytag", "example/b...")
	if strings.TrimSpace(tg.getStdout()) != "example/b" {
		t.Error("go list example/b did not find example/b")
	}
}
// Issue 4773
// TestCaseCollisions verifies that import paths and file names differing
// only by letter case are reported as collisions by go list and go build,
// on both case-sensitive and case-insensitive file systems.
func TestCaseCollisions(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src/example/a/pkg")
	tg.tempDir("src/example/a/Pkg")
	tg.tempDir("src/example/b")
	tg.setenv("GOPATH", tg.path("."))
	// example/a imports both pkg and Pkg: an import-path case collision.
	tg.tempFile("src/example/a/a.go", `package p
import (
	_ "example/a/pkg"
	_ "example/a/Pkg"
)`)
	tg.tempFile("src/example/a/pkg/pkg.go", `package pkg`)
	tg.tempFile("src/example/a/Pkg/pkg.go", `package pkg`)
	tg.run("list", "-json", "example/a")
	tg.grepStdout("case-insensitive import collision", "go list -json example/a did not report import collision")
	tg.runFail("build", "example/a")
	tg.grepStderr("case-insensitive import collision", "go build example/a did not report import collision")
	// file.go vs FILE.go: a file-name case collision within one package.
	tg.tempFile("src/example/b/file.go", `package b`)
	tg.tempFile("src/example/b/FILE.go", `package b`)
	f, err := os.Open(tg.path("src/example/b"))
	tg.must(err)
	names, err := f.Readdirnames(0)
	tg.must(err)
	tg.check(f.Close())
	args := []string{"list"}
	if len(names) == 2 {
		// case-sensitive file system, let directory read find both files
		args = append(args, "example/b")
	} else {
		// case-insensitive file system, list files explicitly on command line
		args = append(args, tg.path("src/example/b/file.go"), tg.path("src/example/b/FILE.go"))
	}
	tg.runFail(args...)
	tg.grepStderr("case-insensitive file name collision", "go list example/b did not report file name collision")
	tg.runFail("list", "example/a/pkg", "example/a/Pkg")
	tg.grepStderr("case-insensitive import collision", "go list example/a/pkg example/a/Pkg did not report import collision")
	// With -e the collision is reported in the JSON output instead of failing.
	tg.run("list", "-json", "-e", "example/a/pkg", "example/a/Pkg")
	tg.grepStdout("case-insensitive import collision", "go list -json -e example/a/pkg example/a/Pkg did not report import collision")
	tg.runFail("build", "example/a/pkg", "example/a/Pkg")
	tg.grepStderr("case-insensitive import collision", "go build example/a/pkg example/a/Pkg did not report import collision")
}
// Issue 17451, 17662.
// TestSymlinkWarning verifies that "go list" mentions an ignored symlink
// only for the wildcard pattern that actually traverses it (example/xx/...),
// not for a narrower pattern that never reaches it (example/xx/z...).
func TestSymlinkWarning(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.tempDir("src/example/xx")
	tg.tempDir("yy/zz")
	tg.tempFile("yy/zz/zz.go", "package zz\n")
	if err := os.Symlink(tg.path("yy"), tg.path("src/example/xx/yy")); err != nil {
		// Skipf, not Skip: Skip does not interpret format verbs, so the
		// original t.Skip("symlink failed: %v", err) printed a literal "%v".
		t.Skipf("symlink failed: %v", err)
	}
	tg.run("list", "example/xx/z...")
	tg.grepStdoutNot(".", "list should not have matched anything")
	tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
	tg.grepStderrNot("symlink", "list should not have reported symlink")
	tg.run("list", "example/xx/...")
	tg.grepStdoutNot(".", "list should not have matched anything")
	tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
	tg.grepStderr("ignoring symlink", "list should have reported symlink")
}
// Issue 8181. "go get -t" must also download test dependencies
// (here, x/build/gerrit pulled in by the test files).
func TestGoGetDashTIssue8181(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "-v", "-t", "github.com/rsc/go-get-issue-8181/a", "github.com/rsc/go-get-issue-8181/b")
	tg.run("list", "...")
	tg.grepStdout("x/build/gerrit", "missing expected x/build/gerrit")
}
// Issue 11307. "go get -u" must work from any directory, not just from
// inside the package's checkout.
func TestIssue11307(t *testing.T) {
	// go get -u was not working except in checkout directory
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "github.com/rsc/go-get-issue-11307")
	tg.run("get", "-u", "github.com/rsc/go-get-issue-11307") // was failing
}
// TestShadowingLogic verifies go's package-shadowing rules with a two-entry
// GOPATH: a package shadowed by GOROOT or by an earlier GOPATH entry gets a
// synthesized import path and a non-empty ConflictDir, and "go install"
// refuses to install a shadowed package, naming the shadowing directory.
func TestShadowingLogic(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	pwd := tg.pwd()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", filepath.Join(pwd, "testdata", "shadow", "root1")+sep+filepath.Join(pwd, "testdata", "shadow", "root2"))
	// The math in root1 is not "math" because the standard math is.
	tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root1/src/math")
	pwdForwardSlash := strings.Replace(pwd, string(os.PathSeparator), "/", -1)
	if !strings.HasPrefix(pwdForwardSlash, "/") {
		pwdForwardSlash = "/" + pwdForwardSlash
	}
	// The output will have makeImportValid applies, but we only
	// bother to deal with characters we might reasonably see.
	for _, r := range " :" {
		pwdForwardSlash = strings.Replace(pwdForwardSlash, string(r), "_", -1)
	}
	want := "(_" + pwdForwardSlash + "/testdata/shadow/root1/src/math) (" + filepath.Join(runtime.GOROOT(), "src", "math") + ")"
	if strings.TrimSpace(tg.getStdout()) != want {
		t.Error("shadowed math is not shadowed; looking for", want)
	}
	// The foo in root1 is "foo".
	tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root1/src/foo")
	if strings.TrimSpace(tg.getStdout()) != "(foo) ()" {
		t.Error("unshadowed foo is shadowed")
	}
	// The foo in root2 is not "foo" because the foo in root1 got there first.
	tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root2/src/foo")
	want = "(_" + pwdForwardSlash + "/testdata/shadow/root2/src/foo) (" + filepath.Join(pwd, "testdata", "shadow", "root1", "src", "foo") + ")"
	if strings.TrimSpace(tg.getStdout()) != want {
		t.Error("shadowed foo is not shadowed; looking for", want)
	}
	// The error for go install should mention the conflicting directory.
	tg.runFail("install", "./testdata/shadow/root2/src/foo")
	want = "go install: no install location for " + filepath.Join(pwd, "testdata", "shadow", "root2", "src", "foo") + ": hidden by " + filepath.Join(pwd, "testdata", "shadow", "root1", "src", "foo")
	if strings.TrimSpace(tg.getStderr()) != want {
		t.Error("wrong shadowed install error; looking for", want)
	}
}
// Only succeeds if source order is preserved.
// The two example test files depend on being compiled in command-line order.
func TestSourceFileNameOrderPreserved(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "testdata/example1_test.go", "testdata/example2_test.go")
}
// Check that coverage analysis works at all.
// Don't worry about the exact numbers but require not 0.0%.

// zeroCoverageRE matches a "0.0%" coverage figure that is not merely the
// fractional tail of a larger number (e.g. it must not match "10.0%").
// Compiled once at package scope instead of on every checkCoverage call.
var zeroCoverageRE = regexp.MustCompile(`[^0-9]0\.0%`)

// checkCoverage fails the test if data (combined go test output) contains
// any 0.0% coverage result, and logs the output for inspection either way.
func checkCoverage(tg *testgoData, data string) {
	if zeroCoverageRE.MatchString(data) {
		tg.t.Error("some coverage results are 0.0%")
	}
	tg.t.Log(data)
}
// TestCoverageRuns verifies that both -coverpkg and plain -cover runs
// produce nonzero coverage output for standard-library packages.
func TestCoverageRuns(t *testing.T) {
	if testing.Short() {
		t.Skip("don't build libraries for coverage in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-short", "-coverpkg=strings", "strings", "regexp")
	data := tg.getStdout() + tg.getStderr()
	tg.run("test", "-short", "-cover", "strings", "math", "regexp")
	data += tg.getStdout() + tg.getStderr()
	checkCoverage(tg, data)
}
// Check that coverage analysis uses set mode.
// The written profile's header line must read "mode: set" by default.
func TestCoverageUsesSetMode(t *testing.T) {
	if testing.Short() {
		t.Skip("don't build libraries for coverage in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.creatingTemp("testdata/cover.out")
	tg.run("test", "-short", "-cover", "encoding/binary", "-coverprofile=testdata/cover.out")
	data := tg.getStdout() + tg.getStderr()
	if out, err := ioutil.ReadFile("testdata/cover.out"); err != nil {
		t.Error(err)
	} else {
		if !bytes.Contains(out, []byte("mode: set")) {
			t.Error("missing mode: set")
		}
	}
	checkCoverage(tg, data)
}
// TestCoverageUsesAtomicModeForRace verifies that combining -race with
// -cover switches the coverage profile to "mode: atomic".
func TestCoverageUsesAtomicModeForRace(t *testing.T) {
	if testing.Short() {
		t.Skip("don't build libraries for coverage in short mode")
	}
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.creatingTemp("testdata/cover.out")
	tg.run("test", "-short", "-race", "-cover", "encoding/binary", "-coverprofile=testdata/cover.out")
	data := tg.getStdout() + tg.getStderr()
	if out, err := ioutil.ReadFile("testdata/cover.out"); err != nil {
		t.Error(err)
	} else {
		if !bytes.Contains(out, []byte("mode: atomic")) {
			t.Error("missing mode: atomic")
		}
	}
	checkCoverage(tg, data)
}
// TestCoverageImportMainLoop verifies that importing package main is
// rejected with the same error whether or not -cover is in effect.
func TestCoverageImportMainLoop(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("test", "importmain/test")
	tg.grepStderr("not an importable package", "did not detect import main")
	tg.runFail("test", "-cover", "importmain/test")
	tg.grepStderr("not an importable package", "did not detect import main")
}
// TestPluginNonMain verifies that -buildmode=plugin rejects a non-main
// package.
func TestPluginNonMain(t *testing.T) {
	wd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	pkg := filepath.Join(wd, "testdata", "testdep", "p2")
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("build", "-buildmode=plugin", pkg)
}
// TestTestEmpty runs "go test -cover -race" over a matrix of packages that
// have various empty combinations of source, test, and xtest files,
// checking that none of them crash the tool.
func TestTestEmpty(t *testing.T) {
	if !canRace {
		t.Skip("no race detector")
	}
	wd, _ := os.Getwd()
	testdata := filepath.Join(wd, "testdata")
	for _, dir := range []string{"pkg", "test", "xtest", "pkgtest", "pkgxtest", "pkgtestxtest", "testxtest"} {
		t.Run(dir, func(t *testing.T) {
			tg := testgo(t)
			defer tg.cleanup()
			tg.setenv("GOPATH", testdata)
			tg.cd(filepath.Join(testdata, "src/empty/"+dir))
			tg.run("test", "-cover", "-coverpkg=.", "-race")
		})
		// In short mode one representative case is enough.
		if testing.Short() {
			break
		}
	}
}
// TestNoGoError verifies the wording of the "no Go files" family of build
// errors for test-only, build-constraint-excluded, and truly empty packages.
func TestNoGoError(t *testing.T) {
	wd, _ := os.Getwd()
	testdata := filepath.Join(wd, "testdata")
	for _, dir := range []string{"empty/test", "empty/xtest", "empty/testxtest", "exclude", "exclude/ignore", "exclude/empty"} {
		t.Run(dir, func(t *testing.T) {
			tg := testgo(t)
			defer tg.cleanup()
			tg.setenv("GOPATH", testdata)
			tg.cd(filepath.Join(testdata, "src"))
			tg.runFail("build", "./"+dir)
			// Pick the expected message from the directory's contents.
			var want string
			if strings.Contains(dir, "test") {
				want = "no non-test Go files in "
			} else if dir == "exclude" {
				want = "build constraints exclude all Go files in "
			} else {
				want = "no Go files in "
			}
			tg.grepStderr(want, "wrong reason for failure")
		})
	}
}
// TestTestRaceInstall verifies that after "go install -race std" into a
// separate pkgdir, "go test -i -race" finds everything cached and
// rebuilds nothing (stderr stays empty).
func TestTestRaceInstall(t *testing.T) {
	if !canRace {
		t.Skip("no race detector")
	}
	if testing.Short() && testenv.Builder() == "" {
		t.Skip("don't rebuild the standard library in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.tempDir("pkg")
	pkgdir := tg.path("pkg")
	tg.run("install", "-race", "-pkgdir="+pkgdir, "std")
	tg.run("test", "-race", "-pkgdir="+pkgdir, "-i", "-v", "empty/pkg")
	if tg.getStderr() != "" {
		t.Error("go test -i -race: rebuilds cached packages")
	}
}
// TestBuildDryRunWithCgo verifies that "go build -n" on a cgo package does
// not attempt to stat archive files that the dry run never created.
func TestBuildDryRunWithCgo(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("foo.go", `package main

/*
#include <limits.h>
*/
import "C"

func main() {
        println(C.INT_MAX)
}`)
	tg.run("build", "-n", tg.path("foo.go"))
	tg.grepStderrNot(`os.Stat .* no such file or directory`, "unexpected stat of archive file")
}
// TestCoverageWithCgo verifies that coverage instrumentation produces
// nonzero results for several cgo package layouts in testdata.
func TestCoverageWithCgo(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	for _, dir := range []string{"cgocover", "cgocover2", "cgocover3", "cgocover4"} {
		t.Run(dir, func(t *testing.T) {
			tg := testgo(t)
			tg.parallel()
			defer tg.cleanup()
			tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
			tg.run("test", "-short", "-cover", dir)
			data := tg.getStdout() + tg.getStderr()
			checkCoverage(tg, data)
		})
	}
}
// TestCgoAsmError verifies that a cgo package containing a Go assembly
// file is rejected with a clear error.
func TestCgoAsmError(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("build", "cgoasm")
	tg.grepBoth("package using cgo has Go assembly file", "did not detect Go assembly file")
}
// TestCgoDependsOnSyscall verifies that a race-mode build of a cgo package
// succeeds even after all cached *_race packages are removed from GOROOT,
// forcing the syscall dependency to be rebuilt.
func TestCgoDependsOnSyscall(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test that removes $GOROOT/pkg/*_race in short mode")
	}
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}
	tg := testgo(t)
	defer tg.cleanup()
	// Clear any cached race-enabled standard library packages.
	files, err := filepath.Glob(filepath.Join(runtime.GOROOT(), "pkg", "*_race"))
	tg.must(err)
	for _, file := range files {
		tg.check(os.RemoveAll(file))
	}
	tg.tempFile("src/foo/foo.go", `
package foo

//#include <stdio.h>
import "C"`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("build", "-race", "foo")
}
// TestCgoShowsFullPathNames verifies that a cgo compile error reports the
// package's full import path, not just the bare directory name.
func TestCgoShowsFullPathNames(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	// Deliberately malformed (unterminated function) to force an error.
	tg.tempFile("src/x/y/dirname/foo.go", `
package foo
import "C"
func f() {`)
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "x/y/dirname")
	tg.grepBoth("x/y/dirname", "error did not use full path")
}
// TestCgoHandlesWlORIGIN verifies that a cgo LDFLAGS directive containing
// the literal $ORIGIN rpath token survives the build unmangled.
func TestCgoHandlesWlORIGIN(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/origin/origin.go", `package origin
// #cgo !darwin LDFLAGS: -Wl,-rpath,$ORIGIN
// void f(void) {}
import "C"
func f() { C.f() }`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("build", "origin")
}
// TestCgoPkgConfig verifies that #cgo pkg-config directives work end to
// end: a local .pc file's Cflags (including an escaped space) must reach
// the C compiler, observable from the program's runtime behavior.
func TestCgoPkgConfig(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("env", "PKG_CONFIG")
	pkgConfig := strings.TrimSpace(tg.getStdout())
	// Skip if the host pkg-config is too old for the flags we use.
	if out, err := exec.Command(pkgConfig, "--atleast-pkgconfig-version", "0.24").CombinedOutput(); err != nil {
		t.Skipf("%s --atleast-pkgconfig-version 0.24: %v\n%s", pkgConfig, err, out)
	}
	// OpenBSD's pkg-config is strict about whitespace and only
	// supports backslash-escaped whitespace. It does not support
	// quotes, which the normal freedesktop.org pkg-config does
	// support. See http://man.openbsd.org/pkg-config.1
	tg.tempFile("foo.pc", `
Name: foo
Description: The foo library
Version: 1.0.0
Cflags: -Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world
`)
	tg.tempFile("foo.go", `package main

/*
#cgo pkg-config: foo
int value() {
	return DEFINED_FROM_PKG_CONFIG;
}
*/
import "C"
import "os"

func main() {
	if C.value() != 42 {
		println("value() =", C.value(), "wanted 42")
		os.Exit(1)
	}
}
`)
	tg.setenv("PKG_CONFIG_PATH", tg.path("."))
	tg.run("run", tg.path("foo.go"))
}
// "go test -c -test.bench=XXX errors" should not hang
func TestIssue6480(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	// -c compiles only; the bench flag must not cause a hang at build time.
	tg.run("test", "-c", "-test.bench=XXX", "errors")
}
// cmd/cgo: undefined reference when linking a C-library using gccgo
// The "#cgo LDFLAGS:" directive must be forwarded to gccgo's link stage.
func TestIssue7573(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	if _, err := exec.LookPath("gccgo"); err != nil {
		t.Skip("skipping because no gccgo compiler found")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/cgoref/cgoref.go", `
package main
// #cgo LDFLAGS: -L alibpath -lalib
// void f(void) {}
import "C"

func main() { C.f() }`)
	tg.setenv("GOPATH", tg.path("."))
	// -n prints the commands; inspect that gccgo gets the LDFLAGS.
	tg.run("build", "-n", "-compiler", "gccgo", "cgoref")
	tg.grepStderr(`gccgo.*\-L [^ ]*alibpath \-lalib`, `no Go-inline "#cgo LDFLAGS:" ("-L alibpath -lalib") passed to gccgo linking stage`)
}
// TestListTemplateContextFunction verifies the {{context.X}} template
// function of "go list" for each build-context field; fields with an
// empty want value are only checked to render without error.
func TestListTemplateContextFunction(t *testing.T) {
	t.Parallel()
	for _, tt := range []struct {
		v    string
		want string
	}{
		{"GOARCH", runtime.GOARCH},
		{"GOOS", runtime.GOOS},
		{"GOROOT", filepath.Clean(runtime.GOROOT())},
		{"GOPATH", os.Getenv("GOPATH")},
		{"CgoEnabled", ""},
		{"UseAllFiles", ""},
		{"Compiler", ""},
		{"BuildTags", ""},
		{"ReleaseTags", ""},
		{"InstallSuffix", ""},
	} {
		tt := tt // capture range variable for the parallel subtest
		t.Run(tt.v, func(t *testing.T) {
			tg := testgo(t)
			tg.parallel()
			defer tg.cleanup()
			tmpl := "{{context." + tt.v + "}}"
			tg.run("list", "-f", tmpl)
			if tt.want == "" {
				return
			}
			if got := strings.TrimSpace(tg.getStdout()); got != tt.want {
				t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want)
			}
		})
	}
}
// cmd/go: "go test" should fail if package does not build
func TestIssue7108(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("test", "notest")
}
// cmd/go: go test -a foo does not rebuild regexp.
// With -a everything must be rebuilt; -x lets us see regexp in the commands.
func TestIssue6844(t *testing.T) {
	if testing.Short() {
		t.Skip("don't rebuild the standard library in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.creatingTemp("deps.test" + exeSuffix)
	tg.run("test", "-x", "-a", "-c", "testdata/dep_test.go")
	tg.grepStderr("regexp", "go test -x -a -c testdata/dep-test.go did not rebuild regexp")
}
// TestBuildDashIInstallsDependencies verifies that "go build -i" installs a
// stale dependency on the first run and finds it up to date on the second,
// for both a library target and a command target.
func TestBuildDashIInstallsDependencies(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/x/y/foo/foo.go", `package foo
func F() {}`)
	tg.tempFile("src/x/y/bar/bar.go", `package bar
import "x/y/foo"
func F() { foo.F() }`)
	tg.setenv("GOPATH", tg.path("."))
	// checkbar touches foo to make it stale, then builds bar twice with -i:
	// the first build must compile foo, the second must not.
	checkbar := func(desc string) {
		tg.sleep()
		tg.must(os.Chtimes(tg.path("src/x/y/foo/foo.go"), time.Now(), time.Now()))
		tg.sleep()
		tg.run("build", "-v", "-i", "x/y/bar")
		tg.grepBoth("x/y/foo", "first build -i "+desc+" did not build x/y/foo")
		tg.run("build", "-v", "-i", "x/y/bar")
		tg.grepBothNot("x/y/foo", "second build -i "+desc+" built x/y/foo")
	}
	checkbar("pkg")
	// Repeat with bar as a main package (command) instead of a library.
	tg.creatingTemp("bar" + exeSuffix)
	tg.tempFile("src/x/y/bar/bar.go", `package main
import "x/y/foo"
func main() { foo.F() }`)
	checkbar("cmd")
}
// TestGoBuildInTestOnlyDirectoryFailsWithAGoodError verifies the error
// message when building a directory that contains only _test.go files.
func TestGoBuildInTestOnlyDirectoryFailsWithAGoodError(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("build", "./testdata/testonly")
	tg.grepStderr("no non-test Go files in", "go build ./testdata/testonly produced unexpected error")
}
// TestGoTestDetectsTestOnlyImportCycles verifies that import cycles that
// exist only through test files are detected and reported.
func TestGoTestDetectsTestOnlyImportCycles(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("test", "-c", "testcycle/p3")
	tg.grepStderr("import cycle not allowed in test", "go test testcycle/p3 produced unexpected error")
	tg.runFail("test", "-c", "testcycle/q1")
	tg.grepStderr("import cycle not allowed in test", "go test testcycle/q1 produced unexpected error")
}
// TestGoTestFooTestWorks verifies that a standalone _test.go file can be
// run directly by path.
func TestGoTestFooTestWorks(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "testdata/standalone_test.go")
}
// TestGoTestFlagsAfterPackage verifies that go test's own -v and the test
// binary's -v (after -args) are kept distinct regardless of ordering.
func TestGoTestFlagsAfterPackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "testdata/flag_test.go", "-v", "-args", "-v=7") // Two distinct -v flags.
	tg.run("test", "-v", "testdata/flag_test.go", "-args", "-v=7") // Two distinct -v flags.
}
// TestGoTestXtestonlyWorks verifies that a package containing only
// external (xtest) test files can be cleaned and tested.
func TestGoTestXtestonlyWorks(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("clean", "-i", "xtestonly")
	tg.run("test", "xtestonly")
}
// TestGoTestBuildsAnXtestContainingOnlyNonRunnableExamples verifies that an
// xtest whose only contents are non-runnable examples still gets compiled.
func TestGoTestBuildsAnXtestContainingOnlyNonRunnableExamples(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-v", "./testdata/norunexample")
	tg.grepStdout("File with non-runnable example was built.", "file with non-runnable example was not built")
}
// TestGoGenerateHandlesSimpleCommand verifies that a plain //go:generate
// directive runs its command.
func TestGoGenerateHandlesSimpleCommand(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("skipping because windows has no echo command")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("generate", "./testdata/generate/test1.go")
	tg.grepStdout("Success", "go generate ./testdata/generate/test1.go generated wrong output")
}
// TestGoGenerateHandlesCommandAlias verifies the //go:generate -command
// alias mechanism.
func TestGoGenerateHandlesCommandAlias(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("skipping because windows has no echo command")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("generate", "./testdata/generate/test2.go")
	tg.grepStdout("Now is the time for all good men", "go generate ./testdata/generate/test2.go generated wrong output")
}
// TestGoGenerateVariableSubstitution verifies substitution of $GOARCH,
// $GOFILE, $GOLINE and friends inside //go:generate directives.
func TestGoGenerateVariableSubstitution(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("skipping because windows has no echo command")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("generate", "./testdata/generate/test3.go")
	tg.grepStdout(runtime.GOARCH+" test3.go:7 pabc xyzp/test3.go/123", "go generate ./testdata/generate/test3.go generated wrong output")
}
// TestGoGenerateRunFlag verifies that -run filters which //go:generate
// directives execute (pattern "y.s" matches "yes" but not "no").
func TestGoGenerateRunFlag(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("skipping because windows has no echo command")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("generate", "-run", "y.s", "./testdata/generate/test4.go")
	tg.grepStdout("yes", "go generate -run yes ./testdata/generate/test4.go did not select yes")
	tg.grepStdoutNot("no", "go generate -run yes ./testdata/generate/test4.go selected no")
}
// TestGoGenerateEnv verifies that generators run with the documented
// environment variables (GOARCH, GOOS, GOFILE, GOLINE, GOPACKAGE, DOLLAR)
// set, by generating with the system "env" command and grepping its output.
func TestGoGenerateEnv(t *testing.T) {
	switch runtime.GOOS {
	case "plan9", "windows":
		t.Skipf("skipping because %s does not have the env command", runtime.GOOS)
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("env.go", "package main\n\n//go:generate env")
	tg.run("generate", tg.path("env.go"))
	for _, v := range []string{"GOARCH", "GOOS", "GOFILE", "GOLINE", "GOPACKAGE", "DOLLAR"} {
		tg.grepStdout("^"+v+"=", "go generate environment missing "+v)
	}
}
// TestGoGenerateBadImports verifies that go generate still runs directives
// in a package whose imports do not resolve.
func TestGoGenerateBadImports(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("skipping because windows has no echo command")
	}
	// This package has an invalid import causing an import cycle,
	// but go generate is supposed to still run.
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("generate", "gencycle")
	tg.grepStdout("hello world", "go generate gencycle did not run generator")
}
// TestGoGetCustomDomainWildcard verifies that "go get -u" with a "..."
// wildcard on a custom (vanity) import domain downloads and installs.
func TestGoGetCustomDomainWildcard(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "-u", "rsc.io/pdf/...")
	tg.wantExecutable(tg.path("bin/pdfpasswd"+exeSuffix), "did not build rsc/io/pdf/pdfpasswd")
}
// TestGoGetInternalWildcard checks that a wildcard go get succeeds even
// when the matched tree contains internal packages (issue 11960).
// Requires external network.
func TestGoGetInternalWildcard(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	// used to fail with errors about internal packages
	tg.run("get", "github.com/rsc/go-get-issue-11960/...")
}
// TestGoVetWithExternalTests checks that "go vet" also analyzes a
// package's external (_test package) test files and reports the
// Printf mistake planted in testdata's vetpkg.
func TestGoVetWithExternalTests(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.run("install", "cmd/vet")
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("vet", "vetpkg")
	tg.grepBoth("missing argument for Printf", "go vet vetpkg did not find missing argument for Printf")
}
// TestGoVetWithTags checks that "go vet -tags" includes files guarded
// by the given build tag in the analysis (the tagged c.go contains a
// format-string error).
func TestGoVetWithTags(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.run("install", "cmd/vet")
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("vet", "-tags", "tagtest", "vetpkg")
	tg.grepBoth(`c\.go.*wrong number of args for format`, "go vet vetpkg did not run scan tagged file")
}
// TestGoVetWithFlagsOn checks that explicitly enabling a vet check
// (-printf) still reports the planted Printf mistake in vetpkg.
func TestGoVetWithFlagsOn(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.run("install", "cmd/vet")
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("vet", "-printf", "vetpkg")
	tg.grepBoth("missing argument for Printf", "go vet -printf vetpkg did not find missing argument for Printf")
}
// TestGoVetWithFlagsOff checks that disabling the printf check
// (-printf=false) suppresses the Printf diagnostic, so vet succeeds
// on the otherwise-failing vetpkg.
func TestGoVetWithFlagsOff(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.run("install", "cmd/vet")
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("vet", "-printf=false", "vetpkg")
}
// Issue 9767, 19769.
// TestGoGetDotSlashDownload checks that "go get ./pkg" run from inside
// a remote-rooted directory downloads the package (issues 9767, 19769).
// Requires external network.
func TestGoGetDotSlashDownload(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("src/rsc.io")
	tg.setenv("GOPATH", tg.path("."))
	tg.cd(tg.path("src/rsc.io"))
	tg.run("get", "./pprof_mac_fix")
}
// Issue 13037: Was not parsing <meta> tags in 404 served over HTTPS
// TestGoGetHTTPS404 checks that go get parses <meta> import tags even
// when the page is served as a 404 over HTTPS (issue 13037).
// Requires external network.
func TestGoGetHTTPS404(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	switch runtime.GOOS {
	case "darwin", "linux", "freebsd":
	default:
		t.Skipf("test case does not work on %s", runtime.GOOS)
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "bazil.org/fuse/fs/fstestutil")
}
// Test that you cannot import a main package.
// See golang.org/issue/4210 and golang.org/issue/17475.
// TestImportMain checks that importing a package main is rejected from
// any other package — ordinary packages, their internal tests, and
// xtests — while a package main's own test may import it.
// See golang.org/issue/4210 and golang.org/issue/17475.
func TestImportMain(t *testing.T) {
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()

	// Importing package main from that package main's test should work.
	tg.tempFile("src/x/main.go", `package main
var X int
func main() {}`)
	tg.tempFile("src/x/main_test.go", `package main_test
import xmain "x"
import "testing"
var _ = xmain.X
func TestFoo(t *testing.T) {}
`)
	tg.setenv("GOPATH", tg.path("."))
	tg.creatingTemp("x" + exeSuffix)
	tg.run("build", "x")
	tg.run("test", "x")

	// Importing package main from another package should fail.
	tg.tempFile("src/p1/p.go", `package p1
import xmain "x"
var _ = xmain.X
`)
	tg.runFail("build", "p1")
	tg.grepStderr("import \"x\" is a program, not an importable package", "did not diagnose package main")

	// ... even in that package's test.
	tg.tempFile("src/p2/p.go", `package p2
`)
	tg.tempFile("src/p2/p_test.go", `package p2
import xmain "x"
import "testing"
var _ = xmain.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "p2")
	tg.runFail("test", "p2")
	tg.grepStderr("import \"x\" is a program, not an importable package", "did not diagnose package main")

	// ... even if that package's test is an xtest.
	tg.tempFile("src/p3/p.go", `package p
`)
	tg.tempFile("src/p3/p_test.go", `package p_test
import xmain "x"
import "testing"
var _ = xmain.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "p3")
	tg.runFail("test", "p3")
	tg.grepStderr("import \"x\" is a program, not an importable package", "did not diagnose package main")

	// ... even if that package is a package main
	tg.tempFile("src/p4/p.go", `package main
func main() {}
`)
	tg.tempFile("src/p4/p_test.go", `package main
import xmain "x"
import "testing"
var _ = xmain.X
func TestFoo(t *testing.T) {}
`)
	tg.creatingTemp("p4" + exeSuffix)
	tg.run("build", "p4")
	tg.runFail("test", "p4")
	tg.grepStderr("import \"x\" is a program, not an importable package", "did not diagnose package main")

	// ... even if that package is a package main using an xtest.
	tg.tempFile("src/p5/p.go", `package main
func main() {}
`)
	tg.tempFile("src/p5/p_test.go", `package main_test
import xmain "x"
import "testing"
var _ = xmain.X
func TestFoo(t *testing.T) {}
`)
	tg.creatingTemp("p5" + exeSuffix)
	tg.run("build", "p5")
	tg.runFail("test", "p5")
	tg.grepStderr("import \"x\" is a program, not an importable package", "did not diagnose package main")
}
// Test that you cannot use a local import in a package
// accessed by a non-local import (found in a GOPATH/GOROOT).
// See golang.org/issue/17475.
// TestImportLocal checks that relative (local) imports — "../x", "./x",
// "..", and "." — are rejected inside a package reached by a non-local
// import path (one found via GOPATH/GOROOT), including from that
// package's internal tests and xtests. An ordinary path import of the
// same package must still work. See golang.org/issue/17475.
func TestImportLocal(t *testing.T) {
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()

	tg.tempFile("src/dir/x/x.go", `package x
var X int
`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("build", "dir/x")

	// Ordinary import should work.
	tg.tempFile("src/dir/p0/p.go", `package p0
import "dir/x"
var _ = x.X
`)
	tg.run("build", "dir/p0")

	// Relative import should not.
	tg.tempFile("src/dir/p1/p.go", `package p1
import "../x"
var _ = x.X
`)
	tg.runFail("build", "dir/p1")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in a test.
	tg.tempFile("src/dir/p2/p.go", `package p2
`)
	tg.tempFile("src/dir/p2/p_test.go", `package p2
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/p2")
	tg.runFail("test", "dir/p2")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in an xtest.
	tg.tempFile("src/dir/p2/p_test.go", `package p2_test
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/p2")
	tg.runFail("test", "dir/p2")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// Relative import starting with ./ should not work either.
	tg.tempFile("src/dir/d.go", `package dir
import "./x"
var _ = x.X
`)
	tg.runFail("build", "dir")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in a test.
	tg.tempFile("src/dir/d.go", `package dir
`)
	tg.tempFile("src/dir/d_test.go", `package dir
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir")
	tg.runFail("test", "dir")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in an xtest.
	tg.tempFile("src/dir/d_test.go", `package dir_test
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir")
	tg.runFail("test", "dir")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// Relative import plain ".." should not work.
	tg.tempFile("src/dir/x/y/y.go", `package dir
import ".."
var _ = x.X
`)
	tg.runFail("build", "dir/x/y")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in a test.
	tg.tempFile("src/dir/x/y/y.go", `package y
`)
	tg.tempFile("src/dir/x/y/y_test.go", `package y
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x/y")
	tg.runFail("test", "dir/x/y")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in an x test.
	tg.tempFile("src/dir/x/y/y_test.go", `package y_test
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x/y")
	tg.runFail("test", "dir/x/y")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// Relative import "." should not work.
	tg.tempFile("src/dir/x/xx.go", `package x
import "."
var _ = x.X
`)
	tg.runFail("build", "dir/x")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in a test.
	tg.tempFile("src/dir/x/xx.go", `package x
`)
	tg.tempFile("src/dir/x/xx_test.go", `package x
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x")
	tg.runFail("test", "dir/x")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")

	// ... even in an xtest.
	tg.tempFile("src/dir/x/xx.go", `package x
`)
	tg.tempFile("src/dir/x/xx_test.go", `package x_test
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x")
	tg.runFail("test", "dir/x")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
}
// TestGoGetInsecure checks that an HTTP-only repo is rejected by plain
// "go get -d", accepted with -insecure, and rejected again when
// updating without -insecure. SSH fallback is disabled via failSSH so
// the HTTP path is actually exercised. Requires external network.
func TestGoGetInsecure(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.failSSH()

	const repo = "insecure.go-get-issue-15410.appspot.com/pkg/p"

	// Try go get -d of HTTP-only repo (should fail).
	tg.runFail("get", "-d", repo)

	// Try again with -insecure (should succeed).
	tg.run("get", "-d", "-insecure", repo)

	// Try updating without -insecure (should fail).
	tg.runFail("get", "-d", "-u", "-f", repo)
}
// TestGoGetUpdateInsecure checks -insecure behavior on update: the repo
// is first cloned over HTTP manually (so go get did not choose the
// scheme), then "go get -u" must fail without -insecure and succeed
// with it. Requires external network and git.
func TestGoGetUpdateInsecure(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))

	const repo = "github.com/golang/example"

	// Clone the repo via HTTP manually.
	cmd := exec.Command("git", "clone", "-q", "http://"+repo, tg.path("src/"+repo))
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("cloning %v repo: %v\n%s", repo, err, out)
	}

	// Update without -insecure should fail.
	// Update with -insecure should succeed.
	// We need -f to ignore import comments.
	const pkg = repo + "/hello"
	tg.runFail("get", "-d", "-u", "-f", pkg)
	tg.run("get", "-d", "-u", "-f", "-insecure", pkg)
}
// TestGoGetInsecureCustomDomain checks that a custom import domain
// served only over HTTP fails without -insecure and succeeds with it.
// Requires external network.
func TestGoGetInsecureCustomDomain(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))

	const repo = "insecure.go-get-issue-15410.appspot.com/pkg/p"
	tg.runFail("get", "-d", repo)
	tg.run("get", "-d", "-insecure", repo)
}
// TestGoRunDirs checks that "go run" rejects file arguments spread
// across more than one directory, in both argument orders, with a
// diagnostic naming the offending directories.
func TestGoRunDirs(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.cd("testdata/rundir")
	tg.runFail("run", "x.go", "sub/sub.go")
	tg.grepStderr("named files must all be in one directory; have ./ and sub/", "wrong output")
	tg.runFail("run", "sub/sub.go", "x.go")
	tg.grepStderr("named files must all be in one directory; have sub/ and ./", "wrong output")
}
// TestGoInstallPkgdir checks that "go install -pkgdir DIR" writes the
// target's archive (errors.a) and its dependencies' archives
// (runtime.a) into DIR.
func TestGoInstallPkgdir(t *testing.T) {
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.makeTempdir()
	pkg := tg.path(".")
	tg.run("install", "-pkgdir", pkg, "errors")
	_, err := os.Stat(filepath.Join(pkg, "errors.a"))
	tg.must(err)
	_, err = os.Stat(filepath.Join(pkg, "runtime.a"))
	tg.must(err)
}
// TestGoTestRaceInstallCgo verifies that "go test -race -i runtime/race"
// does not reinstall cmd/cgo (golang.org/issue/10500): the cgo tool
// binary's modification time must be unchanged afterwards.
func TestGoTestRaceInstallCgo(t *testing.T) {
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}

	// golang.org/issue/10500.
	// This used to install a race-enabled cgo.
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("tool", "-n", "cgo")
	cgo := strings.TrimSpace(tg.stdout.String())
	// Locals renamed from old/new: "new" shadowed the builtin.
	before, err := os.Stat(cgo)
	tg.must(err)
	tg.run("test", "-race", "-i", "runtime/race")
	after, err := os.Stat(cgo)
	tg.must(err)
	if !after.ModTime().Equal(before.ModTime()) {
		t.Fatalf("go test -i runtime/race reinstalled cmd/cgo")
	}
}
// TestGoTestRaceFailures checks that the testrace package passes
// without the race detector but fails under -race, for both its test
// (TestRace) and its benchmark (BenchmarkRace), with no spurious PASS.
func TestGoTestRaceFailures(t *testing.T) {
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}

	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))

	tg.run("test", "testrace")

	tg.runFail("test", "-race", "testrace")
	tg.grepStdout("FAIL: TestRace", "TestRace did not fail")
	tg.grepBothNot("PASS", "something passed")

	tg.runFail("test", "-race", "testrace", "-run", "XXX", "-bench", ".")
	tg.grepStdout("FAIL: BenchmarkRace", "BenchmarkRace did not fail")
	tg.grepBothNot("PASS", "something passed")
}
// TestGoTestImportErrorStack checks that when a test dependency fails
// to load, the error message shows the full import chain
// (p1 (test) -> p2 -> p3) rather than just the failing package.
func TestGoTestImportErrorStack(t *testing.T) {
	const out = `package testdep/p1 (test)
	imports testdep/p2
	imports testdep/p3: build constraints exclude all Go files `

	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("test", "testdep/p1")
	if !strings.Contains(tg.stderr.String(), out) {
		t.Fatalf("did not give full import stack:\n\n%s", tg.stderr.String())
	}
}
// TestGoGetUpdate checks that recursive "go get -u" walks the package's
// current dependencies, not its former ones (golang.org/issue/9224):
// the test repo is rewound one commit so its dependency set differs
// from HEAD, then updated. Requires external network and git.
func TestGoGetUpdate(t *testing.T) {
	// golang.org/issue/9224.
	// The recursive updating was trying to walk to
	// former dependencies, not current ones.
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))

	// rewind fetches the command and resets its library dependency
	// one commit back, recreating the "former dependency" situation.
	rewind := func() {
		tg.run("get", "github.com/rsc/go-get-issue-9224-cmd")
		cmd := exec.Command("git", "reset", "--hard", "HEAD~")
		cmd.Dir = tg.path("src/github.com/rsc/go-get-issue-9224-lib")
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("git: %v\n%s", err, out)
		}
	}

	rewind()
	tg.run("get", "-u", "github.com/rsc/go-get-issue-9224-cmd")

	// Again with -d -u.
	rewind()
	tg.run("get", "-d", "-u", "github.com/rsc/go-get-issue-9224-cmd")
}
// Issue #20512.
// TestGoGetRace checks that "go get -race" works (issue #20512).
// Requires external network and race-detector support.
func TestGoGetRace(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "-race", "github.com/rsc/go-get-issue-9224-cmd")
}
// TestGoGetDomainRoot checks that "go get" of a bare domain root
// (foo.io rather than foo.io/subdir) works consistently across -d, a
// plain get, -u, and re-gets after removing the source tree
// (golang.org/issue/9357). Requires external network.
func TestGoGetDomainRoot(t *testing.T) {
	// golang.org/issue/9357.
	// go get foo.io (not foo.io/subdir) was not working consistently.
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))

	// go-get-issue-9357.appspot.com is running
	// the code at github.com/rsc/go-get-issue-9357,
	// a trivial Go on App Engine app that serves a
	// <meta> tag for the domain root.
	tg.run("get", "-d", "go-get-issue-9357.appspot.com")
	tg.run("get", "go-get-issue-9357.appspot.com")
	tg.run("get", "-u", "go-get-issue-9357.appspot.com")

	tg.must(os.RemoveAll(tg.path("src/go-get-issue-9357.appspot.com")))
	tg.run("get", "go-get-issue-9357.appspot.com")

	tg.must(os.RemoveAll(tg.path("src/go-get-issue-9357.appspot.com")))
	tg.run("get", "-u", "go-get-issue-9357.appspot.com")
}
// TestGoInstallShadowedGOPATH checks that "go install" refuses to
// install a package whose import path is shadowed by an earlier GOPATH
// entry, and names the shadowing directory (golang.org/issue/3652).
func TestGoInstallShadowedGOPATH(t *testing.T) {
	// golang.org/issue/3652.
	// NOTE(review): the original comment here ("go get foo.io ... was
	// not working consistently") was copy-pasted from the previous test;
	// issue 3652 is about install targets hidden by GOPATH shadowing.
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("gopath1")+string(filepath.ListSeparator)+tg.path("gopath2"))

	tg.tempDir("gopath1/src/test")
	tg.tempDir("gopath2/src/test")
	tg.tempFile("gopath2/src/test/main.go", "package main\nfunc main(){}\n")

	tg.cd(tg.path("gopath2/src/test"))
	tg.runFail("install")
	tg.grepStderr("no install location for.*gopath2.src.test: hidden by .*gopath1.src.test", "missing error")
}
// TestGoBuildGOPATHOrder checks that compiler -I arguments follow
// GOPATH order, so a stale archive in a later GOPATH entry (p2's bad
// foo.a) does not shadow the source in an earlier one.
// See golang.org/issue/14176#issuecomment-179895769 and issue 14192.
func TestGoBuildGOPATHOrder(t *testing.T) {
	// golang.org/issue/14176#issuecomment-179895769
	// golang.org/issue/14192
	// -I arguments to compiler could end up not in GOPATH order,
	// leading to unexpected import resolution in the compiler.
	// This is still not a complete fix (see golang.org/issue/14271 and next test)
	// but it is clearly OK and enough to fix both of the two reported
	// instances of the underlying problem. It will have to do for now.
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("p1")+string(filepath.ListSeparator)+tg.path("p2"))

	tg.tempFile("p1/src/foo/foo.go", "package foo\n")
	tg.tempFile("p2/src/baz/baz.go", "package baz\n")
	// Deliberately corrupt archive: must be ignored because p1 comes first.
	tg.tempFile("p2/pkg/"+runtime.GOOS+"_"+runtime.GOARCH+"/foo.a", "bad\n")
	tg.tempFile("p1/src/bar/bar.go", `
package bar
import _ "baz"
import _ "foo"
`)

	tg.run("install", "-x", "bar")
}
// TestGoBuildGOPATHOrderBroken is the harder variant of the GOPATH
// ordering test: bad archives exist in BOTH GOPATH entries, so the
// build must succeed in either order. Known broken and skipped; see
// golang.org/issue/14271.
func TestGoBuildGOPATHOrderBroken(t *testing.T) {
	// This test is known not to work.
	// See golang.org/issue/14271.
	t.Skip("golang.org/issue/14271")

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()

	tg.tempFile("p1/src/foo/foo.go", "package foo\n")
	tg.tempFile("p2/src/baz/baz.go", "package baz\n")
	tg.tempFile("p1/pkg/"+runtime.GOOS+"_"+runtime.GOARCH+"/baz.a", "bad\n")
	tg.tempFile("p2/pkg/"+runtime.GOOS+"_"+runtime.GOARCH+"/foo.a", "bad\n")
	tg.tempFile("p1/src/bar/bar.go", `
package bar
import _ "baz"
import _ "foo"
`)

	colon := string(filepath.ListSeparator)
	tg.setenv("GOPATH", tg.path("p1")+colon+tg.path("p2"))
	tg.run("install", "-x", "bar")

	tg.setenv("GOPATH", tg.path("p2")+colon+tg.path("p1"))
	tg.run("install", "-x", "bar")
}
// TestIssue11709 checks that "go run" does not leak environment
// variables into the child process: with TERM unset in the test
// environment, the child exits nonzero if it sees TERM.
func TestIssue11709(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("run.go", `
package main
import "os"
func main() {
	if os.Getenv("TERM") != "" {
		os.Exit(1)
	}
}`)
	tg.unsetenv("TERM")
	tg.run("run", tg.path("run.go"))
}
// TestIssue12096 checks the same environment hygiene for "go test":
// with TERM unset, the compiled test binary must not observe TERM.
func TestIssue12096(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("test_test.go", `
package main
import ("os"; "testing")
func TestEnv(t *testing.T) {
	if os.Getenv("TERM") != "" {
		t.Fatal("TERM is set")
	}
}`)
	tg.unsetenv("TERM")
	tg.run("test", tg.path("test_test.go"))
}
// TestGoBuildOutput checks "go build" output-file naming rules:
// building a main package writes an executable (with the platform's
// exe suffix but never a stray non-suffixed twin), -o overrides the
// name, building a non-main package writes nothing unless -o names an
// archive, and -o with multiple packages is rejected.
func TestGoBuildOutput(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()

	tg.makeTempdir()
	tg.cd(tg.path("."))

	// nonExeSuffix is the suffix the binary must NOT have: on Windows
	// (exeSuffix == ".exe") an unsuffixed file would be wrong; elsewhere
	// a ".exe" file would be.
	nonExeSuffix := ".exe"
	if exeSuffix == ".exe" {
		nonExeSuffix = ""
	}

	tg.tempFile("x.go", "package main\nfunc main(){}\n")
	tg.run("build", "x.go")
	tg.wantExecutable("x"+exeSuffix, "go build x.go did not write x"+exeSuffix)
	tg.must(os.Remove(tg.path("x" + exeSuffix)))
	tg.mustNotExist("x" + nonExeSuffix)

	tg.run("build", "-o", "myprog", "x.go")
	tg.mustNotExist("x")
	tg.mustNotExist("x.exe")
	tg.wantExecutable("myprog", "go build -o myprog x.go did not write myprog")
	tg.mustNotExist("myprog.exe")

	tg.tempFile("p.go", "package p\n")
	tg.run("build", "p.go")
	tg.mustNotExist("p")
	tg.mustNotExist("p.a")
	tg.mustNotExist("p.o")
	tg.mustNotExist("p.exe")

	tg.run("build", "-o", "p.a", "p.go")
	tg.wantArchive("p.a")

	tg.run("build", "cmd/gofmt")
	tg.wantExecutable("gofmt"+exeSuffix, "go build cmd/gofmt did not write gofmt"+exeSuffix)
	tg.must(os.Remove(tg.path("gofmt" + exeSuffix)))
	tg.mustNotExist("gofmt" + nonExeSuffix)

	tg.run("build", "-o", "mygofmt", "cmd/gofmt")
	tg.wantExecutable("mygofmt", "go build -o mygofmt cmd/gofmt did not write mygofmt")
	tg.mustNotExist("mygofmt.exe")
	tg.mustNotExist("gofmt")
	tg.mustNotExist("gofmt.exe")

	tg.run("build", "sync/atomic")
	tg.mustNotExist("atomic")
	tg.mustNotExist("atomic.exe")

	tg.run("build", "-o", "myatomic.a", "sync/atomic")
	tg.wantArchive("myatomic.a")
	tg.mustNotExist("atomic")
	tg.mustNotExist("atomic.a")
	tg.mustNotExist("atomic.exe")

	tg.runFail("build", "-o", "whatever", "cmd/gofmt", "sync/atomic")
	tg.grepStderr("multiple packages", "did not reject -o with multiple packages")
}
// TestGoBuildARM smoke-tests cross-compilation to linux/arm (GOARM=5)
// and checks that the math package's archive is found. Skipped in
// short mode because cross-compiling is slow.
func TestGoBuildARM(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping cross-compile in short mode")
	}

	tg := testgo(t)
	defer tg.cleanup()

	tg.makeTempdir()
	tg.cd(tg.path("."))

	tg.setenv("GOARCH", "arm")
	tg.setenv("GOOS", "linux")
	tg.setenv("GOARM", "5")
	tg.tempFile("hello.go", `package main
func main() {}`)
	tg.run("build", "hello.go")
	tg.grepStderrNot("unable to find math.a", "did not build math.a correctly")
}
// TestIssue13655 checks that runtime and runtime/internal/atomic both
// list runtime/internal/sys among their dependencies.
func TestIssue13655(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	for _, pkg := range []string{"runtime", "runtime/internal/atomic"} {
		tg.run("list", "-f", "{{.Deps}}", pkg)
		tg.grepStdout("runtime/internal/sys", "did not find required dependency of "+pkg+" on runtime/internal/sys")
	}
}
// For issue 14337.
// TestParallelTest checks that "go test -p=4" can build and run four
// independent test packages concurrently (issue 14337).
func TestParallelTest(t *testing.T) {
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.makeTempdir()
	// One trivial test per package; the package name is templated in.
	const testSrc = `package package_test
		import (
			"testing"
		)
		func TestTest(t *testing.T) {
		}`
	tg.tempFile("src/p1/p1_test.go", strings.Replace(testSrc, "package_test", "p1_test", 1))
	tg.tempFile("src/p2/p2_test.go", strings.Replace(testSrc, "package_test", "p2_test", 1))
	tg.tempFile("src/p3/p3_test.go", strings.Replace(testSrc, "package_test", "p3_test", 1))
	tg.tempFile("src/p4/p4_test.go", strings.Replace(testSrc, "package_test", "p4_test", 1))
	tg.setenv("GOPATH", tg.path("."))
	tg.run("test", "-p=4", "p1", "p2", "p3", "p4")
}
// TestCgoConsistentResults checks that building the same cgo package
// twice (once plain, once with -x) produces byte-identical binaries,
// i.e. the build does not embed temporary work-directory paths. The
// comparison is skipped when the C compiler lacks -fdebug-prefix-map.
func TestCgoConsistentResults(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	switch runtime.GOOS {
	case "freebsd":
		testenv.SkipFlaky(t, 15405)
	case "solaris":
		testenv.SkipFlaky(t, 13247)
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	exe1 := tg.path("cgotest1" + exeSuffix)
	exe2 := tg.path("cgotest2" + exeSuffix)
	tg.run("build", "-o", exe1, "cgotest")
	tg.run("build", "-x", "-o", exe2, "cgotest")
	b1, err := ioutil.ReadFile(exe1)
	tg.must(err)
	b2, err := ioutil.ReadFile(exe2)
	tg.must(err)

	// The -x output from the second build reveals whether the C
	// compiler was invoked with -fdebug-prefix-map=$WORK.
	if !tg.doGrepMatch(`-fdebug-prefix-map=\$WORK`, &tg.stderr) {
		t.Skip("skipping because C compiler does not support -fdebug-prefix-map")
	}
	if !bytes.Equal(b1, b2) {
		t.Error("building cgotest twice did not produce the same output")
	}
}
// Issue 14444: go get -u .../ duplicate loads errors
// TestGoGetUpdateAllDoesNotTryToLoadDuplicates checks that
// "go get -u .../" does not emit duplicate-load errors (issue 14444).
// Requires external network.
func TestGoGetUpdateAllDoesNotTryToLoadDuplicates(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "-u", ".../")
	tg.grepStderrNot("duplicate loads of", "did not remove old packages from cache")
}
// Issue 17119 more duplicate load errors
// TestIssue17119 checks that building testdata's "dupload" package
// fails without duplicate-load or internal errors (issue 17119).
// Requires external network.
func TestIssue17119(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("build", "dupload")
	tg.grepBothNot("duplicate load|internal error", "internal error")
}
// TestFatalInBenchmarkCauseNonZeroExitStatus checks that a benchmark
// calling Fatal makes "go test -bench" report FAIL and exit nonzero.
func TestFatalInBenchmarkCauseNonZeroExitStatus(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.runFail("test", "-run", "^$", "-bench", ".", "./testdata/src/benchfatal")
	tg.grepBothNot("^ok", "test passed unexpectedly")
	tg.grepBoth("FAIL.*benchfatal", "test did not run everything")
}
// TestBinaryOnlyPackages exercises //go:binary-only-package semantics:
// a binary-only package with no installed binary is stale and cannot be
// installed; once a binary exists, the package builds, imports, and
// runs without source; non-source changes don't trigger rebuilds; and
// the marker comment is ignored when not in the first file block (p4).
func TestBinaryOnlyPackages(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))

	tg.tempFile("src/p1/p1.go", `//go:binary-only-package

		package p1
	`)
	tg.wantStale("p1", "cannot access install target", "p1 is binary-only but has no binary, should be stale")
	tg.runFail("install", "p1")
	tg.grepStderr("missing or invalid package binary", "did not report attempt to compile binary-only package")

	// Install a real binary for p1, then delete its source: the
	// installed archive alone must satisfy later builds.
	tg.tempFile("src/p1/p1.go", `
		package p1
		import "fmt"
		func F(b bool) { fmt.Printf("hello from p1\n"); if b { F(false) } }
	`)
	tg.run("install", "p1")
	os.Remove(tg.path("src/p1/p1.go"))
	tg.mustNotExist(tg.path("src/p1/p1.go"))

	// A mistyped marker comment is not a binary-only marker.
	tg.tempFile("src/p2/p2.go", `//go:binary-only-packages-are-not-great

		package p2
		import "p1"
		func F() { p1.F(true) }
	`)
	tg.runFail("install", "p2")
	tg.grepStderr("no Go files", "did not complain about missing sources")

	tg.tempFile("src/p1/missing.go", `//go:binary-only-package

		package p1
		func G()
	`)
	tg.wantNotStale("p1", "no source code", "should NOT want to rebuild p1 (first)")
	tg.run("install", "-x", "p1") // no-op, up to date
	tg.grepBothNot("/compile", "should not have run compiler")
	tg.run("install", "p2") // does not rebuild p1 (or else p2 will fail)
	tg.wantNotStale("p2", "", "should NOT want to rebuild p2")

	// changes to the non-source-code do not matter,
	// and only one file needs the special comment.
	tg.tempFile("src/p1/missing2.go", `
		package p1
		func H()
	`)
	tg.wantNotStale("p1", "no source code", "should NOT want to rebuild p1 (second)")
	tg.wantNotStale("p2", "", "should NOT want to rebuild p2")

	tg.tempFile("src/p3/p3.go", `
		package main
		import (
			"p1"
			"p2"
		)
		func main() {
			p1.F(false)
			p2.F()
		}
	`)
	tg.run("install", "p3")

	tg.run("run", tg.path("src/p3/p3.go"))
	tg.grepStdout("hello from p1", "did not see message from p1")

	// The marker after a build-constrained file's package line does not
	// make p4 binary-only.
	tg.tempFile("src/p4/p4.go", `package main`)
	tg.tempFile("src/p4/p4not.go", `//go:binary-only-package

		// +build asdf

		package main
	`)
	tg.run("list", "-f", "{{.BinaryOnly}}", "p4")
	tg.grepStdout("false", "did not see BinaryOnly=false for p4")
}
// Issue 16050.
// TestAlwaysLinkSysoFiles checks that .syso files are listed in
// SysoFiles regardless of CGO_ENABLED (issue 16050).
func TestAlwaysLinkSysoFiles(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src/syso")
	tg.tempFile("src/syso/a.syso", ``)
	tg.tempFile("src/syso/b.go", `package syso`)
	tg.setenv("GOPATH", tg.path("."))

	// We should see the .syso file regardless of the setting of
	// CGO_ENABLED.

	tg.setenv("CGO_ENABLED", "1")
	tg.run("list", "-f", "{{.SysoFiles}}", "syso")
	tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=1")

	tg.setenv("CGO_ENABLED", "0")
	tg.run("list", "-f", "{{.SysoFiles}}", "syso")
	tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0")
}
// Issue 16120.
// TestGenerateUsesBuildContext checks that go generate substitutes the
// configured GOOS/GOARCH (the build context), not the host's, into
// generator commands (issue 16120).
func TestGenerateUsesBuildContext(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("this test won't run under Windows")
	}

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src/gen")
	tg.tempFile("src/gen/gen.go", "package gen\n//go:generate echo $GOOS $GOARCH\n")
	tg.setenv("GOPATH", tg.path("."))

	tg.setenv("GOOS", "linux")
	tg.setenv("GOARCH", "amd64")
	tg.run("generate", "gen")
	tg.grepStdout("linux amd64", "unexpected GOOS/GOARCH combination")

	tg.setenv("GOOS", "darwin")
	tg.setenv("GOARCH", "386")
	tg.run("generate", "gen")
	tg.grepStdout("darwin 386", "unexpected GOOS/GOARCH combination")
}
// Issue 14450: go get -u .../ tried to import not downloaded package
// TestGoGetUpdateWithWildcard checks that "go get -u .../" downloads
// the transitive dependencies of already-present packages (issue
// 14450): b and its dependencies c and d must appear, while the
// dependency of the never-fetched c-branch must not.
// Requires external network.
func TestGoGetUpdateWithWildcard(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	const aPkgImportPath = "github.com/tmwh/go-get-issue-14450/a"
	tg.run("get", aPkgImportPath)
	tg.run("get", "-u", ".../")
	tg.grepStderrNot("cannot find package", "did not update packages given wildcard path")

	var expectedPkgPaths = []string{
		"src/github.com/tmwh/go-get-issue-14450/b",
		"src/github.com/tmwh/go-get-issue-14450-b-dependency/c",
		"src/github.com/tmwh/go-get-issue-14450-b-dependency/d",
	}

	for _, importPath := range expectedPkgPaths {
		_, err := os.Stat(tg.path(importPath))
		tg.must(err)
	}
	const notExpectedPkgPath = "src/github.com/tmwh/go-get-issue-14450-c-dependency/e"
	tg.mustNotExist(tg.path(notExpectedPkgPath))
}
// TestGoEnv checks that "go env NAME" honors environment overrides
// (GOARCH, CGO_CFLAGS, CC), reports non-empty defaults for GCCGO and
// CGO_CFLAGS, and propagates CC's extra arguments into GOGCCFLAGS.
func TestGoEnv(t *testing.T) {
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.setenv("GOARCH", "arm")
	tg.run("env", "GOARCH")
	tg.grepStdout("^arm$", "GOARCH not honored")

	tg.run("env", "GCCGO")
	tg.grepStdout(".", "GCCGO unexpectedly empty")

	tg.run("env", "CGO_CFLAGS")
	tg.grepStdout(".", "default CGO_CFLAGS unexpectedly empty")

	tg.setenv("CGO_CFLAGS", "-foobar")
	tg.run("env", "CGO_CFLAGS")
	tg.grepStdout("^-foobar$", "CGO_CFLAGS not honored")

	tg.setenv("CC", "gcc -fmust -fgo -ffaster")
	tg.run("env", "CC")
	tg.grepStdout("gcc", "CC not found")
	tg.run("env", "GOGCCFLAGS")
	tg.grepStdout("-ffaster", "CC arguments not found")
}
// Patterns matched against "go test" output by the TestMatches* tests:
// noMatchesPattern recognizes the "[no tests to run]" notice on an ok
// line; okPattern recognizes any ok line.
const (
	noMatchesPattern = `(?m)^ok.*\[no tests to run\]`
	okPattern        = `(?m)^ok`
)
// TestMatchesNoTests checks that an unmatched -run pattern prints
// "[no tests to run]".
func TestMatchesNoTests(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "ThisWillNotMatch", "testdata/standalone_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}
// TestMatchesNoTestsDoesNotOverrideBuildFailure checks that a build
// failure still reports FAIL, not "[no tests to run]", even with an
// unmatched -run pattern.
func TestMatchesNoTestsDoesNotOverrideBuildFailure(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.runFail("test", "-run", "ThisWillNotMatch", "syntaxerror")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth("FAIL", "go test did not say FAIL")
}
// TestMatchesNoBenchmarksIsOK checks that an unmatched -bench pattern
// reports plain ok, without the "[no tests to run]" notice.
func TestMatchesNoBenchmarksIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "^$", "-bench", "ThisWillNotMatch", "testdata/standalone_benchmark_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}
// TestMatchesOnlyExampleIsOK checks that matching only an Example
// reports ok without the "[no tests to run]" notice.
func TestMatchesOnlyExampleIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "Example", "testdata/example1_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}
// TestMatchesOnlyBenchmarkIsOK checks that matching only benchmarks
// (no tests) reports ok without the "[no tests to run]" notice.
func TestMatchesOnlyBenchmarkIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "^$", "-bench", ".", "testdata/standalone_benchmark_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}
// TestBenchmarkLabels checks that benchmark output for a package in
// GOPATH prints the goos:, goarch:, and pkg: header lines, and pkg:
// exactly once.
func TestBenchmarkLabels(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("test", "-run", "^$", "-bench", ".", "bench")
	tg.grepStdout(`(?m)^goos: `+runtime.GOOS, "go test did not print goos")
	tg.grepStdout(`(?m)^goarch: `+runtime.GOARCH, "go test did not print goarch")
	tg.grepStdout(`(?m)^pkg: bench`, "go test did not say pkg: bench")
	tg.grepBothNot(`(?s)pkg:.*pkg:`, "go test said pkg multiple times")
}
// TestBenchmarkLabelsOutsideGOPATH checks that benchmarks run on a
// standalone file print goos:/goarch: but no pkg: line.
func TestBenchmarkLabelsOutsideGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "^$", "-bench", ".", "testdata/standalone_benchmark_test.go")
	tg.grepStdout(`(?m)^goos: `+runtime.GOOS, "go test did not print goos")
	tg.grepStdout(`(?m)^goarch: `+runtime.GOARCH, "go test did not print goarch")
	tg.grepBothNot(`(?m)^pkg:`, "go test did say pkg:")
}
// TestMatchesOnlyTestIsOK checks that a matched -run pattern reports
// ok without the "[no tests to run]" notice.
func TestMatchesOnlyTestIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "Test", "testdata/standalone_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}
// TestMatchesNoTestsWithSubtests checks that an unmatched -run pattern
// against a file with subtests prints "[no tests to run]".
func TestMatchesNoTestsWithSubtests(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "ThisWillNotMatch", "testdata/standalone_sub_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}
// TestMatchesNoSubtestsMatch checks that a -run pattern matching the
// parent test but no subtest prints "[no tests to run]".
func TestMatchesNoSubtestsMatch(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/ThisWillNotMatch", "testdata/standalone_sub_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}
// TestMatchesNoSubtestsDoesNotOverrideFailure checks that a failing
// parent test still reports FAIL even when the subtest pattern
// matches nothing.
func TestMatchesNoSubtestsDoesNotOverrideFailure(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("test", "-run", "TestThatFails/ThisWillNotMatch", "testdata/standalone_fail_sub_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth("FAIL", "go test did not say FAIL")
}
// TestMatchesOnlySubtestIsOK checks that matching only a subtest
// reports ok without the "[no tests to run]" notice.
func TestMatchesOnlySubtestIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/Sub", "testdata/standalone_sub_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}
// TestMatchesNoSubtestsParallel checks the "[no tests to run]" notice
// when the unmatched subtest is nested under a parallel subtest.
func TestMatchesNoSubtestsParallel(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/Sub/ThisWillNotMatch", "testdata/standalone_parallel_sub_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}
// TestMatchesOnlySubtestParallelIsOK checks that matching a nested
// parallel subtest reports ok without the "[no tests to run]" notice.
func TestMatchesOnlySubtestParallelIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/Sub/Nested", "testdata/standalone_parallel_sub_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}
// Issue 18845
// TestBenchTimeout checks that a benchmark run completes within the
// given -timeout (issue 18845).
func TestBenchTimeout(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-bench", ".", "-timeout", "750ms", "testdata/timeoutbench_test.go")
}
// TestLinkXImportPathEscape checks that -ldflags -X works on a symbol
// whose import path needs escaping (golang.org/issue/16710) by
// building and running the binary and verifying its output.
func TestLinkXImportPathEscape(t *testing.T) {
	// golang.org/issue/16710
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	exe := "./linkx" + exeSuffix
	tg.creatingTemp(exe)
	tg.run("build", "-o", exe, "-ldflags", "-X=my.pkg.Text=linkXworked", "my.pkg/main")
	out, err := exec.Command(exe).CombinedOutput()
	if err != nil {
		tg.t.Fatal(err)
	}
	if string(out) != "linkXworked\n" {
		tg.t.Log(string(out))
		tg.t.Fatal(`incorrect output: expected "linkXworked\n"`)
	}
}
// Issue 18044.
// TestLdBindNow checks that the go command works with LD_BIND_NOW=1
// set in the environment (issue 18044).
func TestLdBindNow(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("LD_BIND_NOW", "1")
	tg.run("help")
}
// Issue 18225.
// This is really a cmd/asm issue but this is a convenient place to test it.
// TestConcurrentAsm checks that assembling two files declaring the
// same static symbol (constants<>) in one package builds cleanly
// (issue 18225, really a cmd/asm issue tested here for convenience).
func TestConcurrentAsm(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	asm := `DATA ·constants<>+0x0(SB)/8,$0
GLOBL ·constants<>(SB),8,$8
`
	tg.tempFile("go/src/p/a.s", asm)
	tg.tempFile("go/src/p/b.s", asm)
	tg.tempFile("go/src/p/p.go", `package p`)
	tg.setenv("GOPATH", tg.path("go"))
	tg.run("build", "p")
}
// Issue 18778.
func TestDotDotDotOutsideGOPATH(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.tempFile("pkgs/a.go", `package x`)
tg.tempFile("pkgs/a_test.go", `package x_test
import "testing"
func TestX(t *testing.T) {}`)
tg.tempFile("pkgs/a/a.go", `package a`)
tg.tempFile("pkgs/a/a_test.go", `package a_test
import "testing"
func TestA(t *testing.T) {}`)
tg.cd(tg.path("pkgs"))
tg.run("build", "./...")
tg.run("test", "./...")
tg.run("list", "./...")
tg.grepStdout("pkgs$", "expected package not listed")
tg.grepStdout("pkgs/a", "expected package not listed")
}
// Issue 18975.
// TestFFLAGS checks that #cgo FFLAGS values reach the Fortran compiler
// command line (observed via -x output).
func TestFFLAGS(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()

	tg.tempFile("p/src/p/main.go", `package main
// #cgo FFLAGS: -no-such-fortran-flag
import "C"
func main() {}
`)
	tg.tempFile("p/src/p/a.f", `! comment`)
	tg.setenv("GOPATH", tg.path("p"))

	// This should normally fail because we are passing an unknown flag,
	// but issue #19080 points to Fortran compilers that succeed anyhow.
	// To work either way we call doRun directly rather than run or runFail.
	tg.doRun([]string{"build", "-x", "p"})

	tg.grepStderr("no-such-fortran-flag", `missing expected "-no-such-fortran-flag"`)
}

// Issue 19198.
// This is really a cmd/link issue but this is a convenient place to test it.
func TestDuplicateGlobalAsmSymbols(t *testing.T) {
	if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" {
		t.Skipf("skipping test on %s", runtime.GOARCH)
	}
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()

	// Identical file-static assembly symbol defined in two packages;
	// linking both into one cgo binary must not collide.
	asm := `
#include "textflag.h"

DATA sym<>+0x0(SB)/8,$0
GLOBL sym<>(SB),(NOPTR+RODATA),$8

TEXT ·Data(SB),NOSPLIT,$0
MOVB sym<>(SB), AX
MOVB AX, ret+0(FP)
RET
`
	tg.tempFile("go/src/a/a.s", asm)
	tg.tempFile("go/src/a/a.go", `package a; func Data() uint8`)
	tg.tempFile("go/src/b/b.s", asm)
	tg.tempFile("go/src/b/b.go", `package b; func Data() uint8`)
	tg.tempFile("go/src/p/p.go", `
package main
import "a"
import "b"
import "C"
func main() {
	_ = a.Data() + b.Data()
}
`)
	tg.setenv("GOPATH", tg.path("go"))
	exe := filepath.Join(tg.tempdir, "p.exe")
	tg.creatingTemp(exe)
	tg.run("build", "-o", exe, "p")
}

// TestBuildTagsNoComma verifies -tags accepts a space-separated list and
// rejects a comma-separated one with a clear error.
func TestBuildTagsNoComma(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("go"))
	tg.run("install", "-tags", "tag1 tag2", "math")
	tg.runFail("install", "-tags", "tag1,tag2", "math")
	tg.grepBoth("space-separated list contains comma", "-tags with a comma-separated list didn't error")
	tg.runFail("build", "-tags", "tag1,tag2", "math")
	tg.grepBoth("space-separated list contains comma", "-tags with a comma-separated list didn't error")
}
func copyFile(src, dst string, perm os.FileMode) error {
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
_, err = io.Copy(df, sf)
err2 := df.Close()
if err != nil {
return err
}
return err2
}
// TestExecutableGOROOT verifies how a relocated or symlinked go binary
// computes GOROOT when the GOROOT environment variable is unset.
func TestExecutableGOROOT(t *testing.T) {
	if runtime.GOOS == "openbsd" {
		t.Skipf("test case does not work on %s, missing os.Executable", runtime.GOOS)
	}

	// Env with no GOROOT.
	var env []string
	for _, e := range os.Environ() {
		if !strings.HasPrefix(e, "GOROOT=") {
			env = append(env, e)
		}
	}

	// check runs `exe env GOROOT` with GOROOT unset and compares the
	// symlink-resolved result against want.
	check := func(t *testing.T, exe, want string) {
		cmd := exec.Command(exe, "env", "GOROOT")
		cmd.Env = env
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("%s env GOROOT: %v, %s", exe, err, out)
		}
		goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out)))
		if err != nil {
			t.Fatal(err)
		}
		want, err = filepath.EvalSymlinks(want)
		if err != nil {
			t.Fatal(err)
		}
		if !strings.EqualFold(goroot, want) {
			t.Errorf("go env GOROOT:\nhave %s\nwant %s", goroot, want)
		} else {
			t.Logf("go env GOROOT: %s", goroot)
		}
	}

	// Note: Must not call tg methods inside subtests: tg is attached to outer t.
	tg := testgo(t)
	defer tg.cleanup()

	tg.makeTempdir()
	tg.tempDir("new/bin")
	newGoTool := tg.path("new/bin/go" + exeSuffix)
	tg.must(copyFile(tg.goTool(), newGoTool, 0775))
	newRoot := tg.path("new")

	t.Run("RelocatedExe", func(t *testing.T) {
		t.Skip("TODO: skipping known broken test; see golang.org/issue/20284")

		// Should fall back to default location in binary.
		// No way to dig out other than look at source code.
		data, err := ioutil.ReadFile("../../runtime/internal/sys/zversion.go")
		if err != nil {
			t.Fatal(err)
		}
		m := regexp.MustCompile("const DefaultGoroot = `([^`]+)`").FindStringSubmatch(string(data))
		if m == nil {
			t.Fatal("cannot find DefaultGoroot in ../../runtime/internal/sys/zversion.go")
		}
		check(t, newGoTool, m[1])
	})

	// If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
	// so it should find the new tree.
	tg.tempDir("new/pkg/tool")
	t.Run("RelocatedTree", func(t *testing.T) {
		check(t, newGoTool, newRoot)
	})

	tg.tempDir("other/bin")
	symGoTool := tg.path("other/bin/go" + exeSuffix)

	// Symlink into go tree should still find go tree.
	t.Run("SymlinkedExe", func(t *testing.T) {
		testenv.MustHaveSymlink(t)
		if err := os.Symlink(newGoTool, symGoTool); err != nil {
			t.Fatal(err)
		}
		check(t, symGoTool, newRoot)
	})
}

// TestNeedVersion checks that a go tool whose version does not match the
// toolchain refuses to compile.
func TestNeedVersion(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("goversion.go", `package main; func main() {}`)
	path := tg.path("goversion.go")
	tg.setenv("TESTGO_VERSION", "go1.testgo")
	tg.runFail("run", path)
	tg.grepStderr("compile", "does not match go tool version")
}

// Test that user can override default code generation flags.
func TestUserOverrideFlags(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	if runtime.GOOS != "linux" {
		// We are testing platform-independent code, so it's
		// OK to skip cases that work differently.
		t.Skipf("skipping on %s because test only works if c-archive implies -shared", runtime.GOOS)
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("override.go", `package main

import "C"

//export GoFunc
func GoFunc() {}

func main() {}`)
	tg.creatingTemp("override.a")
	tg.creatingTemp("override.h")
	tg.run("build", "-x", "-buildmode=c-archive", "-gcflags=-shared=false", tg.path("override.go"))
	// Both the implied -shared and the user's -shared=false must appear,
	// with the user's value last so it wins.
	tg.grepStderr("compile .*-shared .*-shared=false", "user can not override code generation flag")
}
// TestCgoFlagContainsSpace builds a fake C compiler that inspects its
// arguments, then checks quoted cgo flags containing spaces are passed
// through as single arguments without duplication.
func TestCgoFlagContainsSpace(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}

	tg := testgo(t)
	defer tg.cleanup()

	ccName := filepath.Base(testCC)

	// Wrapper "compiler": forwards to the real compiler, then verifies
	// exactly one of the space-containing flags appears.
	tg.tempFile(fmt.Sprintf("src/%s/main.go", ccName), fmt.Sprintf(`package main
import (
"os"
"os/exec"
"strings"
)

func main() {
cmd := exec.Command(%q, os.Args[1:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
panic(err)
}

if os.Args[len(os.Args)-1] == "trivial.c" {
return
}

var success bool
for _, arg := range os.Args {
switch {
case strings.Contains(arg, "c flags"):
if success {
panic("duplicate CFLAGS")
}
success = true
case strings.Contains(arg, "ld flags"):
if success {
panic("duplicate LDFLAGS")
}
success = true
}
}
if !success {
panic("args should contains '-Ic flags' or '-Lld flags'")
}
}
`, testCC))

	tg.cd(tg.path(fmt.Sprintf("src/%s", ccName)))
	tg.run("build")
	tg.setenv("CC", tg.path(fmt.Sprintf("src/%s/%s", ccName, ccName)))

	tg.tempFile("src/cgo/main.go", `package main
// #cgo CFLAGS: -I"c flags"
// #cgo LDFLAGS: -L"ld flags"
import "C"
func main() {}
`)
	tg.cd(tg.path("src/cgo"))
	tg.run("run", "main.go")
}

// Issue #20435.
func TestGoTestRaceCoverModeFailures(t *testing.T) {
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}

	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))

	tg.run("test", "testrace")

	// -race requires atomic coverage mode; "set" must be rejected.
	tg.runFail("test", "-race", "-covermode=set", "testrace")
	tg.grepStderr(`-covermode must be "atomic", not "set", when -race is enabled`, "-race -covermode=set was allowed")
	tg.grepBothNot("PASS", "something passed")
}

// Issue 9737: verify that GOARM and GO386 affect the computed build ID.
func TestBuildIDContainsArchModeEnv(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}

	var tg *testgoData
	// testWith installs mycmd with the "before" env, changes the env with
	// "after", and expects the install to become stale.
	testWith := func(before, after func()) func(*testing.T) {
		return func(t *testing.T) {
			tg = testgo(t)
			defer tg.cleanup()
			tg.tempFile("src/mycmd/x.go", `package main
func main() {}`)
			tg.setenv("GOPATH", tg.path("."))

			tg.cd(tg.path("src/mycmd"))
			tg.setenv("GOOS", "linux")
			before()
			tg.run("install", "mycmd")
			after()
			tg.wantStale("mycmd", "build ID mismatch", "should be stale after environment variable change")
		}
	}

	t.Run("386", testWith(func() {
		tg.setenv("GOARCH", "386")
		tg.setenv("GO386", "387")
	}, func() {
		tg.setenv("GO386", "sse2")
	}))

	t.Run("arm", testWith(func() {
		tg.setenv("GOARCH", "arm")
		tg.setenv("GOARM", "5")
	}, func() {
		tg.setenv("GOARM", "7")
	}))
}
// TestTestRegexps runs a fixture package with -run/-bench subtest
// patterns and compares the filtered RUN/BENCH/LOG lines against an
// exact expected transcript.
func TestTestRegexps(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("test", "-cpu=1", "-run=X/Y", "-bench=X/Y", "-count=2", "-v", "testregexp")
	var lines []string
	for _, line := range strings.SplitAfter(tg.getStdout(), "\n") {
		if strings.Contains(line, "=== RUN") || strings.Contains(line, "--- BENCH") || strings.Contains(line, "LOG") {
			lines = append(lines, line)
		}
	}

	// Important parts:
	//	TestX is run, twice
	//	TestX/Y is run, twice
	//	TestXX is run, twice
	//	TestZ is not run
	//	BenchmarkX is run but only with N=1, once
	//	BenchmarkXX is run but only with N=1, once
	//	BenchmarkX/Y is run in full, twice
	want := `=== RUN TestX
=== RUN TestX/Y
x_test.go:6: LOG: X running
x_test.go:8: LOG: Y running
=== RUN TestXX
z_test.go:10: LOG: XX running
=== RUN TestX
=== RUN TestX/Y
x_test.go:6: LOG: X running
x_test.go:8: LOG: Y running
=== RUN TestXX
z_test.go:10: LOG: XX running
--- BENCH: BenchmarkX/Y
x_test.go:15: LOG: Y running N=1
x_test.go:15: LOG: Y running N=100
x_test.go:15: LOG: Y running N=10000
x_test.go:15: LOG: Y running N=1000000
x_test.go:15: LOG: Y running N=100000000
x_test.go:15: LOG: Y running N=2000000000
--- BENCH: BenchmarkX/Y
x_test.go:15: LOG: Y running N=1
x_test.go:15: LOG: Y running N=100
x_test.go:15: LOG: Y running N=10000
x_test.go:15: LOG: Y running N=1000000
x_test.go:15: LOG: Y running N=100000000
x_test.go:15: LOG: Y running N=2000000000
--- BENCH: BenchmarkX
x_test.go:13: LOG: X running N=1
--- BENCH: BenchmarkXX
z_test.go:18: LOG: XX running N=1
`
	have := strings.Join(lines, "")
	if have != want {
		t.Errorf("reduced output:<<<\n%s>>> want:<<<\n%s>>>", have, want)
	}
}

// TestListTests checks `go test -list` reports tests, benchmarks and
// examples by name without running them.
func TestListTests(t *testing.T) {
	var tg *testgoData
	testWith := func(listName, expected string) func(*testing.T) {
		return func(t *testing.T) {
			tg = testgo(t)
			defer tg.cleanup()
			tg.run("test", "./testdata/src/testlist/...", fmt.Sprintf("-list=%s", listName))
			tg.grepStdout(expected, fmt.Sprintf("-test.list=%s returned %q, expected %s", listName, tg.getStdout(), expected))
		}
	}

	t.Run("Test", testWith("Test", "TestSimple"))
	t.Run("Bench", testWith("Benchmark", "BenchmarkSimple"))
	t.Run("Example1", testWith("Example", "ExampleSimple"))
	t.Run("Example2", testWith("Example", "ExampleWithEmptyOutput"))
}

// TestBadCommandLines checks that file, directory and import-path names
// starting with '@' or '-' are rejected (they could be misread as flags).
func TestBadCommandLines(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()

	tg.tempFile("src/x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.run("build", "x")

	tg.tempFile("src/x/@y.go", "package x\n")
	tg.runFail("build", "x")
	tg.grepStderr("invalid input file name \"@y.go\"", "did not reject @y.go")
	tg.must(os.Remove(tg.path("src/x/@y.go")))

	tg.tempFile("src/x/-y.go", "package x\n")
	tg.runFail("build", "x")
	tg.grepStderr("invalid input file name \"-y.go\"", "did not reject -y.go")
	tg.must(os.Remove(tg.path("src/x/-y.go")))

	tg.runFail("build", "-gcflags=@x", "x")
	tg.grepStderr("invalid command-line argument @x in command", "did not reject @x during exec")

	tg.tempFile("src/@x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "@x")
	tg.grepStderr("invalid input directory name \"@x\"", "did not reject @x directory")

	tg.tempFile("src/@x/y/y.go", "package y\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "@x/y")
	tg.grepStderr("invalid import path \"@x/y\"", "did not reject @x/y import path")

	tg.tempFile("src/-x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "--", "-x")
	tg.grepStderr("invalid input directory name \"-x\"", "did not reject -x directory")

	tg.tempFile("src/-x/y/y.go", "package y\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "--", "-x/y")
	tg.grepStderr("invalid import path \"-x/y\"", "did not reject -x/y import path")
}
// TestBadCgoDirectives checks that dangerous cgo directives and flags
// (//go:cgo_ldflag outside generated code, -fplugin, @-prefixed or bare
// -D flags, bad pkg-config names) are rejected, while the same strings
// coming from the CGO_CFLAGS environment are still allowed.
func TestBadCgoDirectives(t *testing.T) {
	if !canCgo {
		t.Skip("no cgo")
	}
	tg := testgo(t)
	defer tg.cleanup()

	tg.tempFile("src/x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))

	tg.tempFile("src/x/x.go", `package x

//go:cgo_ldflag "-fplugin=foo.so"
`)
	tg.runFail("build", "x")
	tg.grepStderr("//go:cgo_ldflag .* only allowed in cgo-generated code", "did not reject //go:cgo_ldflag directive")

	tg.must(os.Remove(tg.path("src/x/x.go")))
	tg.runFail("build", "x")
	tg.grepStderr("no Go files", "did not report missing source code")
	tg.tempFile("src/x/_cgo_yy.go", `package x

//go:cgo_ldflag "-fplugin=foo.so"
`)
	tg.runFail("build", "x")
	tg.grepStderr("no Go files", "did not report missing source code") // _* files are ignored...

	tg.runFail("build", tg.path("src/x/_cgo_yy.go")) // ... but if forced, the comment is rejected
	// Actually, today there is a separate issue that _ files named
	// on the command-line are ignored. Once that is fixed,
	// we want to see the cgo_ldflag error.
	tg.grepStderr("//go:cgo_ldflag only allowed in cgo-generated code|no Go files", "did not reject //go:cgo_ldflag directive")
	tg.must(os.Remove(tg.path("src/x/_cgo_yy.go")))

	tg.tempFile("src/x/x.go", "package x\n")
	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -fplugin=foo.so
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -fplugin=foo.so", "did not reject -fplugin")

	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -Ibar -fplugin=foo.so
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -fplugin=foo.so", "did not reject -fplugin")

	tg.tempFile("src/x/y.go", `package x
// #cgo pkg-config: -foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid pkg-config package name: -foo", "did not reject pkg-config: -foo")

	tg.tempFile("src/x/y.go", `package x
// #cgo pkg-config: @foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid pkg-config package name: @foo", "did not reject pkg-config: -foo")

	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: @foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: @foo", "did not reject @foo flag")

	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -D
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -D without argument", "did not reject trailing -I flag")

	// Note that -I @foo is allowed because we rewrite it into -I /path/to/src/@foo
	// before the check is applied. There's no such rewrite for -D.

	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -D @foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -D @foo", "did not reject -D @foo flag")

	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -D@foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -D@foo", "did not reject -D@foo flag")

	tg.setenv("CGO_CFLAGS", "-D@foo")
	tg.tempFile("src/x/y.go", `package x
import "C"
`)
	tg.run("build", "-n", "x")
	tg.grepStderr("-D@foo", "did not find -D@foo in commands")
}

// TestTwoPkgConfigs checks that a pkg-config directive repeated in two
// files of one package is deduplicated to a single pkg-config invocation
// per flag kind (observed via a stub pkg-config script).
func TestTwoPkgConfigs(t *testing.T) {
	if !canCgo {
		t.Skip("no cgo")
	}
	if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
		t.Skipf("no shell scripts on %s", runtime.GOOS)
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/x/a.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
	tg.tempFile("src/x/b.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
	tg.tempFile("pkg-config.sh", `#!/bin/sh
echo $* >>`+tg.path("pkg-config.out"))
	tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755))
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh"))
	tg.run("build", "x")
	out, err := ioutil.ReadFile(tg.path("pkg-config.out"))
	tg.must(err)
	out = bytes.TrimSpace(out)
	want := "--cflags --static --static -- a a\n--libs --static --static -- a a"
	if !bytes.Equal(out, []byte(want)) {
		t.Errorf("got %q want %q", out, want)
	}
}
|
[
"\"HOME\"",
"\"CCACHE_DIR\"",
"\"PATH\"",
"\"GOPATH\"",
"\"TERM\"",
"\"TERM\""
] |
[] |
[
"CCACHE_DIR",
"GOPATH",
"TERM",
"HOME",
"PATH"
] |
[]
|
["CCACHE_DIR", "GOPATH", "TERM", "HOME", "PATH"]
|
go
| 5 | 0 | |
cmd/ddltest/ddl_test.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
"database/sql"
"database/sql/driver"
"flag"
"fmt"
"math/rand"
"os"
"os/exec"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
_ "github.com/go-sql-driver/mysql"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
zaplog "github.com/pingcap/log"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
log "github.com/sirupsen/logrus"
goctx "golang.org/x/net/context"
)
// TestDDL is the gocheck entry point for the whole suite.
func TestDDL(t *testing.T) {
	CustomVerboseFlag = true
	TestingT(t)
}

// Command-line knobs controlling cluster topology and test sizing.
var (
	etcd              = flag.String("etcd", "127.0.0.1:2379", "etcd path")
	tidbIP            = flag.String("tidb_ip", "127.0.0.1", "tidb-server ip address")
	tikvPath          = flag.String("tikv_path", "", "tikv path")
	lease             = flag.Int("lease", 1, "DDL schema lease time, seconds")
	serverNum         = flag.Int("server_num", 3, "Maximum running tidb server")
	startPort         = flag.Int("start_port", 5000, "First tidb-server listening port")
	statusPort        = flag.Int("status_port", 8000, "First tidb-server status port")
	logLevel          = flag.String("L", "error", "log level")
	ddlServerLogLevel = flag.String("ddl_log_level", "debug", "DDL server log level")
	dataNum           = flag.Int("n", 100, "minimal test dataset for a table")
	enableRestart     = flag.Bool("enable_restart", true, "whether random restart servers for tests")
)

var _ = Suite(&TestDDLSuite{})

// server tracks one spawned tidb-server process: the running command,
// its log file, an open client connection and its listen address.
type server struct {
	*exec.Cmd
	logFP *os.File // per-server log file, reused across restarts
	db    *sql.DB
	addr  string
}

// TestDDLSuite runs concurrent DML against a set of tidb-server
// processes (optionally restarted at random) sharing one TiKV store.
type TestDDLSuite struct {
	store kv.Storage
	dom   *domain.Domain
	s     session.Session
	ctx   sessionctx.Context

	m     sync.Mutex // guards procs
	procs []*server  // nil slot means that server is currently down

	wg   sync.WaitGroup
	quit chan struct{} // closed in TearDownSuite to stop background goroutines

	retryCount int // connection attempts made by startServer
}
// SetUpSuite connects to the shared TiKV store, bootstraps the test_ddl
// database, stops the local DDL worker (so an external tidb-server owns
// DDL), then launches *serverNum tidb-server processes and a goroutine
// that restarts them at random intervals.
func (s *TestDDLSuite) SetUpSuite(c *C) {
	logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: *logLevel}})

	s.quit = make(chan struct{})

	var err error
	s.store, err = store.New(fmt.Sprintf("tikv://%s%s", *etcd, *tikvPath))
	c.Assert(err, IsNil)

	// Make sure the schema lease of this session is equal to other TiDB servers'.
	session.SetSchemaLease(time.Duration(*lease) * time.Second)

	s.dom, err = session.BootstrapSession(s.store)
	c.Assert(err, IsNil)

	s.s, err = session.CreateSession(s.store)
	c.Assert(err, IsNil)

	s.ctx = s.s.(sessionctx.Context)
	goCtx := goctx.Background()
	_, err = s.s.Execute(goCtx, "create database if not exists test_ddl")
	c.Assert(err, IsNil)

	s.Bootstrap(c)

	// Stop current DDL worker, so that we can't be the owner now.
	err = domain.GetDomain(s.ctx).DDL().Stop()
	c.Assert(err, IsNil)
	ddl.RunWorker = false
	session.ResetStoreForWithTiKVTest(s.store)
	s.s, err = session.CreateSession(s.store)
	c.Assert(err, IsNil)
	s.dom, err = session.BootstrapSession(s.store)
	c.Assert(err, IsNil)
	s.ctx = s.s.(sessionctx.Context)
	_, err = s.s.Execute(goCtx, "use test_ddl")
	c.Assert(err, IsNil)

	addEnvPath("..")

	// Start multi tidb servers
	s.procs = make([]*server, *serverNum)

	// Set server restart retry count.
	s.retryCount = 5

	createLogFiles(c, *serverNum)
	err = s.startServers()
	c.Assert(err, IsNil)

	s.wg.Add(1)
	go s.restartServerRegularly()
}

// restartServerRegularly restarts a tidb server regularly.
func (s *TestDDLSuite) restartServerRegularly() {
	defer s.wg.Done()

	var err error
	// Random period of 6..11 lease intervals between restarts.
	after := *lease * (6 + randomIntn(6))
	for {
		select {
		case <-time.After(time.Duration(after) * time.Second):
			if *enableRestart {
				err = s.restartServerRand()
				if err != nil {
					log.Fatalf("restartServerRand failed, err %v", errors.ErrorStack(err))
				}
			}
		case <-s.quit:
			return
		}
	}
}
// TearDownSuite stops the restart goroutine, closes the domain and
// store, and kills all spawned tidb-server processes. A watchdog dumps
// goroutine stacks if shutdown hangs for 100s.
func (s *TestDDLSuite) TearDownSuite(c *C) {
	close(s.quit)
	s.wg.Wait()

	s.dom.Close()
	// TODO: Remove these logs after testing.
	quitCh := make(chan struct{})
	go func() {
		select {
		case <-time.After(100 * time.Second):
			buf := make([]byte, 2<<20)
			size := runtime.Stack(buf, true)
			log.Errorf("%s", buf[:size])
		case <-quitCh:
		}
	}()
	err := s.store.Close()
	c.Assert(err, IsNil)
	close(quitCh)

	err = s.stopServers()
	c.Assert(err, IsNil)
}

// startServers launches a tidb-server for every nil slot in s.procs,
// reattaching each to its pre-created log file. Caller-visible state is
// guarded by s.m.
func (s *TestDDLSuite) startServers() (err error) {
	s.m.Lock()
	defer s.m.Unlock()

	for i := 0; i < len(s.procs); i++ {
		if s.procs[i] != nil {
			continue
		}

		// Open log file.
		logFP, err := os.OpenFile(fmt.Sprintf("%s%d", logFilePrefix, i), os.O_RDWR, 0766)
		if err != nil {
			return errors.Trace(err)
		}

		s.procs[i], err = s.startServer(i, logFP)
		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}

// killServer kills one tidb-server process and reaps it, then pauses
// briefly so a replacement can rebind the same port quickly.
func (s *TestDDLSuite) killServer(proc *os.Process) error {
	// Make sure this tidb is killed, and it makes the next tidb that has the same port as this one start quickly.
	err := proc.Kill()
	if err != nil {
		log.Errorf("kill server failed err %v", err)
		return errors.Trace(err)
	}
	_, err = proc.Wait()
	if err != nil {
		log.Errorf("kill server, wait failed err %v", err)
		return errors.Trace(err)
	}

	time.Sleep(1 * time.Second)
	return nil
}

// stopServers kills every live tidb-server and clears its procs slot.
func (s *TestDDLSuite) stopServers() error {
	s.m.Lock()
	defer s.m.Unlock()

	for i := 0; i < len(s.procs); i++ {
		if s.procs[i] != nil {
			err := s.killServer(s.procs[i].Process)
			if err != nil {
				return errors.Trace(err)
			}
			s.procs[i] = nil
		}
	}
	return nil
}
// logFilePrefix is the filename prefix of the per-server tidb log files.
var logFilePrefix = "tidb_log_file_"

// createLogFiles pre-creates (or truncates) one empty log file per
// server so startServers can later open them with os.O_RDWR.
func createLogFiles(c *C, length int) {
	for i := 0; i < length; i++ {
		fp, err := os.Create(fmt.Sprintf("%s%d", logFilePrefix, i))
		// Assert directly; the previous `if err != nil` wrapper around
		// this assertion was redundant (Assert already checks for nil
		// and aborts the test on failure, so fp is valid past it).
		c.Assert(err, IsNil)
		fp.Close()
	}
}
// startServer launches the i-th tidb-server process writing to log file
// fp, waits for it to come up, and opens a *sql.DB on it (retrying with
// exponential backoff up to s.retryCount attempts).
func (s *TestDDLSuite) startServer(i int, fp *os.File) (*server, error) {
	cmd := exec.Command("ddltest_tidb-server",
		"--store=tikv",
		fmt.Sprintf("-L=%s", *ddlServerLogLevel),
		fmt.Sprintf("--path=%s%s", *etcd, *tikvPath),
		fmt.Sprintf("-P=%d", *startPort+i),
		fmt.Sprintf("--status=%d", *statusPort+i),
		fmt.Sprintf("--lease=%d", *lease))
	cmd.Stderr = fp
	cmd.Stdout = fp
	err := cmd.Start()
	if err != nil {
		return nil, errors.Trace(err)
	}
	time.Sleep(500 * time.Millisecond)

	// Make sure tidb server process is started.
	ps := fmt.Sprintf("ps -aux|grep ddltest_tidb|grep %d", *startPort+i)
	output, _ := exec.Command("sh", "-c", ps).Output()
	if !strings.Contains(string(output), "ddltest_tidb-server") {
		time.Sleep(1 * time.Second)
	}

	// Open database.
	var db *sql.DB
	addr := fmt.Sprintf("%s:%d", *tidbIP, *startPort+i)
	sleepTime := time.Millisecond * 250
	startTime := time.Now()
	// NOTE(review): this loop variable shadows the parameter i; it is
	// only used in log messages below, so behavior is unaffected.
	for i := 0; i < s.retryCount; i++ {
		db, err = sql.Open("mysql", fmt.Sprintf("root@(%s)/test_ddl", addr))
		if err != nil {
			log.Warnf("open addr %v failed, retry count %d err %v", addr, i, err)
			continue
		}
		err = db.Ping()
		if err == nil {
			break
		}
		log.Warnf("ping addr %v failed, retry count %d err %v", addr, i, err)

		db.Close()
		time.Sleep(sleepTime)
		sleepTime += sleepTime // double the backoff each attempt
	}
	if err != nil {
		log.Errorf("restart server addr %v failed %v, take time %v", addr, err, time.Since(startTime))
		return nil, errors.Trace(err)
	}
	db.SetMaxOpenConns(10)

	_, err = db.Exec("use test_ddl")
	if err != nil {
		return nil, errors.Trace(err)
	}

	log.Infof("start server %s ok %v", addr, err)
	return &server{
		Cmd:   cmd,
		db:    db,
		addr:  addr,
		logFP: fp,
	}, nil
}
// restartServerRand picks a random server slot and, if it is live,
// kills and relaunches it while holding s.m so no query can pick the
// half-restarted server.
func (s *TestDDLSuite) restartServerRand() error {
	i := rand.Intn(*serverNum)

	s.m.Lock()
	defer s.m.Unlock()

	if s.procs[i] == nil {
		return nil
	}

	server := s.procs[i]
	s.procs[i] = nil
	log.Warnf("begin to restart %s", server.addr)
	err := s.killServer(server.Process)
	if err != nil {
		return errors.Trace(err)
	}

	s.procs[i], err = s.startServer(i, server.logFP)
	return errors.Trace(err)
}
// isRetryError reports whether err is a transient failure (broken or
// reset connection, KV retry hint, etc.) that the caller should retry.
// Anything else is logged and treated as fatal to the statement.
func isRetryError(err error) bool {
	if err == nil {
		return false
	}
	if terror.ErrorEqual(err, driver.ErrBadConn) {
		return true
	}

	msg := err.Error()
	for _, pat := range []string{
		"connection refused",
		"getsockopt: connection reset by peer",
		"KV error safe to retry",
		"try again later",
		"invalid connection",
	} {
		if strings.Contains(msg, pat) {
			return true
		}
	}

	// TODO: Check the specific columns number.
	if strings.Contains(msg, "Column count doesn't match value count at row") {
		log.Warnf("err is %v", err)
		return false
	}

	log.Errorf("err is %v, can not retry", err)
	return false
}
// exec runs query with args on a randomly chosen live tidb server,
// transparently retrying (on a possibly different server) when the
// error is transient per isRetryError.
func (s *TestDDLSuite) exec(query string, args ...interface{}) (sql.Result, error) {
	for {
		server := s.getServer()
		r, err := server.db.Exec(query, args...)
		if isRetryError(err) {
			// Fix: the format verbs are (%s query, %s server, %v err);
			// the arguments were previously passed as (query, err,
			// server.addr), producing garbled retry logs.
			log.Errorf("exec %s in server %s err %v, retry", query, server.addr, err)
			continue
		}
		return r, err
	}
}
// mustExec runs query via s.exec and aborts the whole process on any
// non-retryable error.
func (s *TestDDLSuite) mustExec(c *C, query string, args ...interface{}) sql.Result {
	r, err := s.exec(query, args...)
	if err != nil {
		log.Fatalf("[mustExec fail]query - %v %v, error - %v", query, args, err)
	}

	return r
}

// execInsert runs an INSERT until it succeeds. When random restarts are
// enabled, a duplicate-key error is treated as success because a killed
// server may have committed the row before dying.
func (s *TestDDLSuite) execInsert(c *C, query string, args ...interface{}) sql.Result {
	for {
		r, err := s.exec(query, args...)
		if err == nil {
			return r
		}

		if *enableRestart {
			// If use enable random restart servers, we should ignore key exists error.
			if strings.Contains(err.Error(), "Duplicate entry") &&
				strings.Contains(err.Error(), "for key") {
				return r
			}
		}

		log.Fatalf("[execInsert fail]query - %v %v, error - %v", query, args, err)
	}
}
// query runs a read query on a randomly chosen live tidb server,
// transparently retrying transient errors per isRetryError.
func (s *TestDDLSuite) query(query string, args ...interface{}) (*sql.Rows, error) {
	for {
		server := s.getServer()
		r, err := server.db.Query(query, args...)
		if isRetryError(err) {
			// Fix: arguments were previously (query, err, server.addr),
			// mismatching the format verbs (%s query, %s server, %v err).
			log.Errorf("query %s in server %s err %v, retry", query, server.addr, err)
			continue
		}
		return r, err
	}
}
// getServer returns a randomly chosen live server, making up to 20
// random picks before aborting the process (a slot may be nil while
// that server is being restarted).
func (s *TestDDLSuite) getServer() *server {
	s.m.Lock()
	defer s.m.Unlock()

	for attempt := 0; attempt < 20; attempt++ {
		// Previously the random index shadowed the loop counter `i`,
		// which was confusing; use distinct names.
		idx := rand.Intn(*serverNum)
		if s.procs[idx] != nil {
			return s.procs[idx]
		}
	}

	log.Fatalf("try to get server too many times")
	return nil
}
// runDDL executes the DDL query, returns a channel so that you can use it to wait DDL finished.
func (s *TestDDLSuite) runDDL(sql string) chan error {
	done := make(chan error, 1)
	go func() {
		_, err := s.s.Execute(goctx.Background(), sql)
		// We must wait 2 * lease time to guarantee all servers update the schema.
		if err == nil {
			time.Sleep(time.Duration(*lease) * time.Second * 2)
		}

		done <- err
	}()

	return done
}

// getTable looks up a table by name in the test_ddl schema.
func (s *TestDDLSuite) getTable(c *C, name string) table.Table {
	tbl, err := domain.GetDomain(s.ctx).InfoSchema().TableByName(model.NewCIStr("test_ddl"), model.NewCIStr(name))
	c.Assert(err, IsNil)
	return tbl
}

// dumpRows drains rows into a slice of generic value slices, closing
// the rows and asserting no iteration error occurred.
func dumpRows(c *C, rows *sql.Rows) [][]interface{} {
	cols, err := rows.Columns()
	c.Assert(err, IsNil)
	var ay [][]interface{}
	for rows.Next() {
		v := make([]interface{}, len(cols))
		for i := range v {
			v[i] = new(interface{})
		}
		err = rows.Scan(v...)
		c.Assert(err, IsNil)

		// Unwrap the *interface{} cells filled in by Scan.
		for i := range v {
			v[i] = *(v[i].(*interface{}))
		}
		ay = append(ay, v)
	}

	rows.Close()
	c.Assert(rows.Err(), IsNil, Commentf("%v", ay))
	return ay
}

// matchRows asserts that rows contains exactly the expected values,
// row by row and column by column.
func matchRows(c *C, rows *sql.Rows, expected [][]interface{}) {
	ay := dumpRows(c, rows)
	c.Assert(len(ay), Equals, len(expected), Commentf("%v", expected))
	for i := range ay {
		match(c, ay[i], expected[i]...)
	}
}

// match compares one result row against expected values using their
// string representations (so int64/[]byte/etc. compare uniformly).
func match(c *C, row []interface{}, expected ...interface{}) {
	c.Assert(len(row), Equals, len(expected))
	for i := range row {
		if row[i] == nil {
			c.Assert(expected[i], IsNil)
			continue
		}

		got, err := types.ToString(row[i])
		c.Assert(err, IsNil)

		need, err := types.ToString(expected[i])
		c.Assert(err, IsNil)
		c.Assert(got, Equals, need)
	}
}
// Bootstrap drops and recreates every fixture table used by the suite
// inside the test_ddl database.
func (s *TestDDLSuite) Bootstrap(c *C) {
	goCtx := goctx.Background()
	// Initialize test data, you must use session to do it
	_, err := s.s.Execute(goCtx, "use test_ddl")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "drop table if exists test_index, test_column, test_insert, test_conflict_insert, "+
		"test_update, test_conflict_update, test_delete, test_conflict_delete, test_mixed, test_inc")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_index (c int, c1 bigint, c2 double, c3 varchar(256), primary key(c))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_column (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_insert (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_conflict_insert (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_update (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_conflict_update (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_delete (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_conflict_delete (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_mixed (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
	_, err = s.s.Execute(goCtx, "create table test_inc (c1 int, c2 int, primary key(c1))")
	c.Assert(err, IsNil)
}
// TestSimple smoke-tests the DDL path: create a table through the DDL
// channel, insert and read one row through a server, then drop it.
func (s *TestDDLSuite) TestSimple(c *C) {
	done := s.runDDL("create table if not exists test_simple (c1 int, c2 int, c3 int)")
	err := <-done
	c.Assert(err, IsNil)

	_, err = s.exec("insert into test_simple values (1, 1, 1)")
	c.Assert(err, IsNil)

	rows, err := s.query("select c1 from test_simple limit 1")
	c.Assert(err, IsNil)
	matchRows(c, rows, [][]interface{}{{1}})

	done = s.runDDL("drop table if exists test_simple")
	err = <-done
	c.Assert(err, IsNil)
}
func (s *TestDDLSuite) TestSimpleInsert(c *C) {
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_insert values (%d, %d)", k, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleInsert][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_insert")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
return true, nil
})
c.Assert(err, IsNil)
c.Assert(handles, HasLen, rowCount, Commentf("%d %d", len(handles), rowCount))
}
func (s *TestDDLSuite) TestSimpleConflictInsert(c *C) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
k := randomNum(rowCount)
s.exec(fmt.Sprintf("insert into test_conflict_insert values (%d, %d)", k, k))
mu.Lock()
keysMap[int64(k)] = int64(k)
mu.Unlock()
}
}()
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleConflictInsert][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_conflict_insert")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
c.Assert(keysMap, HasKey, data[0].GetValue())
c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
return true, nil
})
c.Assert(err, IsNil)
c.Assert(len(handles), Equals, len(keysMap))
}
// TestSimpleUpdate inserts one row per unique key and immediately updates its
// c2 column to a random value, recording the expected value in keysMap. The
// final scan verifies each row's c2 matches the last recorded update.
func (s *TestDDLSuite) TestSimpleUpdate(c *C) {
	var mu sync.Mutex
	keysMap := make(map[int64]int64)
	workerNum := 10
	rowCount := 10000
	batch := rowCount / workerNum
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				// Each worker owns a disjoint key range, so no update conflicts.
				k := batch*i + j
				s.execInsert(c, fmt.Sprintf("insert into test_update values (%d, %d)", k, k))
				v := randomNum(rowCount)
				s.mustExec(c, fmt.Sprintf("update test_update set c2 = %d where c1 = %d", v, k))
				mu.Lock()
				keysMap[int64(k)] = int64(v)
				mu.Unlock()
			}
		}(i)
	}
	wg.Wait()
	end := time.Now()
	fmt.Printf("[TestSimpleUpdate][Time Cost]%v\n", end.Sub(start))
	ctx := s.ctx
	err := ctx.NewTxn(goctx.Background())
	c.Assert(err, IsNil)
	tbl := s.getTable(c, "test_update")
	handles := make(map[int64]struct{})
	err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
		handles[h] = struct{}{}
		key := data[0].GetInt64()
		c.Assert(data[1].GetValue(), Equals, keysMap[key])
		return true, nil
	})
	c.Assert(err, IsNil)
	c.Assert(handles, HasLen, rowCount)
}
// TestSimpleConflictUpdate first inserts rowCount rows (disjoint ranges, no
// conflicts), then has workers update random rows to defaultValue, racing
// with each other. Because the SQL update and the keysMap bookkeeping are
// not atomic, the final check only requires each row to hold either its
// original value or defaultValue.
func (s *TestDDLSuite) TestSimpleConflictUpdate(c *C) {
	var mu sync.Mutex
	keysMap := make(map[int64]int64)
	workerNum := 10
	rowCount := 10000
	batch := rowCount / workerNum
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				k := batch*i + j
				s.execInsert(c, fmt.Sprintf("insert into test_conflict_update values (%d, %d)", k, k))
				mu.Lock()
				keysMap[int64(k)] = int64(k)
				mu.Unlock()
			}
		}(i)
	}
	wg.Wait()
	end := time.Now()
	fmt.Printf("[TestSimpleConflictUpdate][Insert][Time Cost]%v\n", end.Sub(start))
	start = time.Now()
	defaultValue := int64(-1)
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				// Random keys: several workers may update the same row.
				k := randomNum(rowCount)
				s.mustExec(c, fmt.Sprintf("update test_conflict_update set c2 = %d where c1 = %d", defaultValue, k))
				mu.Lock()
				keysMap[int64(k)] = defaultValue
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	end = time.Now()
	fmt.Printf("[TestSimpleConflictUpdate][Update][Time Cost]%v\n", end.Sub(start))
	ctx := s.ctx
	err := ctx.NewTxn(goctx.Background())
	c.Assert(err, IsNil)
	tbl := s.getTable(c, "test_conflict_update")
	handles := make(map[int64]struct{})
	err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
		handles[h] = struct{}{}
		c.Assert(keysMap, HasKey, data[0].GetValue())
		// A row is valid if untouched (c2 == c1) or updated (c2 == defaultValue).
		if !reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) && !reflect.DeepEqual(data[1].GetValue(), defaultValue) {
			log.Fatalf("[TestSimpleConflictUpdate fail]Bad row: %v", data)
		}
		return true, nil
	})
	c.Assert(err, IsNil)
	c.Assert(handles, HasLen, rowCount)
}
// TestSimpleDelete inserts and immediately deletes each key (workers own
// disjoint ranges), then verifies the table ends up empty.
func (s *TestDDLSuite) TestSimpleDelete(c *C) {
	workerNum := 10
	rowCount := 10000
	batch := rowCount / workerNum
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				k := batch*i + j
				s.execInsert(c, fmt.Sprintf("insert into test_delete values (%d, %d)", k, k))
				s.mustExec(c, fmt.Sprintf("delete from test_delete where c1 = %d", k))
			}
		}(i)
	}
	wg.Wait()
	end := time.Now()
	fmt.Printf("[TestSimpleDelete][Time Cost]%v\n", end.Sub(start))
	ctx := s.ctx
	err := ctx.NewTxn(goctx.Background())
	c.Assert(err, IsNil)
	tbl := s.getTable(c, "test_delete")
	handles := make(map[int64]struct{})
	err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
		handles[h] = struct{}{}
		return true, nil
	})
	c.Assert(err, IsNil)
	c.Assert(handles, HasLen, 0)
}
// TestSimpleConflictDelete inserts rowCount rows (disjoint ranges), then has
// workers delete random keys — the same key may be deleted by several
// workers, which is idempotent for both the SQL delete and the map delete.
// The final scan checks the surviving rows match keysMap exactly.
func (s *TestDDLSuite) TestSimpleConflictDelete(c *C) {
	var mu sync.Mutex
	keysMap := make(map[int64]int64)
	workerNum := 10
	rowCount := 10000
	batch := rowCount / workerNum
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				k := batch*i + j
				s.execInsert(c, fmt.Sprintf("insert into test_conflict_delete values (%d, %d)", k, k))
				mu.Lock()
				keysMap[int64(k)] = int64(k)
				mu.Unlock()
			}
		}(i)
	}
	wg.Wait()
	end := time.Now()
	fmt.Printf("[TestSimpleConflictDelete][Insert][Time Cost]%v\n", end.Sub(start))
	start = time.Now()
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				k := randomNum(rowCount)
				s.mustExec(c, fmt.Sprintf("delete from test_conflict_delete where c1 = %d", k))
				mu.Lock()
				delete(keysMap, int64(k))
				mu.Unlock()
			}
		}(i)
	}
	wg.Wait()
	end = time.Now()
	fmt.Printf("[TestSimpleConflictDelete][Delete][Time Cost]%v\n", end.Sub(start))
	ctx := s.ctx
	err := ctx.NewTxn(goctx.Background())
	c.Assert(err, IsNil)
	tbl := s.getTable(c, "test_conflict_delete")
	handles := make(map[int64]struct{})
	err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
		handles[h] = struct{}{}
		c.Assert(keysMap, HasKey, data[0].GetValue())
		return true, nil
	})
	c.Assert(err, IsNil)
	c.Assert(len(handles), Equals, len(keysMap))
}
// TestSimpleMixed seeds rowCount rows, then runs a mixed workload: each
// iteration inserts a fresh row (keys allocated atomically above rowCount),
// updates a random seeded row to defaultValue, and deletes a random seeded
// row. Afterwards the scan classifies surviving rows as untouched inserts
// (c2 == c1) or updates (c2 == defaultValue, key below rowCount) and derives
// the delete count from the total number of rows ever inserted.
func (s *TestDDLSuite) TestSimpleMixed(c *C) {
	workerNum := 10
	rowCount := 10000
	batch := rowCount / workerNum
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				k := batch*i + j
				s.execInsert(c, fmt.Sprintf("insert into test_mixed values (%d, %d)", k, k))
			}
		}(i)
	}
	wg.Wait()
	end := time.Now()
	fmt.Printf("[TestSimpleMixed][Insert][Time Cost]%v\n", end.Sub(start))
	start = time.Now()
	rowID := int64(rowCount)
	defaultValue := int64(-1)
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				// New keys come from an atomic counter so inserts never collide.
				key := atomic.AddInt64(&rowID, 1)
				s.execInsert(c, fmt.Sprintf("insert into test_mixed values (%d, %d)", key, key))
				key = int64(randomNum(rowCount))
				s.mustExec(c, fmt.Sprintf("update test_mixed set c2 = %d where c1 = %d", defaultValue, key))
				key = int64(randomNum(rowCount))
				s.mustExec(c, fmt.Sprintf("delete from test_mixed where c1 = %d", key))
			}
		}()
	}
	wg.Wait()
	end = time.Now()
	fmt.Printf("[TestSimpleMixed][Mixed][Time Cost]%v\n", end.Sub(start))
	ctx := s.ctx
	err := ctx.NewTxn(goctx.Background())
	c.Assert(err, IsNil)
	tbl := s.getTable(c, "test_mixed")
	updateCount := int64(0)
	insertCount := int64(0)
	err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
		if reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) {
			insertCount++
		} else if reflect.DeepEqual(data[1].GetValue(), defaultValue) && data[0].GetInt64() < int64(rowCount) {
			updateCount++
		} else {
			log.Fatalf("[TestSimpleMixed fail]invalid row: %v", data)
		}
		return true, nil
	})
	c.Assert(err, IsNil)
	// rowID holds the total number of rows ever inserted (seed + mixed phase).
	deleteCount := atomic.LoadInt64(&rowID) - insertCount - updateCount
	c.Assert(insertCount, Greater, int64(0))
	c.Assert(updateCount, Greater, int64(0))
	c.Assert(deleteCount, Greater, int64(0))
}
// TestSimpleInc seeds rowCount rows, then has every worker increment the c2
// column of the single row c1 = 0 rowCount times in total. The final scan
// checks the counter reached rowCount (or at least rowCount when restarts
// are enabled, since a restarted increment may be applied more than once)
// and that every other row is untouched.
func (s *TestDDLSuite) TestSimpleInc(c *C) {
	workerNum := 10
	rowCount := 1000
	batch := rowCount / workerNum
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				k := batch*i + j
				s.execInsert(c, fmt.Sprintf("insert into test_inc values (%d, %d)", k, k))
			}
		}(i)
	}
	wg.Wait()
	end := time.Now()
	fmt.Printf("[TestSimpleInc][Insert][Time Cost]%v\n", end.Sub(start))
	start = time.Now()
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < batch; j++ {
				// The statement is constant; fmt.Sprintf with no verbs was
				// redundant (staticcheck S1039) and has been dropped.
				s.mustExec(c, "update test_inc set c2 = c2 + 1 where c1 = 0")
			}
		}()
	}
	wg.Wait()
	end = time.Now()
	fmt.Printf("[TestSimpleInc][Update][Time Cost]%v\n", end.Sub(start))
	ctx := s.ctx
	err := ctx.NewTxn(goctx.Background())
	c.Assert(err, IsNil)
	tbl := s.getTable(c, "test_inc")
	err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
		if reflect.DeepEqual(data[0].GetValue(), int64(0)) {
			if *enableRestart {
				c.Assert(data[1].GetValue(), GreaterEqual, int64(rowCount))
			} else {
				c.Assert(data[1].GetValue(), Equals, int64(rowCount))
			}
		} else {
			c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
		}
		return true, nil
	})
	c.Assert(err, IsNil)
}
// addEnvPath appends newPath to $PATH.
func addEnvPath(newPath string) {
os.Setenv("PATH", fmt.Sprintf("%s%c%s", os.Getenv("PATH"), os.PathListSeparator, newPath))
}
// init seeds the global PRNG (used by randomNum) and registers the TiKV
// storage driver under the "tikv" scheme.
func init() {
	rand.Seed(time.Now().UnixNano())
	store.Register("tikv", tikv.Driver{})
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
lib/exabgp/application/bgp.py
|
# encoding: utf-8
"""
bgp.py
Created by Thomas Mangin on 2009-08-30.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
import os
import sys
import stat
import platform
import syslog
import string
from exabgp.util.dns import warn
from exabgp.logger import Logger
from exabgp.version import version
# import before the fork to improve copy on write memory savings
from exabgp.reactor.loop import Reactor
from exabgp.vendoring import docopt
from exabgp.vendoring import lsprofcalltree
from exabgp.configuration.usage import usage
from exabgp.debug import setup_report
setup_report()
def is_bgp (s):
    """Return True when every character of s is a hexadecimal digit or ':'."""
    allowed = set(string.hexdigits)
    allowed.add(':')
    return all(c in allowed for c in s)
def __exit (memory, code):
    """Optionally dump memory diagnostics, then terminate with exit code.

    When memory is truthy, print an objgraph report of the most common
    object types and render a back-reference graph for the Reactor.
    """
    if memory:
        from exabgp.vendoring import objgraph
        sys.stdout.write('memory utilisation\n\n')
        # show_most_common_types() writes its report to stdout itself and
        # returns None, so its result must not be passed to sys.stdout.write()
        # (doing so raised a TypeError and masked the real exit code).
        objgraph.show_most_common_types(limit=20)
        sys.stdout.write('\n\n\n')
        sys.stdout.write('generating memory utilisation graph\n\n')
        obj = objgraph.by_type('Reactor')
        objgraph.show_backrefs([obj], max_depth=10)
    sys.exit(code)
def named_pipe (root):
    """Locate the directory containing the exabgp.in/exabgp.out named pipes.

    Candidate locations are probed in order.  On success, the winning
    location is exported through the exabgp_cli_pipe environment variable
    and returned as a one-element list; otherwise the full candidate list
    is returned so the caller can report where it looked.
    """
    locations = [
        '/run/exabgp/',
        '/run/%d/' % os.getuid(),
        '/run/',
        '/var/run/exabgp/',
        '/var/run/%d/' % os.getuid(),
        '/var/run/',
        root + '/run/exabgp/',
        root + '/run/%d/' % os.getuid(),
        root + '/run/',
        root + '/var/run/exabgp/',
        root + '/var/run/%d/' % os.getuid(),
        root + '/var/run/',
    ]
    for location in locations:
        cli_in = location + 'exabgp.in'
        cli_out = location + 'exabgp.out'
        try:
            # Both files must exist and be FIFOs; os.stat raising (file
            # absent, permission denied) simply disqualifies the location.
            if not stat.S_ISFIFO(os.stat(cli_in).st_mode):
                continue
            if not stat.S_ISFIFO(os.stat(cli_out).st_mode):
                continue
        except KeyboardInterrupt:
            raise
        except Exception:
            continue
        os.environ['exabgp_cli_pipe'] = location
        return [location]
    return locations
def root_folder (options,locations):
    """Determine the installation root.

    An explicit --root option wins.  Otherwise, if the running program's
    resolved path ends with one of the known installation suffixes, the
    root is the path with that suffix stripped; failing that, ''.
    """
    explicit = options['--root']
    if explicit:
        return os.path.realpath(os.path.normpath(explicit)).rstrip('/')
    program = os.path.join(os.getcwd(), sys.argv[0])
    program = os.path.realpath(os.path.normpath(program))
    for suffix in locations:
        if program.endswith(suffix):
            return program[:-len(suffix)]
    return ''
def get_envfile (options, etc):
    """Resolve the environment file path; relative names live under etc."""
    envfile = options["--env"] or 'exabgp.env'
    if envfile.startswith('/'):
        return envfile
    return '%s/%s' % (etc, envfile)
def get_env (envfile):
    """Parse the environment file; on a setup error print usage and exit 1."""
    from exabgp.configuration.setup import environment
    try:
        return environment.setup(envfile)
    except environment.Error as exc:
        sys.stdout.write(usage)
        sys.stdout.flush()
        print('\nconfiguration issue,', str(exc))
        sys.exit(1)
def main ():
    """Parse command line options and dispatch: sub-commands, informational
    flags, single-process run, or one forked process per configuration."""
    # NOTE(review): parsing sys.version by character index misreads
    # two-digit minor versions (e.g. '3.10' -> minor 1), but the check only
    # rejects Python < 2.5 so this is currently harmless — confirm.
    major = int(sys.version[0])
    minor = int(sys.version[2])
    if major <= 2 and minor < 5:
        sys.stdout.write('This program can not work (is not tested) with your python version (< 2.5)\n')
        sys.stdout.flush()
        sys.exit(1)
    # If named_pipe() already exported a pipe location, act as the CLI client.
    cli_named_pipe = os.environ.get('exabgp_cli_pipe','')
    if cli_named_pipe:
        from exabgp.application.control import main as control
        control(cli_named_pipe)
        sys.exit(0)
    options = docopt.docopt(usage, help=False)
    # --run re-dispatches to a bundled sub-command with the remaining argv.
    if options["--run"]:
        sys.argv = sys.argv[sys.argv.index('--run')+1:]
        if sys.argv[0] == 'healthcheck':
            from exabgp.application import run_healthcheck
            run_healthcheck()
        elif sys.argv[0] == 'cli':
            from exabgp.application import run_cli
            run_cli()
        else:
            sys.stdout.write(usage)
            sys.stdout.flush()
            sys.exit(0)
        return
    root = root_folder(options,['/bin/exabgp','/sbin/exabgp','/lib/exabgp/application/bgp.py','/lib/exabgp/application/control.py'])
    prefix = '' if root == '/usr' else root
    etc = prefix + '/etc/exabgp'
    os.environ['EXABGP_ETC'] = etc  # This is not most pretty
    if options["--version"]:
        sys.stdout.write('ExaBGP : %s\n' % version)
        sys.stdout.write('Python : %s\n' % sys.version.replace('\n',' '))
        sys.stdout.write('Uname : %s\n' % ' '.join(platform.uname()[:5]))
        sys.stdout.write('Root : %s\n' % root)
        sys.stdout.flush()
        sys.exit(0)
    envfile = get_envfile(options,etc)
    env = get_env(envfile)
    # Must be done before setting the logger as it modify its behaviour
    if options["--debug"]:
        env.log.all = True
        env.log.level = syslog.LOG_DEBUG
    logger = Logger()
    from exabgp.configuration.setup import environment
    if options["--decode"]:
        # Normalise the hex payload (colons and spaces are cosmetic).
        decode = ''.join(options["--decode"]).replace(':','').replace(' ','')
        if not is_bgp(decode):
            sys.stdout.write(usage)
            sys.stdout.write('Environment values are:\n%s\n\n' % '\n'.join(' - %s' % _ for _ in environment.default()))
            sys.stdout.write('The BGP message must be an hexadecimal string.\n\n')
            sys.stdout.write('All colons or spaces are ignored, for example:\n\n')
            sys.stdout.write(' --decode 001E0200000007900F0003000101\n')
            sys.stdout.write(' --decode 001E:02:0000:0007:900F:0003:0001:01\n')
            sys.stdout.write(' --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101\n')
            sys.stdout.write(' --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01\n')
            sys.stdout.write(' --decode \'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101\n\'')
            sys.stdout.flush()
            sys.exit(1)
    else:
        decode = ''
    # --signal <sec>: fork a child that runs ExaBGP while the parent sends
    # it SIGUSR1 after the delay, then waits for it to exit.
    duration = options["--signal"]
    if duration and duration.isdigit():
        pid = os.fork()
        if pid:
            import time
            import signal
            try:
                time.sleep(int(duration))
                os.kill(pid,signal.SIGUSR1)
            except KeyboardInterrupt:
                pass
            try:
                pid,code = os.wait()
                sys.exit(code)
            except KeyboardInterrupt:
                try:
                    pid,code = os.wait()
                    sys.exit(code)
                except Exception:
                    sys.exit(0)
    if options["--help"]:
        sys.stdout.write(usage)
        sys.stdout.write('Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default()))
        sys.stdout.flush()
        sys.exit(0)
    if options["--decode"]:
        env.log.parser = True
        env.debug.route = decode
        env.tcp.bind = ''
    if options["--profile"]:
        env.profile.enable = True
        if options["--profile"].lower() in ['1','true']:
            env.profile.file = True
        elif options["--profile"].lower() in ['0','false']:
            env.profile.file = False
        else:
            env.profile.file = options["--profile"]
    if envfile and not os.path.isfile(envfile):
        comment = 'environment file missing\ngenerate it using "exabgp --fi > %s"' % envfile
    else:
        comment = ''
    # Informational dumps of the environment configuration, then exit.
    if options["--full-ini"] or options["--fi"]:
        for line in environment.iter_ini():
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)
    if options["--full-env"] or options["--fe"]:
        print()
        for line in environment.iter_env():
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)
    if options["--diff-ini"] or options["--di"]:
        for line in environment.iter_ini(True):
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)
    if options["--diff-env"] or options["--de"]:
        for line in environment.iter_env(True):
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)
    if options["--once"]:
        env.tcp.once = True
    if options["--pdb"]:
        # The following may fail on old version of python (but is required for debug.py)
        os.environ['PDB'] = 'true'
        env.debug.pdb = True
    if options["--test"]:
        env.debug.selfcheck = True
        env.log.parser = True
    if options["--memory"]:
        env.debug.memory = True
    configurations = []
    # check the file only once that we have parsed all the command line options and allowed them to run
    if options["<configuration>"]:
        for f in options["<configuration>"]:
            normalised = os.path.realpath(os.path.normpath(f))
            if os.path.isfile(normalised):
                configurations.append(normalised)
                continue
            if f.startswith('etc/exabgp'):
                normalised = os.path.join(etc,f[11:])
                if os.path.isfile(normalised):
                    configurations.append(normalised)
                    continue
            logger.debug('one of the arguments passed as configuration is not a file (%s)' % f,'configuration')
            sys.exit(1)
    else:
        sys.stdout.write(usage)
        sys.stdout.write('Environment values are:\n%s\n\n' % '\n'.join(' - %s' % _ for _ in environment.default()))
        sys.stdout.write('no configuration file provided')
        sys.stdout.flush()
        sys.exit(1)
    from exabgp.bgp.message.update.attribute import Attribute
    Attribute.caching = env.cache.attributes
    # Single configuration (or rotation): run in-process — run() never
    # returns since it exits via __exit().
    if env.debug.rotate or len(configurations) == 1:
        run(env,comment,configurations,root,options["--validate"])
    if not (env.log.destination in ('syslog','stdout','stderr') or env.log.destination.startswith('host:')):
        logger.error('can not log to files when running multiple configuration (as we fork)','configuration')
        sys.exit(1)
    try:
        # run each configuration in its own process
        pids = []
        for configuration in configurations:
            pid = os.fork()
            if pid == 0:
                run(env,comment,[configuration],root,options["--validate"],os.getpid())
            else:
                pids.append(pid)
        # If we get a ^C / SIGTERM, ignore just continue waiting for our child process
        import signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # wait for the forked processes
        for pid in pids:
            os.waitpid(pid,0)
    except OSError as exc:
        logger.critical('can not fork, errno %d : %s' % (exc.errno,exc.strerror),'reactor')
        sys.exit(1)
def run (env, comment, configurations, root, validate, pid=0):
    """Start the reactor for the given configurations, optionally under a
    profiler, and terminate the process via __exit() when it stops."""
    logger = Logger()
    logger.notice('Thank you for using ExaBGP','welcome')
    logger.notice('%s' % version,'version')
    logger.notice('%s' % sys.version.replace('\n',' '),'interpreter')
    logger.notice('%s' % ' '.join(platform.uname()[:5]),'os')
    logger.notice('%s' % root,'installation')
    if comment:
        logger.notice(comment,'advice')
    warning = warn()
    if warning:
        logger.warning(warning,'advice')
    # The CLI needs both named pipes; disable it (with guidance) otherwise.
    if env.api.cli:
        pipes = named_pipe(root)
        if len(pipes) != 1:
            env.api.cli = False
            logger.error('could not find the named pipes (exabgp.in and exabgp.out) required for the cli','cli')
            logger.error('we scanned the following folders (the number is your PID):','cli')
            for location in pipes:
                logger.error(' - %s' % location,'cli control')
            logger.error('please make them in one of the folder with the following commands:','cli control')
            logger.error('> mkfifo %s/run/exabgp.{in,out}' % os.getcwd(),'cli control')
            logger.error('> chmod 600 %s/run/exabgp.{in,out}' % os.getcwd(),'cli control')
            if os.getuid() != 0:
                logger.error('> chown %d:%d %s/run/exabgp.{in,out}' % (os.getuid(),os.getgid(),os.getcwd()),'cli control')
        else:
            pipe = pipes[0]
            os.environ['exabgp_cli_pipe'] = pipe
            logger.info('named pipes for the cli are:','cli control')
            logger.info('to send commands %sexabgp.in' % pipe,'cli control')
            logger.info('to read responses %sexabgp.out' % pipe,'cli control')
    # Fast path: no profiling requested.
    if not env.profile.enable:
        exit_code = Reactor(configurations).run(validate,root)
        __exit(env.debug.memory, exit_code)
    try:
        import cProfile as profile
    except ImportError:
        import profile
    # Profile to stdout.
    if env.profile.file == 'stdout':
        profiled = 'Reactor(%s).run(%s,"%s")' % (str(configurations),str(validate),str(root))
        exit_code = profile.run(profiled)
        __exit(env.debug.memory, exit_code)
    # Profile to a file; per-process name when forked.
    if pid:
        profile_name = "%s-pid-%d" % (env.profile.file,pid)
    else:
        profile_name = env.profile.file
    notice = ''
    if os.path.isdir(profile_name):
        notice = 'profile can not use this filename as output, it is not a directory (%s)' % profile_name
    if os.path.exists(profile_name):
        notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name
    if not notice:
        cwd = os.getcwd()
        logger.debug('profiling ....','reactor')
        profiler = profile.Profile()
        profiler.enable()
        try:
            exit_code = Reactor(configurations).run(validate, root)
        except Exception:
            exit_code = Reactor.Exit.unknown
            raise
        finally:
            # Always stop profiling and save the KCacheGrind report, even on error.
            profiler.disable()
            kprofile = lsprofcalltree.KCacheGrind(profiler)
            try:
                destination = profile_name if profile_name.startswith('/') else os.path.join(cwd,profile_name)
                with open(destination, 'w+') as write:
                    kprofile.output(write)
            except IOError:
                notice = 'could not save profiling in formation at: ' + destination
                logger.debug("-"*len(notice),'reactor')
                logger.debug(notice,'reactor')
                logger.debug("-"*len(notice),'reactor')
        __exit(env.debug.memory,exit_code)
    else:
        # Could not use the requested profile file: log why and run unprofiled.
        logger.debug("-"*len(notice),'reactor')
        logger.debug(notice,'reactor')
        logger.debug("-"*len(notice),'reactor')
        Reactor(configurations).run(validate,root)
        __exit(env.debug.memory,1)
# Script entry point.
if __name__ == '__main__':
    main()
|
[] |
[] |
[
"PDB",
"EXABGP_ETC",
"exabgp_cli_pipe"
] |
[]
|
["PDB", "EXABGP_ETC", "exabgp_cli_pipe"]
|
python
| 3 | 0 | |
.github/workflows/collect-user-submission.py
|
import json
import os
import typing
import frontmatter
import pydantic
from markdown_it import MarkdownIt
class Author(pydantic.BaseModel):
    """One author of a gallery submission; all fields are optional."""
    name: str = 'anonymous'
    affiliation: str = None
    affiliation_url: typing.Union[str, pydantic.HttpUrl] = None
    email: typing.Union[str, pydantic.EmailStr] = None
class Submission(pydantic.BaseModel):
    """A validated gallery submission parsed from a GitHub issue."""
    title: str
    description: str
    url: pydantic.HttpUrl
    thumbnail: typing.Union[str, pydantic.HttpUrl] = None
    authors: typing.List[Author] = None
    # e.g. {'packages': [...], 'formats': [...], 'domains': [...]}
    tags: typing.Dict[str, typing.List[str]] = None
@pydantic.dataclasses.dataclass
class IssueInfo:
    """Reads a GitHub issue event payload and builds a Submission from the
    front-matter embedded in the issue body's first code block."""
    gh_event_path: pydantic.FilePath
    submission: Submission = pydantic.Field(default=None)
    def __post_init_post_parse__(self):
        # Load the raw GitHub event payload once at construction time.
        with open(self.gh_event_path) as f:
            self.data = json.load(f)
    def create_submission(self):
        """Extract the issue fields, build self.submission, return self."""
        self._get_inputs()
        self._create_submission_input()
        return self
    def _get_inputs(self):
        # Pull the authoring user, title and body out of the issue event.
        self.author = self.data['issue']['user']['login']
        self.title = self.data['issue']['title']
        self.body = self.data['issue']['body']
    def _create_submission_input(self):
        md = MarkdownIt()
        inputs = None
        for token in md.parse(self.body):
            if token.tag == 'code':
                inputs = frontmatter.loads(token.content).metadata
                break
        if inputs is None:
            # Previously this fell through and crashed with an opaque
            # AttributeError on inputs.get(); fail with a clear message.
            raise ValueError('no code block with front matter found in the issue body')
        name = inputs.get('name')
        title = inputs.get('title')
        description = inputs.get('description')
        url = inputs.get('url')
        thumbnail = inputs.get('thumbnail')
        _authors = inputs.get('authors')
        authors = []
        if _authors:
            for item in _authors:
                authors.append(
                    Author(
                        # default was misspelled 'anyonymous'; fixed to match
                        # the Author model's declared default
                        name=item.get('name', 'anonymous'),
                        affiliation=item.get('affiliation'),
                        affiliation_url=item.get('affiliation_url'),
                        email=item.get('email', ''),
                    )
                )
        else:
            authors = [Author(name='anonymous')]
        _tags = inputs.get(
            'tags', {'packages': ['unspecified'], 'formats': ['unspecified'], 'domains': ['unspecified']}
        )
        # NOTE(review): Submission declares no 'name' field; pydantic v1
        # ignores the extra keyword by default — confirm whether 'name'
        # should be added to the model instead.
        self.submission = Submission(
            name=name, title=title, description=description, url=url, thumbnail=thumbnail, authors=authors, tags=_tags
        )
if __name__ == '__main__':
    # Build the submission from the triggering GitHub event and persist the
    # validated inputs for the next workflow step to consume.
    issue = IssueInfo(gh_event_path=os.environ['GITHUB_EVENT_PATH']).create_submission()
    inputs = issue.submission.dict()
    with open('gallery-submission-input.json', 'w') as f:
        json.dump(inputs, f)
|
[] |
[] |
[
"GITHUB_EVENT_PATH"
] |
[]
|
["GITHUB_EVENT_PATH"]
|
python
| 1 | 0 | |
test_xkcd2347.py
|
import os
import dotenv
import shutil
import pathlib
import xkcd2347
import diskcache
# Load variables from a local .env file, then read the GitHub API token
# every test below depends on.
dotenv.load_dotenv()
key = os.environ.get('GITHUB_TOKEN')
def test_key():
    # Fail fast with a clear signal when GITHUB_TOKEN is not configured.
    assert key
def test_get_dependencies():
    # Live API call: docnow/twarc must report at least one dependent
    # repository, each carrying an owner login.
    gh = xkcd2347.GitHub(key=key)
    deps = list(gh.get_dependencies('docnow', 'twarc'))
    assert len(deps) > 0
    assert deps[0]['repository']['owner']['login']
def test_cache():
    # Same query as above but routed through a fresh on-disk cache; the
    # cache directory is recreated so stale entries cannot interfere.
    cache_dir = pathlib.Path('test-cache')
    if cache_dir.exists():
        shutil.rmtree(cache_dir)
    cache_dir.mkdir()
    cache = diskcache.Cache(cache_dir)
    gh = xkcd2347.GitHub(key=key, cache=cache)
    deps = list(gh.get_dependencies('docnow', 'twarc'))
    assert len(deps) > 0
    assert deps[0]['repository']['owner']['login']
|
[] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"strings"
"syscall"
"time"
)
var version = "v1.1.0"
var commit = ""
var syslogDst = ""
var remote = ""
var user = ""
var auth = ""
var password = ""
var syslogInterval = 300
var cpuprofile string
var memprofile string
var debug bool
// init registers the command line flags, then lets environment variables of
// the form TWWINLOG_<FLAGNAME> (upper-cased) override the defaults before
// the command line itself is parsed (flags therefore win over env vars).
func init() {
	// "destnation" typo in the user-visible help text fixed to "destination".
	flag.StringVar(&syslogDst, "syslog", "", "syslog destination list")
	flag.StringVar(&remote, "remote", "", "remote windows pc")
	flag.StringVar(&user, "user", "", "remote user name")
	flag.StringVar(&auth, "auth", "", "remote authentication:Default|Negotiate|Kerberos|NTLM")
	flag.StringVar(&password, "password", "", "remote user's password")
	flag.IntVar(&syslogInterval, "interval", 300, "syslog send interval(sec)")
	flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
	flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`")
	flag.BoolVar(&debug, "debug", false, "Debug Mode")
	flag.VisitAll(func(f *flag.Flag) {
		if s := os.Getenv("TWWINLOG_" + strings.ToUpper(f.Name)); s != "" {
			f.Value.Set(s)
		}
	})
	flag.Parse()
}
// logWriter is a log output sink that prefixes each message with a
// millisecond-precision timestamp.
type logWriter struct {
}
// Write prepends the current time to the message and prints it to stdout.
// NOTE(review): the returned byte count includes the timestamp prefix, so
// it differs from len(bytes); confirm no caller relies on strict io.Writer
// semantics here (the standard log package ignores the count).
func (writer logWriter) Write(bytes []byte) (int, error) {
	return fmt.Print(time.Now().Format("2006-01-02T15:04:05.999 ") + string(bytes))
}
// main wires up logging and optional profiling, starts the syslog sender
// and the Windows event log collector, then blocks until SIGINT/SIGTERM.
func main() {
	log.SetFlags(0)
	log.SetOutput(new(logWriter))
	if cpuprofile != "" {
		f, err := os.Create(cpuprofile)
		if err != nil {
			log.Fatalf("could not create CPU profile: %v", err)
		}
		defer f.Close()
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatalf("could not start CPU profile: %v", err)
		}
		defer pprof.StopCPUProfile()
	}
	if memprofile != "" {
		f, err := os.Create(memprofile)
		if err != nil {
			log.Fatalf("could not create memory profile: %v", err)
		}
		defer f.Close()
		runtime.GC() // get up-to-date statistics
		// NOTE(review): the heap profile is written here, at startup, so it
		// captures almost nothing — confirm whether it should be written at
		// shutdown instead.
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Fatalf("could not write memory profile:%v", err)
		}
	}
	log.Printf("version=%s", fmt.Sprintf("%s(%s)", version, commit))
	if syslogDst == "" {
		log.Fatalln("no syslog distenation")
	}
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
	ctx, cancel := context.WithCancel(context.Background())
	go startSyslog(ctx)
	go startWinlog(ctx)
	// Block until a termination signal arrives.
	<-quit
	// Announce shutdown over syslog, then give workers time to flush.
	syslogCh <- &syslogEnt{
		Time:     time.Now(),
		Severity: 6,
		Msg:      "quit by signal",
	}
	time.Sleep(time.Second * 1)
	log.Println("quit by signal")
	cancel()
	time.Sleep(time.Second * 2)
}
|
[
"\"TWWINLOG_\" + strings.ToUpper(f.Name"
] |
[] |
[
"TWWINLOG_\" + strings.ToUpper(f.Nam"
] |
[]
|
["TWWINLOG_\" + strings.ToUpper(f.Nam"]
|
go
| 1 | 0 | |
registry/kubernetes/kubernetes.go
|
// Package kubernetes provides a kubernetes registry
package kubernetes
import (
"encoding/json"
"errors"
"fmt"
"os"
"regexp"
"strings"
"time"
"github.com/micro/go-plugins/registry/kubernetes/v2/client"
"github.com/micro/go-micro/v2/config/cmd"
"github.com/micro/go-micro/v2/registry"
)
// kregistry implements registry.Registry on top of the kubernetes API:
// services are stored as labels/annotations on the pods that run them.
type kregistry struct {
	client  client.Kubernetes
	timeout time.Duration
	options registry.Options
}
var (
	// used on pods as labels & services to select
	// eg: svcSelectorPrefix+"svc.name"
	svcSelectorPrefix = "micro.mu/selector-"
	svcSelectorValue  = "service"
	labelTypeKey          = "micro.mu/type"
	labelTypeValueService = "service"
	// used on k8s services to scope a serialised
	// micro service by pod name
	annotationServiceKeyPrefix = "micro.mu/service-"
	// Pod status
	podRunning = "Running"
	// label name regex: the characters kubernetes permits in a label value
	labelRe = regexp.MustCompilePOSIX("[-A-Za-z0-9_.]")
)
// podSelector matches every pod this registry manages (type == service).
var podSelector = map[string]string{
	labelTypeKey: labelTypeValueService,
}
// init registers this implementation under the "kubernetes" registry name.
func init() {
	cmd.DefaultRegistries["kubernetes"] = NewRegistry
}
// configure applies the options to k, builds the kubernetes client (using
// in-cluster credentials when no address is given) and sets the timeout
// (default one second). It currently never returns a non-nil error.
func configure(k *kregistry, opts ...registry.Option) error {
	for _, o := range opts {
		o(&k.options)
	}
	// get first host
	var host string
	if len(k.options.Addrs) > 0 && len(k.options.Addrs[0]) > 0 {
		host = k.options.Addrs[0]
	}
	if k.options.Timeout == 0 {
		k.options.Timeout = time.Second * 1
	}
	// if no hosts setup, assume InCluster
	var c client.Kubernetes
	if len(host) == 0 {
		c = client.NewClientInCluster()
	} else {
		c = client.NewClientByHost(host)
	}
	k.client = c
	k.timeout = k.options.Timeout
	return nil
}
// serviceName maps name onto a string usable in kubernetes labels:
// every byte outside [-A-Za-z0-9_.] is replaced with an underscore.
func serviceName(name string) string {
	out := []byte(name)
	for i, b := range out {
		switch {
		case b >= 'a' && b <= 'z',
			b >= 'A' && b <= 'Z',
			b >= '0' && b <= '9',
			b == '-', b == '_', b == '.':
			// allowed character: keep as-is
		default:
			out[i] = '_'
		}
	}
	return string(out)
}
// Init re-applies options to the registry, rebuilding the client.
func (c *kregistry) Init(opts ...registry.Option) error {
	return configure(c, opts...)
}
// Options returns a copy of the registry's current Options.
func (c *kregistry) Options() registry.Options {
	return c.options
}
// Register advertises s by patching the current pod with a selector label
// and an annotation holding the JSON-serialised service. The pod name is
// taken from the HOSTNAME environment variable (the pod's default hostname).
func (c *kregistry) Register(s *registry.Service, opts ...registry.RegisterOption) error {
	if len(s.Nodes) == 0 {
		return errors.New("you must register at least one node")
	}
	// TODO: grab podname from somewhere better than this.
	podName := os.Getenv("HOSTNAME")
	svcName := s.Name
	// encode micro service
	b, err := json.Marshal(s)
	if err != nil {
		return err
	}
	svc := string(b)
	pod := &client.Pod{
		Metadata: &client.Meta{
			Labels: map[string]*string{
				labelTypeKey: &labelTypeValueService,
				svcSelectorPrefix + serviceName(svcName): &svcSelectorValue,
			},
			Annotations: map[string]*string{
				annotationServiceKeyPrefix + serviceName(svcName): &svc,
			},
		},
	}
	if _, err := c.client.UpdatePod(podName, pod); err != nil {
		return err
	}
	return nil
}
// Deregister removes the label and annotation that Register set on the
// current pod (a nil map value deletes the key in the patch).
func (c *kregistry) Deregister(s *registry.Service) error {
	if len(s.Nodes) == 0 {
		return errors.New("you must deregister at least one node")
	}
	// TODO: grab podname from somewhere better than this.
	podName := os.Getenv("HOSTNAME")
	svcName := s.Name
	pod := &client.Pod{
		Metadata: &client.Meta{
			Labels: map[string]*string{
				svcSelectorPrefix + serviceName(svcName): nil,
			},
			Annotations: map[string]*string{
				annotationServiceKeyPrefix + serviceName(svcName): nil,
			},
		},
	}
	if _, err := c.client.UpdatePod(podName, pod); err != nil {
		return err
	}
	return nil
}
// GetService lists the running pods carrying the selector for name, decodes
// the serialised service from each pod's annotation, and merges the nodes
// of pods that advertise the same service version.
func (c *kregistry) GetService(name string) ([]*registry.Service, error) {
	pods, err := c.client.ListPods(map[string]string{
		svcSelectorPrefix + serviceName(name): svcSelectorValue,
	})
	if err != nil {
		return nil, err
	}
	if len(pods.Items) == 0 {
		return nil, registry.ErrNotFound
	}
	// svcs mapped by version
	svcs := make(map[string]*registry.Service)
	// loop through items
	for _, pod := range pods.Items {
		// only running pods participate in discovery
		if pod.Status.Phase != podRunning {
			continue
		}
		// get serialised service from annotation
		svcStr, ok := pod.Metadata.Annotations[annotationServiceKeyPrefix+serviceName(name)]
		if !ok {
			continue
		}
		// unmarshal service string
		var svc registry.Service
		err := json.Unmarshal([]byte(*svcStr), &svc)
		if err != nil {
			return nil, fmt.Errorf("could not unmarshal service '%s' from pod annotation", name)
		}
		// merge up pod service & ip with versioned service.
		vs, ok := svcs[svc.Version]
		if !ok {
			svcs[svc.Version] = &svc
			continue
		}
		vs.Nodes = append(vs.Nodes, svc.Nodes...)
	}
	list := make([]*registry.Service, 0, len(svcs))
	for _, val := range svcs {
		list = append(list, val)
	}
	return list, nil
}
// ListServices scans every running service pod and returns the distinct
// service names found in their annotations (name-only Service entries).
func (c *kregistry) ListServices() ([]*registry.Service, error) {
	pods, err := c.client.ListPods(podSelector)
	if err != nil {
		return nil, err
	}
	// svcs mapped by name
	svcs := make(map[string]bool)
	for _, pod := range pods.Items {
		if pod.Status.Phase != podRunning {
			continue
		}
		for k, v := range pod.Metadata.Annotations {
			if !strings.HasPrefix(k, annotationServiceKeyPrefix) {
				continue
			}
			// we have to unmarshal the annotation itself since the
			// key is encoded to match the regex restriction.
			var svc registry.Service
			if err := json.Unmarshal([]byte(*v), &svc); err != nil {
				continue
			}
			svcs[svc.Name] = true
		}
	}
	var list []*registry.Service
	for val := range svcs {
		list = append(list, &registry.Service{Name: val})
	}
	return list, nil
}
// Watch returns a watcher streaming registry changes from kubernetes.
func (c *kregistry) Watch(opts ...registry.WatchOption) (registry.Watcher, error) {
	return newWatcher(c, opts...)
}
// String returns the registry implementation name.
func (c *kregistry) String() string {
	return "kubernetes"
}
// NewRegistry creates a kubernetes registry.
// NOTE(review): configure's error is discarded here; today it always
// returns nil, but confirm if that ever changes.
func NewRegistry(opts ...registry.Option) registry.Registry {
	k := &kregistry{
		options: registry.Options{},
	}
	configure(k, opts...)
	return k
}
|
[
"\"HOSTNAME\"",
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
go
| 1 | 0 | |
pkg/source/actions/action_runner.go
|
package actions
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/google/go-github/v29/github"
"github.com/rajatjindal/krew-release-bot/pkg/cicd"
"github.com/rajatjindal/krew-release-bot/pkg/source"
"github.com/sirupsen/logrus"
"golang.org/x/oauth2"
)
// getHTTPClient returns an OAuth2-authenticated client when GITHUB_TOKEN is
// set, and nil otherwise (the github client then uses unauthenticated calls).
func getHTTPClient() *http.Client {
	token := os.Getenv("GITHUB_TOKEN")
	if token == "" {
		return nil
	}
	logrus.Info("GITHUB_TOKEN env variable found, using authenticated requests.")
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
	return oauth2.NewClient(context.TODO(), src)
}
// RunAction runs the github action: it resolves the release that triggered
// the action via the CI/CD provider, renders the plugin manifest from the
// repository's template file and submits it to the krew-release-bot webhook,
// which raises the krew-index pull request.
func RunAction() error {
	client := github.NewClient(getHTTPClient())

	// the provider abstracts over the supported CI/CD systems
	provider := cicd.GetProvider()
	if provider == nil {
		// Fatal exits the process, so nothing below runs in that case
		logrus.Fatal("failed to identify the CI/CD provider")
	}

	tag, err := provider.GetTag()
	if err != nil {
		return err
	}
	owner, repo, err := provider.GetOwnerAndRepo()
	if err != nil {
		return err
	}
	actor, err := provider.GetActor()
	if err != nil {
		return err
	}

	releaseInfo, err := getReleaseForTag(client, owner, repo, tag)
	if err != nil {
		return err
	}
	// pre-releases are deliberately never submitted
	if releaseInfo.GetPrerelease() {
		return fmt.Errorf("release with tag %q is a pre-release. skipping", releaseInfo.GetTagName())
	}

	templateFile := provider.GetTemplateFile()
	logrus.Infof("using template file %q", templateFile)
	releaseRequest := &source.ReleaseRequest{
		TagName:            releaseInfo.GetTagName(),
		PluginOwner:        owner,
		PluginRepo:         repo,
		PluginReleaseActor: actor,
		TemplateFile:       templateFile,
	}

	// render the plugin manifest and attach it to the request
	pluginName, pluginManifest, err := source.ProcessTemplate(templateFile, releaseRequest)
	if err != nil {
		return err
	}
	releaseRequest.PluginName = pluginName
	releaseRequest.ProcessedTemplate = pluginManifest

	pr, err := submitForPR(releaseRequest)
	if err != nil {
		return err
	}
	logrus.Info(pr)
	return nil
}
// getReleaseForTag looks up the GitHub release that corresponds to the
// given tag in owner/repo.
func getReleaseForTag(client *github.Client, owner, repo, tag string) (*github.RepositoryRelease, error) {
	rel, _, err := client.Repositories.GetReleaseByTag(context.TODO(), owner, repo, tag)
	if err != nil {
		return nil, err
	}
	return rel, nil
}
// submitForPR posts the release request to the krew-release-bot webhook,
// which opens the pull request against krew-index. It returns the webhook's
// response body on success and an error for any non-200 status.
func submitForPR(request *source.ReleaseRequest) (string, error) {
	body, err := json.Marshal(request)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest(http.MethodPost, getWebhookURL(), bytes.NewBuffer(body))
	if err != nil {
		return "", err
	}
	req.Header.Add("content-type", "application/json")

	// bound the request so a stuck webhook cannot hang the action forever
	// (30 * time.Second is already a time.Duration; no conversion needed)
	client := http.Client{
		Timeout: 30 * time.Second,
	}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("expected status code %d got %d. body: %s", http.StatusOK, resp.StatusCode, string(respBody))
	}
	return string(respBody), nil
}
// getWebhookURL returns the krew-release-bot webhook endpoint. It can be
// overridden via the KREW_RELEASE_BOT_WEBHOOK_URL environment variable
// (useful for testing against a staging deployment); the env var is read
// once instead of twice as before.
func getWebhookURL() string {
	if url := os.Getenv("KREW_RELEASE_BOT_WEBHOOK_URL"); url != "" {
		return url
	}
	return "https://krew-release-bot.rajatjindal.com/github-action-webhook"
}
|
[
"\"GITHUB_TOKEN\"",
"\"GITHUB_TOKEN\"",
"\"KREW_RELEASE_BOT_WEBHOOK_URL\"",
"\"KREW_RELEASE_BOT_WEBHOOK_URL\""
] |
[] |
[
"KREW_RELEASE_BOT_WEBHOOK_URL",
"GITHUB_TOKEN"
] |
[]
|
["KREW_RELEASE_BOT_WEBHOOK_URL", "GITHUB_TOKEN"]
|
go
| 2 | 0 | |
pkg/jx/cmd/promote.go
|
package cmd
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"strconv"
"github.com/blang/semver"
"github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1"
typev1 "github.com/jenkins-x/jx/pkg/client/clientset/versioned/typed/jenkins.io/v1"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/helm"
"github.com/jenkins-x/jx/pkg/jx/cmd/log"
"github.com/jenkins-x/jx/pkg/jx/cmd/templates"
cmdutil "github.com/jenkins-x/jx/pkg/jx/cmd/util"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/util"
"github.com/spf13/cobra"
"gopkg.in/AlecAivazis/survey.v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
)
const (
	// names of the CLI flags shared between the promote command and the
	// commands that embed its options
	optionEnvironment         = "env"
	optionApplication         = "app"
	optionTimeout             = "timeout"
	optionPullRequestPollTime = "pull-request-poll-time"

	// gitStatusSuccess is the state compared against commit statuses
	// reported by the git provider when checks pass
	gitStatusSuccess = "success"
)

var (
	// waitAfterPullRequestCreated gives the git provider a moment to settle
	// before we start polling the newly created pull request
	waitAfterPullRequestCreated = time.Second * 3
)
// PromoteOptions contains the CLI options for the promote command.
type PromoteOptions struct {
	CommonOptions

	Namespace           string // explicit namespace to promote into (overrides the environment's namespace)
	Environment         string // name of the target Environment resource
	Application         string // name of the application (chart) being promoted
	Version             string // version to promote; empty means latest
	ReleaseName         string // helm release name; defaults to "<namespace>-<app>"
	LocalHelmRepoName   string // name of the local helm repository that contains the app
	HelmRepositoryURL   string // helm repository URL written into GitOps requirements
	NoHelmUpdate        bool   // skip `helm repo update` before promoting
	AllAutomatic        bool   // promote through every automatic permanent environment in order
	Timeout             string // how long to wait for the promotion to complete
	PullRequestPollTime string // poll interval while waiting for a promotion PR to merge

	// calculated fields
	TimeoutDuration         *time.Duration // parsed form of Timeout
	PullRequestPollDuration *time.Duration // parsed form of PullRequestPollTime
	Activities              typev1.PipelineActivityInterface
	GitInfo                 *gits.GitRepositoryInfo
	jenkinsURL              string
	releaseResource         *v1.Release
}
// ReleaseInfo describes the outcome of a single promotion: the helm release
// involved and, for GitOps promotions, the Pull Request that was raised.
type ReleaseInfo struct {
	ReleaseName     string                  // helm release name used for the promotion
	FullAppName     string                  // chart name, possibly prefixed with the local repo name
	Version         string                  // version being promoted; empty means latest
	PullRequestInfo *ReleasePullRequestInfo // set only when promoting via a GitOps Pull Request
}

// ReleasePullRequestInfo captures the Pull Request raised against a GitOps
// environment repository together with the provider used to raise it.
type ReleasePullRequestInfo struct {
	GitProvider          gits.GitProvider
	PullRequest          *gits.GitPullRequest
	PullRequestArguments *gits.GitPullRequestArguments
}
var (
	// promote_long is the full help text shown by `jx help promote`.
	promote_long = templates.LongDesc(`
		Promotes a version of an application to zero to many permanent environments.

		For more documentation see: [http://jenkins-x.io/about/features/#promotion](http://jenkins-x.io/about/features/#promotion)

`)

	// promote_example lists worked examples shown in the command help.
	promote_example = templates.Examples(`
		# Promote a version of the current application to staging
        # discovering the application name from the source code
		jx promote --version 1.2.3 --env staging

		# Promote a version of the myapp application to production
		jx promote myapp --version 1.2.3 --env production

		# To create or update a Preview Environment please see the 'jx preview' command
		jx preview
	`)
)
// NewCmdPromote creates the new command for: jx promote
func NewCmdPromote(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.Command {
	options := &PromoteOptions{
		CommonOptions: CommonOptions{
			Factory: f,
			Out:     out,
			Err:     errOut,
		},
	}
	cmd := &cobra.Command{
		Use:     "promote [application]",
		Short:   "Promotes a version of an application to an environment",
		Long:    promote_long,
		Example: promote_example,
		Run: func(cmd *cobra.Command, args []string) {
			options.Cmd = cmd
			options.Args = args
			err := options.Run()
			cmdutil.CheckErr(err)
		},
	}
	options.addCommonFlags(cmd)

	// flags registered here only apply to the top-level promote command;
	// the shared promote flags live in addPromoteOptions
	cmd.Flags().StringVarP(&options.Namespace, "namespace", "n", "", "The Namespace to promote to")
	cmd.Flags().StringVarP(&options.Environment, optionEnvironment, "e", "", "The Environment to promote to")
	cmd.Flags().BoolVarP(&options.AllAutomatic, "all-auto", "", false, "Promote to all automatic environments in order")

	options.addPromoteOptions(cmd)
	return cmd
}
// addPromoteOptions registers the promote flags that are shared with other
// commands embedding PromoteOptions.
func (options *PromoteOptions) addPromoteOptions(cmd *cobra.Command) {
	cmd.Flags().StringVarP(&options.Application, optionApplication, "a", "", "The Application to promote")
	cmd.Flags().StringVarP(&options.Version, "version", "v", "", "The Version to promote")
	cmd.Flags().StringVarP(&options.LocalHelmRepoName, "helm-repo-name", "r", kube.LocalHelmRepoName, "The name of the helm repository that contains the app")
	cmd.Flags().StringVarP(&options.HelmRepositoryURL, "helm-repo-url", "u", helm.DefaultHelmRepositoryURL, "The Helm Repository URL to use for the App")
	cmd.Flags().StringVarP(&options.ReleaseName, "release", "", "", "The name of the helm release")
	cmd.Flags().StringVarP(&options.Timeout, optionTimeout, "t", "1h", "The timeout to wait for the promotion to succeed in the underlying Environment. The command fails if the timeout is exceeded or the promotion does not complete")
	cmd.Flags().StringVarP(&options.PullRequestPollTime, optionPullRequestPollTime, "", "20s", "Poll time when waiting for a Pull Request to merge")
	cmd.Flags().BoolVarP(&options.NoHelmUpdate, "no-helm-update", "", false, "Allows the 'helm repo update' command if you are sure your local helm cache is up to date with the version you wish to promote")
}
// Run implements the promote command: it resolves the application name and
// target namespace, parses the wait durations, registers the CRDs the
// promotion records rely on, then either promotes through all automatic
// environments or performs a single promotion and waits for it.
func (o *PromoteOptions) Run() error {
	app := o.Application
	if app == "" {
		args := o.Args
		if len(args) == 0 {
			var err error
			app, err = o.DiscoverAppName()
			if err != nil {
				return err
			}
		} else {
			app = args[0]
		}
	}
	o.Application = app

	// parse the string durations once up front so later code can rely on
	// the *time.Duration fields
	if o.PullRequestPollTime != "" {
		duration, err := time.ParseDuration(o.PullRequestPollTime)
		if err != nil {
			return fmt.Errorf("Invalid duration format %s for option --%s: %s", o.PullRequestPollTime, optionPullRequestPollTime, err)
		}
		o.PullRequestPollDuration = &duration
	}
	if o.Timeout != "" {
		duration, err := time.ParseDuration(o.Timeout)
		if err != nil {
			return fmt.Errorf("Invalid duration format %s for option --%s: %s", o.Timeout, optionTimeout, err)
		}
		o.TimeoutDuration = &duration
	}

	targetNS, env, err := o.GetTargetNamespace(o.Namespace, o.Environment)
	if err != nil {
		return err
	}

	// ensure the CRDs used to record promotion activities exist
	apisClient, err := o.Factory.CreateApiExtensionsClient()
	if err != nil {
		return err
	}
	err = kube.RegisterEnvironmentCRD(apisClient)
	if err != nil {
		return err
	}
	err = kube.RegisterPipelineActivityCRD(apisClient)
	if err != nil {
		return err
	}
	err = kube.RegisterGitServiceCRD(apisClient)
	if err != nil {
		return err
	}

	jxClient, ns, err := o.JXClient()
	if err != nil {
		return err
	}
	o.Activities = jxClient.JenkinsV1().PipelineActivities(ns)

	releaseName := o.ReleaseName
	if releaseName == "" {
		releaseName = targetNS + "-" + app
		o.ReleaseName = releaseName
	}
	if o.AllAutomatic {
		return o.PromoteAllAutomatic()
	}

	releaseInfo, err := o.Promote(targetNS, env, true)
	// previously the error returned by Promote was silently discarded and
	// WaitForPromotion was invoked regardless; surface it instead
	if err != nil {
		return err
	}
	return o.WaitForPromotion(targetNS, env, releaseInfo)
}
// PromoteAllAutomatic promotes the app through every permanent environment
// whose promotion strategy is Automatic, visiting environments in sorted
// order and waiting for each promotion before starting the next.
func (o *PromoteOptions) PromoteAllAutomatic() error {
	kubeClient, currentNs, err := o.KubeClient()
	if err != nil {
		return err
	}
	team, _, err := kube.GetDevNamespace(kubeClient, currentNs)
	if err != nil {
		return err
	}
	jxClient, _, err := o.JXClient()
	if err != nil {
		return err
	}
	envs, err := jxClient.JenkinsV1().Environments(team).List(metav1.ListOptions{})
	if err != nil {
		// missing environments is treated as best-effort: warn and return nil
		// NOTE(review): "%s/n" looks like a typo for "%s\n" — confirm
		o.warnf("No Environments found: %s/n", err)
		return nil
	}
	environments := envs.Items
	if len(environments) == 0 {
		o.warnf("No Environments have been created yet in team %s. Please create some via 'jx create env'\n", team)
		return nil
	}
	kube.SortEnvironments(environments)
	for _, env := range environments {
		kind := env.Spec.Kind
		// only permanent environments with automatic promotion participate
		if env.Spec.PromotionStrategy == v1.PromotionStrategyTypeAutomatic && kind.IsPermanent() {
			ns := env.Spec.Namespace
			if ns == "" {
				return fmt.Errorf("No namespace for environment %s", env.Name)
			}
			releaseInfo, err := o.Promote(ns, &env, false)
			if err != nil {
				return err
			}
			err = o.WaitForPromotion(ns, &env, releaseInfo)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// Promote promotes the app into the given namespace/environment. For GitOps
// environments with a source URL it raises a Pull Request; otherwise it runs
// `helm upgrade --install` directly. When warnIfAuto is set the user is asked
// to confirm manual promotion into an automatically-promoted environment.
func (o *PromoteOptions) Promote(targetNS string, env *v1.Environment, warnIfAuto bool) (*ReleaseInfo, error) {
	app := o.Application
	if app == "" {
		// NOTE(review): "jx promomote" in this message looks like a typo — confirm
		o.warnf("No application name could be detected so cannot promote via Helm. If the detection of the helm chart name is not working consider adding it with the --%s argument on the 'jx promomote' command\n", optionApplication)
		return nil, nil
	}
	version := o.Version
	info := util.ColorInfo
	if version == "" {
		o.Printf("Promoting latest version of app %s to namespace %s\n", info(app), info(targetNS))
	} else {
		o.Printf("Promoting app %s version %s to namespace %s\n", info(app), info(version), info(targetNS))
	}
	fullAppName := app
	if o.LocalHelmRepoName != "" {
		fullAppName = o.LocalHelmRepoName + "/" + app
	}
	releaseName := o.ReleaseName
	if releaseName == "" {
		releaseName = targetNS + "-" + app
		o.ReleaseName = releaseName
	}
	releaseInfo := &ReleaseInfo{
		ReleaseName: releaseName,
		FullAppName: fullAppName,
		Version:     version,
	}
	// manual promotion into an automatic environment needs confirmation
	if warnIfAuto && env != nil && env.Spec.PromotionStrategy == v1.PromotionStrategyTypeAutomatic {
		o.Printf("%s", util.ColorWarning("WARNING: The Environment %s is setup to promote automatically as part of the CI / CD Pipelines.\n\n", env.Name))
		confirm := &survey.Confirm{
			Message: "Do you wish to promote anyway? :",
			Default: false,
		}
		flag := false
		err := survey.AskOne(confirm, &flag, nil)
		if err != nil {
			return releaseInfo, err
		}
		if !flag {
			return releaseInfo, nil
		}
	}
	promoteKey := o.createPromoteKey(env)
	if env != nil {
		source := &env.Spec.Source
		// GitOps path: permanent environments with a source repo are
		// promoted via a Pull Request rather than a direct helm upgrade
		if source.URL != "" && env.Spec.Kind.IsPermanent() {
			err := o.PromoteViaPullRequest(env, releaseInfo)
			if err == nil {
				startPromotePR := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromotePullRequestStep) error {
					kube.StartPromotionPullRequest(a, s, ps, p)
					pr := releaseInfo.PullRequestInfo
					if pr != nil && pr.PullRequest != nil && p.PullRequestURL == "" {
						p.PullRequestURL = pr.PullRequest.URL
					}
					if version != "" && a.Spec.Version == "" {
						a.Spec.Version = version
					}
					return nil
				}
				err = promoteKey.OnPromotePullRequest(o.Activities, startPromotePR)

				// lets sleep a little before we try poll for the PR status
				time.Sleep(waitAfterPullRequestCreated)
			}
			return releaseInfo, err
		}
	}
	err := o.verifyHelmConfigured()
	if err != nil {
		return releaseInfo, err
	}

	// lets do a helm update to ensure we can find the latest version
	if !o.NoHelmUpdate {
		o.Printf("Updating the helm repositories to ensure we can find the latest versions...")
		err = o.runCommand("helm", "repo", "update")
		if err != nil {
			return releaseInfo, err
		}
	}
	startPromote := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromoteUpdateStep) error {
		kube.StartPromotionUpdate(a, s, ps, p)
		if version != "" && a.Spec.Version == "" {
			a.Spec.Version = version
		}
		return nil
	}
	// NOTE(review): the error from recording this activity step is ignored —
	// confirm this is intentional best-effort bookkeeping
	promoteKey.OnPromoteUpdate(o.Activities, startPromote)
	if version != "" {
		err = o.runCommand("helm", "upgrade", "--install", "--wait", "--namespace", targetNS, "--version", version, releaseName, fullAppName)
	} else {
		err = o.runCommand("helm", "upgrade", "--install", "--wait", "--namespace", targetNS, releaseName, fullAppName)
	}
	if err == nil {
		err = o.commentOnIssues(targetNS, env)
		if err != nil {
			o.warnf("Failed to comment on issues for release %s: %s\n", releaseName, err)
		}
		err = promoteKey.OnPromoteUpdate(o.Activities, kube.CompletePromotionUpdate)
	} else {
		err = promoteKey.OnPromoteUpdate(o.Activities, kube.FailedPromotionUpdate)
	}
	return releaseInfo, err
}
// PromoteViaPullRequest promotes the app by raising a Pull Request against
// the environment's GitOps source repository: it clones (or refreshes) the
// repo, bumps the app version in the helm requirements file on a new branch,
// pushes, and opens the PR. releaseInfo is updated with the PR details.
func (o *PromoteOptions) PromoteViaPullRequest(env *v1.Environment, releaseInfo *ReleaseInfo) error {
	source := &env.Spec.Source
	gitURL := source.URL
	if gitURL == "" {
		return fmt.Errorf("No source git URL")
	}
	gitInfo, err := gits.ParseGitURL(gitURL)
	if err != nil {
		return err
	}
	environmentsDir, err := util.EnvironmentsDir()
	if err != nil {
		return err
	}
	dir := filepath.Join(environmentsDir, gitInfo.Organisation, gitInfo.Name)

	// now lets clone the fork and push it...
	exists, err := util.FileExists(dir)
	if err != nil {
		return err
	}
	app := o.Application
	version := o.Version
	versionName := version
	if versionName == "" {
		versionName = "latest"
	}
	branchName := gits.ConvertToValidBranchName("promote-" + app + "-" + versionName)
	base := source.Ref
	if base == "" {
		base = "master"
	}
	if exists {
		// lets check the git remote URL is setup correctly
		err = gits.SetRemoteURL(dir, "origin", gitURL)
		if err != nil {
			return err
		}
		err = gits.GitCmd(dir, "stash")
		if err != nil {
			return err
		}
		err = gits.GitCmd(dir, "checkout", base)
		if err != nil {
			return err
		}
		err = gits.GitCmd(dir, "pull")
		if err != nil {
			return err
		}
	} else {
		err := os.MkdirAll(dir, DefaultWritePermissions)
		if err != nil {
			return fmt.Errorf("Failed to create directory %s due to %s", dir, err)
		}
		err = gits.GitClone(gitURL, dir)
		if err != nil {
			return err
		}
		if base != "master" {
			err = gits.GitCmd(dir, "checkout", base)
			if err != nil {
				return err
			}
		}

		// TODO lets fork if required???
		/*
			pushGitURL, err := gits.GitCreatePushURL(gitURL, details.User)
			if err != nil {
				return err
			}

			err = gits.GitCmd(dir, "remote", "add", "upstream", forkEnvGitURL)
			if err != nil {
				return err
			}

			err = gits.GitCmd(dir, "remote", "add", "origin", pushGitURL)
			if err != nil {
				return err
			}

			err = gits.GitCmd(dir, "push", "-u", "origin", "master")
			if err != nil {
				return err
			}
		*/
	}
	branchNames, err := gits.GitGetRemoteBranchNames(dir, "remotes/origin/")
	if err != nil {
		return fmt.Errorf("Failed to load remote branch names: %s", err)
	}
	o.Printf("Found remote branch names %s\n", strings.Join(branchNames, ", "))
	if util.StringArrayIndex(branchNames, branchName) >= 0 {
		// lets append a UUID as the branch name already exists
		branchName += "-" + string(uuid.NewUUID())
	}
	err = gits.GitCmd(dir, "branch", branchName)
	if err != nil {
		return err
	}
	err = gits.GitCmd(dir, "checkout", branchName)
	if err != nil {
		return err
	}
	requirementsFile, err := helm.FindRequirementsFileName(dir)
	if err != nil {
		return err
	}
	requirements, err := helm.LoadRequirementsFile(requirementsFile)
	if err != nil {
		return err
	}
	if version == "" {
		version, err = o.findLatestVersion(app)
		if err != nil {
			return err
		}
	}
	requirements.SetAppVersion(app, version, o.HelmRepositoryURL)
	err = helm.SaveRequirementsFile(requirementsFile, requirements)
	// previously this error was silently overwritten by the next GitCmd
	// call; a failed save must abort the promotion
	if err != nil {
		return err
	}

	err = gits.GitCmd(dir, "add", "*", "*/*")
	if err != nil {
		return err
	}
	changed, err := gits.HasChanges(dir)
	if err != nil {
		return err
	}
	if !changed {
		o.Printf("%s\n", util.ColorWarning("No changes made to the GitOps Environment source code. Must be already on version!"))
		return nil
	}
	message := fmt.Sprintf("Promote %s to version %s", app, versionName)
	err = gits.GitCommit(dir, message)
	if err != nil {
		return err
	}
	err = gits.GitPush(dir)
	if err != nil {
		return err
	}
	authConfigSvc, err := o.Factory.CreateGitAuthConfigService()
	if err != nil {
		return err
	}
	gitKind, err := o.GitServerKind(gitInfo)
	if err != nil {
		return err
	}
	provider, err := gitInfo.PickOrCreateProvider(authConfigSvc, "user name to submit the Pull Request", o.BatchMode, gitKind)
	if err != nil {
		return err
	}
	gha := &gits.GitPullRequestArguments{
		Owner: gitInfo.Organisation,
		Repo:  gitInfo.Name,
		Title: app + " to " + versionName,
		Body:  message,
		Base:  base,
		Head:  branchName,
	}
	pr, err := provider.CreatePullRequest(gha)
	if err != nil {
		return err
	}
	o.Printf("Created Pull Request: %s\n\n", util.ColorInfo(pr.URL))
	releaseInfo.PullRequestInfo = &ReleasePullRequestInfo{
		GitProvider:          provider,
		PullRequest:          pr,
		PullRequestArguments: gha,
	}
	return nil
}
// GetTargetNamespace resolves the namespace (and optionally the Environment
// resource) to promote into from the --namespace / --env option values,
// ensuring the namespace exists before returning it.
func (o *PromoteOptions) GetTargetNamespace(ns string, env string) (string, *v1.Environment, error) {
	kubeClient, currentNs, err := o.KubeClient()
	if err != nil {
		return "", nil, err
	}
	team, _, err := kube.GetDevNamespace(kubeClient, currentNs)
	if err != nil {
		return "", nil, err
	}
	jxClient, _, err := o.JXClient()
	if err != nil {
		return "", nil, err
	}
	m, envNames, err := kube.GetEnvironments(jxClient, team)
	if err != nil {
		return "", nil, err
	}
	if len(envNames) == 0 {
		return "", nil, fmt.Errorf("No Environments have been created yet in team %s. Please create some via 'jx create env'", team)
	}

	var envResource *v1.Environment
	targetNS := currentNs
	// --env takes priority over --namespace; default is the current namespace
	if env != "" {
		envResource = m[env]
		if envResource == nil {
			return "", nil, util.InvalidOption(optionEnvironment, env, envNames)
		}
		targetNS = envResource.Spec.Namespace
		if targetNS == "" {
			// NOTE(review): "namspace" in this message looks like a typo — confirm
			return "", nil, fmt.Errorf("Environment %s does not have a namspace associated with it!", env)
		}
	} else if ns != "" {
		targetNS = ns
	}

	labels := map[string]string{}
	annotations := map[string]string{}
	// make sure the target namespace exists before deploying into it
	err = kube.EnsureNamespaceCreated(kubeClient, targetNS, labels, annotations)
	if err != nil {
		return "", nil, err
	}
	return targetNS, envResource, nil
}
// DiscoverAppName derives the application name from the local git repository,
// falling back to the helm chart name when the repo name is empty.
func (o *PromoteOptions) DiscoverAppName() (string, error) {
	gitInfo, err := gits.GetGitInfo("")
	if err != nil {
		return "", err
	}
	if gitInfo == nil {
		return "", fmt.Errorf("no git info found to discover app name from")
	}
	if gitInfo.Name != "" {
		return gitInfo.Name, nil
	}
	chartFile, err := o.FindHelmChart()
	if err != nil {
		return "", err
	}
	if chartFile != "" {
		return helm.LoadChartName(chartFile)
	}
	return "", nil
}
// WaitForPromotion blocks until a GitOps promotion Pull Request completes or
// the configured timeout elapses. It is a no-op when no timeout or poll
// duration was configured, or when the promotion did not go via a PR.
func (o *PromoteOptions) WaitForPromotion(ns string, env *v1.Environment, releaseInfo *ReleaseInfo) error {
	if o.TimeoutDuration == nil {
		o.Printf("No --%s option specified on the 'jx promote' command so not waiting for the promotion to succeed\n", optionTimeout)
		return nil
	}
	if o.PullRequestPollDuration == nil {
		o.Printf("No --%s option specified on the 'jx promote' command so not waiting for the promotion to succeed\n", optionPullRequestPollTime)
		return nil
	}
	duration := *o.TimeoutDuration
	end := time.Now().Add(duration)
	pullRequestInfo := releaseInfo.PullRequestInfo
	if pullRequestInfo != nil {
		promoteKey := o.createPromoteKey(env)
		err := o.waitForGitOpsPullRequest(ns, env, releaseInfo, end, duration, promoteKey)
		if err != nil {
			// TODO based on if the PR completed or not fail the PR or the Promote?
			promoteKey.OnPromotePullRequest(o.Activities, kube.FailedPromotionPullRequest)
			return err
		}
	}
	return nil
}
// waitForGitOpsPullRequest polls the promotion Pull Request until it merges
// and all commit statuses on its merge commit succeed. While the PR is open
// it auto-merges when the last commit status allows it. It fails when the PR
// is closed without merging, a status fails, or the deadline passes.
func (o *PromoteOptions) waitForGitOpsPullRequest(ns string, env *v1.Environment, releaseInfo *ReleaseInfo, end time.Time, duration time.Duration, promoteKey *kube.PromoteStepActivityKey) error {
	pullRequestInfo := releaseInfo.PullRequestInfo

	// the log* flags ensure each distinct condition is logged only once
	// while the loop keeps polling
	logMergeFailure := false
	logNoMergeCommitSha := false
	logHasMergeSha := false
	logMergeStatusError := false
	logNoMergeStatuses := false
	// last seen state / target URL per status URL, used to detect changes
	urlStatusMap := map[string]string{}
	urlStatusTargetURLMap := map[string]string{}

	if pullRequestInfo != nil {
		for {
			pr := pullRequestInfo.PullRequest
			gitProvider := pullRequestInfo.GitProvider
			err := gitProvider.UpdatePullRequestStatus(pr)
			if err != nil {
				return fmt.Errorf("Failed to query the Pull Request status for %s %s", pr.URL, err)
			}

			if pr.Merged != nil && *pr.Merged {
				if pr.MergeCommitSHA == nil {
					if !logNoMergeCommitSha {
						logNoMergeCommitSha = true
						o.Printf("Pull Request %s is merged but waiting for Merge SHA\n", util.ColorInfo(pr.URL))
					}
				} else {
					mergeSha := *pr.MergeCommitSHA
					if !logHasMergeSha {
						logHasMergeSha = true
						o.Printf("Pull Request %s is merged at sha %s\n", util.ColorInfo(pr.URL), util.ColorInfo(mergeSha))

						// record the merge in the promotion activity
						mergedPR := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromotePullRequestStep) error {
							kube.CompletePromotionPullRequest(a, s, ps, p)
							p.MergeCommitSHA = mergeSha
							return nil
						}
						promoteKey.OnPromotePullRequest(o.Activities, mergedPR)
					}
					promoteKey.OnPromoteUpdate(o.Activities, kube.StartPromotionUpdate)

					statuses, err := gitProvider.ListCommitStatus(pr.Owner, pr.Repo, mergeSha)
					if err != nil {
						if !logMergeStatusError {
							logMergeStatusError = true
							o.warnf("Failed to query merge status of repo %s/%s with merge sha %s due to: %s\n", pr.Owner, pr.Repo, mergeSha, err)
						}
					} else {
						if len(statuses) == 0 {
							if !logNoMergeStatuses {
								logNoMergeStatuses = true
								o.Printf("Merge commit has not yet any statuses on repo %s/%s merge sha %s\n", pr.Owner, pr.Repo, mergeSha)
							}
						} else {
							for _, status := range statuses {
								// any failed status aborts the wait
								if status.IsFailed() {
									o.warnf("merge status: %s URL: %s description: %s\n",
										status.State, status.TargetURL, status.Description)
									return fmt.Errorf("Status: %s URL: %s description: %s\n",
										status.State, status.TargetURL, status.Description)
								}
								url := status.URL
								state := status.State

								// only log when a status transitions to a new state
								if urlStatusMap[url] == "" || urlStatusMap[url] != gitStatusSuccess {
									if urlStatusMap[url] != state {
										urlStatusMap[url] = state
										urlStatusTargetURLMap[url] = status.TargetURL
										o.Printf("merge status: %s for URL %s with target: %s description: %s\n",
											util.ColorInfo(state), util.ColorInfo(status.URL), util.ColorInfo(status.TargetURL), util.ColorInfo(status.Description))
									}
								}
							}
							// mirror the collected statuses into the activity record
							prStatuses := []v1.GitStatus{}
							keys := util.SortedMapKeys(urlStatusMap)
							for _, url := range keys {
								state := urlStatusMap[url]
								targetURL := urlStatusTargetURLMap[url]
								if targetURL == "" {
									targetURL = url
								}
								prStatuses = append(prStatuses, v1.GitStatus{
									URL:    targetURL,
									Status: state,
								})
							}
							updateStatuses := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromoteUpdateStep) error {
								p.Statuses = prStatuses
								return nil
							}
							promoteKey.OnPromoteUpdate(o.Activities, updateStatuses)

							succeeded := true
							for _, v := range urlStatusMap {
								if v != gitStatusSuccess {
									succeeded = false
								}
							}
							if succeeded {
								o.Printf("Merge status checks all passed so the promotion worked!\n")
								err = promoteKey.OnPromoteUpdate(o.Activities, kube.CompletePromotionUpdate)
								if err == nil {
									err = o.commentOnIssues(ns, env)
								}
								return err
							}
						}
					}
				}
			} else {
				if pr.IsClosed() {
					o.warnf("Pull Request %s is closed\n", util.ColorInfo(pr.URL))
					return fmt.Errorf("Promotion failed as Pull Request %s is closed without merging", pr.URL)
				}

				// lets try merge if the status is good
				status, err := gitProvider.PullRequestLastCommitStatus(pr)
				if err != nil {
					o.warnf("Failed to query the Pull Request last commit status for %s ref %s %s\n", pr.URL, pr.LastCommitSha, err)
					//return fmt.Errorf("Failed to query the Pull Request last commit status for %s ref %s %s", pr.URL, pr.LastCommitSha, err)
				} else {
					if status == "success" {
						err = gitProvider.MergePullRequest(pr, "jx promote automatically merged promotion PR")
						if err != nil {
							if !logMergeFailure {
								logMergeFailure = true
								o.warnf("Failed to merge the Pull Request %s due to %s maybe I don't have karma?\n", pr.URL, err)
							}
						}
					} else if status == "error" || status == "failure" {
						return fmt.Errorf("Pull request %s last commit has status %s for ref %s", pr.URL, status, pr.LastCommitSha)
					}
				}
			}
			if time.Now().After(end) {
				return fmt.Errorf("Timed out waiting for pull request %s to merge. Waited %s", pr.URL, duration.String())
			}
			time.Sleep(*o.PullRequestPollDuration)
		}
	}
	return nil
}
// findLatestVersion returns the highest version of the app reported by
// `helm search`, preferring semantic-version comparison and falling back to
// lexicographic comparison for versions that do not parse as semver.
func (o *PromoteOptions) findLatestVersion(app string) (string, error) {
	output, err := o.getCommandOutput("", "helm", "search", app, "--versions")
	if err != nil {
		return "", err
	}
	var maxSemVer *semver.Version
	maxString := ""
	for i, line := range strings.Split(output, "\n") {
		if i == 0 {
			// skip the header row emitted by `helm search`
			continue
		}
		fields := strings.Fields(line)
		if len(fields) > 1 {
			v := fields[1]
			if v != "" {
				sv, err := semver.Parse(v)
				if err != nil {
					o.warnf("Invalid semantic version: %s %s\n", v, err)
				} else {
					// keep the HIGHEST version seen: Compare returns < 0
					// when the receiver is lower than sv. The previous
					// "> 0" comparison inverted this and returned the
					// lowest version instead of the latest.
					if maxSemVer == nil || maxSemVer.Compare(sv) < 0 {
						maxSemVer = &sv
					}
				}
				if maxString == "" || strings.Compare(v, maxString) > 0 {
					maxString = v
				}
			}
		}
	}
	if maxSemVer != nil {
		return maxSemVer.String(), nil
	}
	if maxString == "" {
		return "", fmt.Errorf("Could not find a version of app %s in the helm repositories", app)
	}
	return maxString, nil
}
// verifyHelmConfigured ensures the local helm client is initialised and that
// the team's local chart repository is registered with it.
func (o *PromoteOptions) verifyHelmConfigured() error {
	helmHomeDir := filepath.Join(util.HomeDir(), ".helm")
	exists, err := util.FileExists(helmHomeDir)
	if err != nil {
		return err
	}
	if !exists {
		o.Printf("No helm home dir at %s so lets initialise helm client\n", helmHomeDir)
		err = o.runCommand("helm", "init", "--client-only")
		if err != nil {
			return err
		}
	}

	f := o.Factory
	// previously the error from CreateClient was discarded with `_` and the
	// following check inspected a stale err; capture and check it properly
	_, ns, err := f.CreateClient()
	if err != nil {
		return err
	}

	// lets add the releases chart
	return o.registerLocalHelmRepo(o.LocalHelmRepoName, ns)
}
// createPromoteKey builds the activity key used to record promotion steps in
// the PipelineActivity resource, deriving the pipeline name, build number and
// build URLs from CI environment variables with git/Jenkins-based fallbacks.
func (o *PromoteOptions) createPromoteKey(env *v1.Environment) *kube.PromoteStepActivityKey {
	pipeline := os.Getenv("JOB_NAME")
	build := os.Getenv("BUILD_NUMBER")
	buildURL := os.Getenv("BUILD_URL")
	buildLogsURL := os.Getenv("BUILD_LOG_URL")
	gitInfo, err := gits.GetGitInfo("")
	releaseNotesURL := ""
	releaseName := o.ReleaseName
	// lazily resolve the Release resource so we can link its release notes;
	// the inner `:=` deliberately shadows err so lookup failures stay best-effort
	if o.releaseResource == nil && releaseName != "" {
		jxClient, _, err := o.JXClient()
		if err == nil {
			release, err := jxClient.JenkinsV1().Releases(env.Spec.Namespace).Get(releaseName, metav1.GetOptions{})
			if err == nil && release != nil {
				o.releaseResource = release
			}
		}
	}
	if o.releaseResource != nil {
		releaseNotesURL = o.releaseResource.Spec.ReleaseNotesURL
	}
	// err here is still the GetGitInfo error from above
	if err != nil {
		o.warnf("Could not discover the git repository info %s\n", err)
	} else {
		o.GitInfo = gitInfo
	}
	if pipeline == "" {
		if gitInfo != nil {
			// lets default the pipeline name from the git repo
			branch, err := gits.GitGetBranch(".")
			if err != nil {
				o.warnf("Could not find the branch name: %s\n", err)
			}
			if branch == "" {
				branch = "master"
			}
			pipeline = util.UrlJoin(gitInfo.Organisation, gitInfo.Name, branch)
			if build == "" {
				// lets validate and determine the current active pipeline branch
				p, b, err := o.getLatestPipelineBuild(pipeline)
				if err != nil {
					o.warnf("Failed to try detect the current Jenkins pipeline for %s due to %s\n", pipeline, err)
					pipeline = ""
				} else {
					pipeline = p
					build = b
				}
			}
		}
		if pipeline == "" {
			// lets try find
			o.warnf("No $JOB_NAME environment variable found so cannot record promotion activities into the PipelineActivity resources in kubernetes\n")
		}
	} else if build == "" {
		// fixed "variablefound" typo in this warning
		o.warnf("No $BUILD_NUMBER environment variable found so cannot record promotion activities into the PipelineActivity resources in kubernetes\n")
	}
	name := pipeline
	if build != "" {
		name += "-" + build
		if buildURL == "" || buildLogsURL == "" {
			jenkinsURL := o.getJenkinsURL()
			if jenkinsURL != "" {
				path := pipeline
				if !strings.HasPrefix(path, "job/") && !strings.HasPrefix(path, "/job/") {
					// lets split the path and prefix it with /job
					path = strings.Join(strings.Split(path, "/"), "/job/")
					path = util.UrlJoin("job", path)
				}
				path = util.UrlJoin(path, build)
				if !strings.HasSuffix(path, "/") {
					path += "/"
				}
				if buildURL == "" {
					buildURL = util.UrlJoin(jenkinsURL, path)
				}
				if buildLogsURL == "" {
					buildLogsURL = util.UrlJoin(buildURL, "console")
				}
			}
		}
	}
	name = kube.ToValidName(name)
	o.Printf("Using pipeline: %s build: %s\n", util.ColorInfo(pipeline), util.ColorInfo("#"+build))
	return &kube.PromoteStepActivityKey{
		PipelineActivityKey: kube.PipelineActivityKey{
			Name:            name,
			Pipeline:        pipeline,
			Build:           build,
			BuildURL:        buildURL,
			BuildLogsURL:    buildLogsURL,
			GitInfo:         gitInfo,
			ReleaseNotesURL: releaseNotesURL,
		},
		Environment: env.Name,
	}
}
// getLatestPipelineBuild resolves the Jenkins job for the given pipeline
// name and returns the pipeline together with its most recent build number.
func (o *PromoteOptions) getLatestPipelineBuild(pipeline string) (string, string, error) {
	log.Infof("pipeline %s\n", pipeline)
	jenkins, err := o.Factory.CreateJenkinsClient()
	if err != nil {
		return pipeline, "", err
	}
	segments := strings.Split(pipeline, "/")
	job, err := jenkins.GetJobByPath(segments...)
	if err != nil {
		return pipeline, "", err
	}
	return pipeline, strconv.Itoa(job.LastBuild.Number), nil
}
// getJenkinsURL returns the Jenkins base URL, preferring the value resolved
// from the Factory and falling back to a cached value seeded from the
// JENKINS_URL environment variable. The original contained the same env-var
// check duplicated verbatim; the duplicate has been removed (behavior is
// unchanged since the second check could never fire).
func (o *PromoteOptions) getJenkinsURL() string {
	if o.jenkinsURL == "" {
		o.jenkinsURL = os.Getenv("JENKINS_URL")
	}
	url, err := o.Factory.GetJenkinsURL()
	if err != nil {
		o.warnf("Could not find Jenkins URL %s", err)
	} else {
		o.jenkinsURL = url
	}
	return o.jenkinsURL
}
// commentOnIssues comments on any issues for a release that the fix is available in the given environment.
// It is best-effort: missing prerequisites cause a warning and a nil return rather than an error.
// NOTE(review): targetNS is currently unused in this implementation — confirm whether it should be.
func (o *PromoteOptions) commentOnIssues(targetNS string, environment *v1.Environment) error {
	ens := environment.Spec.Namespace
	envName := environment.Spec.Label
	app := o.Application
	version := o.Version
	// Without a namespace, application name, version or git info we cannot
	// locate the Release resource or word the comment, so bail out quietly.
	if ens == "" {
		o.warnf("Environment %s has no namespace\n", envName)
		return nil
	}
	if app == "" {
		o.warnf("No application name so cannot comment on issues that they are now in %s\n", envName)
		return nil
	}
	if version == "" {
		o.warnf("No version name so cannot comment on issues that they are now in %s\n", envName)
		return nil
	}
	gitInfo := o.GitInfo
	if gitInfo == nil {
		o.warnf("No GitInfo discovered so cannot comment on issues that they are now in %s\n", envName)
		return nil
	}
	// Build a git provider client able to post issue comments.
	authConfigSvc, err := o.Factory.CreateGitAuthConfigService()
	if err != nil {
		return err
	}
	gitKind, err := o.GitServerKind(gitInfo)
	if err != nil {
		return err
	}
	provider, err := gitInfo.PickOrCreateProvider(authConfigSvc, "user name to comment on issues", o.BatchMode, gitKind)
	if err != nil {
		return err
	}
	releaseName := kube.ToValidNameWithDots(app + "-" + version)
	jxClient, _, err := o.JXClient()
	if err != nil {
		return err
	}
	kubeClient, _, err := o.KubeClient()
	if err != nil {
		return err
	}
	// Look up the Release resource; if it cannot be found there is nothing
	// to comment on and we return nil (lookup errors are deliberately ignored).
	release, err := jxClient.JenkinsV1().Releases(ens).Get(releaseName, metav1.GetOptions{})
	if err == nil && release != nil {
		o.releaseResource = release
		issues := release.Spec.Issues
		// Link the version to the release notes when a URL is available.
		versionMessage := version
		if release.Spec.ReleaseNotesURL != "" {
			versionMessage = "[" + version + "](" + release.Spec.ReleaseNotesURL + ")"
		}
		// Try a few candidate service names to find a public URL for the app.
		appNames := []string{app, o.ReleaseName, ens + "-" + app}
		url := ""
		for _, n := range appNames {
			url, err = kube.FindServiceURL(kubeClient, ens, n)
			if url != "" {
				break
			}
		}
		if url == "" {
			o.warnf("Could not find the service URL in namespace %s for names %s\n", ens, strings.Join(appNames, ", "))
		}
		available := ""
		if url != "" {
			available = fmt.Sprintf(" and available [here](%s)", url)
		}
		// Fall back to the ingress hostname when no service URL was found.
		if available == "" {
			ing, err := kubeClient.ExtensionsV1beta1().Ingresses(ens).Get(app, metav1.GetOptions{})
			// NOTE(review): && binds tighter than ||, so any error (even with an
			// empty ReleaseName) triggers the retry lookup — confirm intended.
			if err != nil || ing == nil && o.ReleaseName != "" && o.ReleaseName != app {
				ing, err = kubeClient.ExtensionsV1beta1().Ingresses(ens).Get(o.ReleaseName, metav1.GetOptions{})
			}
			if ing != nil {
				if len(ing.Spec.Rules) > 0 {
					hostname := ing.Spec.Rules[0].Host
					if hostname != "" {
						available = fmt.Sprintf(" and available at %s", hostname)
					}
				}
			}
		}
		// Comment on every closed issue with a numeric, positive ID;
		// individual failures are warnings, not errors.
		for _, issue := range issues {
			if issue.IsClosed() {
				o.Printf("Commenting that issue %s is now in %s\n", util.ColorInfo(issue.URL), util.ColorInfo(envName))
				comment := fmt.Sprintf(":white_check_mark: the fix for this issue is now deployed to **%s** in version %s %s", envName, versionMessage, available)
				id := issue.ID
				if id != "" {
					number, err := strconv.Atoi(id)
					if err != nil {
						o.warnf("Could not parse issue id %s for URL %s\n", id, issue.URL)
					} else {
						if number > 0 {
							err = provider.CreateIssueComment(gitInfo.Organisation, gitInfo.Name, number, comment)
							if err != nil {
								o.warnf("Failed to add comment to issue %s: %s", issue.URL, err)
							}
						}
					}
				}
			}
		}
	}
	return nil
}
|
[
"\"JOB_NAME\"",
"\"BUILD_NUMBER\"",
"\"BUILD_URL\"",
"\"BUILD_LOG_URL\"",
"\"JENKINS_URL\"",
"\"JENKINS_URL\""
] |
[] |
[
"JOB_NAME",
"BUILD_URL",
"BUILD_LOG_URL",
"BUILD_NUMBER",
"JENKINS_URL"
] |
[]
|
["JOB_NAME", "BUILD_URL", "BUILD_LOG_URL", "BUILD_NUMBER", "JENKINS_URL"]
|
go
| 5 | 0 | |
libstorage/api/tests/tests_config.go
|
package tests
import (
"fmt"
"os"
"path"
apiserver "github.com/thecodeteam/rexray/libstorage/api/server"
"github.com/thecodeteam/rexray/libstorage/api/types"
)
var (
	// v0ID is the expected ID of the volume used in the test operations.
	//
	// This value is configurable externally via the environment variable
	// LSX_TESTS_V0ID.
	v0ID = "vfs-000"

	// v0Name is the expected name of the volume used in the test operations.
	//
	// This value is configurable externally via the environment variable
	// LSX_TESTS_V0NAME.
	v0Name = "v0"

	// v0NextDev is the expected name of the next available device.
	//
	// This value is configurable externally via the environment variable
	// LSX_TESTS_V0NEXTDEVICE.
	v0NextDev = "/dev/xvda"

	// tlsPath is the directory containing the TLS fixtures checked into the
	// libStorage source tree under $GOPATH.
	tlsPath = path.Join(
		os.Getenv("GOPATH"),
		"/src/github.com/thecodeteam/rexray/libstorage/.tls")

	// Certificate, key, CA and known-hosts fixture files used by the suites.
	suiteServerCrt    = path.Join(tlsPath, "libstorage-server.crt")
	suiteServerKey    = path.Join(tlsPath, "libstorage-server.key")
	suiteClientCrt    = path.Join(tlsPath, "libstorage-client.crt")
	suiteClientKey    = path.Join(tlsPath, "libstorage-client.key")
	suiteTrustedCerts = path.Join(tlsPath, "libstorage-ca.crt")
	suiteKnownHosts   = path.Join(tlsPath, "known_hosts")
)
// init applies environment-variable overrides for the test fixture values
// and quiets server startup output and logging for test runs.
func init() {
	overrides := []struct {
		key    string
		target *string
	}{
		{"LSX_TESTS_V0ID", &v0ID},
		{"LSX_TESTS_V0NAME", &v0Name},
		{"LSX_TESTS_V0NEXTDEVICE", &v0NextDev},
	}
	for _, ov := range overrides {
		if v := os.Getenv(ov.key); v != "" {
			*ov.target = v
		}
	}
	//types.Stdout = GinkgoWriter
	apiserver.DisableStartupInfo = true
	if !types.Debug {
		if os.Getenv("LIBSTORAGE_LOGGING_LEVEL") == "" {
			os.Setenv("LIBSTORAGE_LOGGING_LEVEL", "panic")
		}
	}
}
// initConfigData renders the config-file template with the runner's
// protocol, listen address and driver name, caching the result as bytes.
func (t *testRunner) initConfigData() {
	data := fmt.Sprintf(configFileFormat, t.proto, t.laddr, t.driverName)
	t.configFileData = []byte(data)
}
|
[
"\"GOPATH\"",
"\"LSX_TESTS_V0ID\"",
"\"LSX_TESTS_V0NAME\"",
"\"LSX_TESTS_V0NEXTDEVICE\"",
"\"LIBSTORAGE_LOGGING_LEVEL\""
] |
[] |
[
"LIBSTORAGE_LOGGING_LEVEL",
"LSX_TESTS_V0NEXTDEVICE",
"LSX_TESTS_V0ID",
"GOPATH",
"LSX_TESTS_V0NAME"
] |
[]
|
["LIBSTORAGE_LOGGING_LEVEL", "LSX_TESTS_V0NEXTDEVICE", "LSX_TESTS_V0ID", "GOPATH", "LSX_TESTS_V0NAME"]
|
go
| 5 | 0 | |
crosshair/main.py
|
import argparse
import collections
import enum
import linecache
import os.path
from pathlib import Path
import shutil
import sys
import textwrap
import time
import traceback
from typing import (
Counter,
Dict,
Iterable,
List,
MutableMapping,
Optional,
Sequence,
Tuple,
Union,
)
from typing import TextIO
from crosshair.auditwall import engage_auditwall
from crosshair.diff_behavior import diff_behavior
from crosshair.core_and_libs import analyze_any
from crosshair.core_and_libs import run_checkables
from crosshair.core_and_libs import AnalysisMessage
from crosshair.core_and_libs import MessageType
from crosshair.fnutil import load_files_or_qualnames
from crosshair.fnutil import FunctionInfo
from crosshair.options import option_set_from_dict
from crosshair.options import AnalysisKind
from crosshair.options import AnalysisOptionSet
from crosshair.options import AnalysisOptions
from crosshair.options import DEFAULT_OPTIONS
from crosshair.path_cover import path_cover
from crosshair.path_cover import output_argument_dictionary_paths
from crosshair.path_cover import output_eval_exression_paths
from crosshair.path_cover import output_pytest_paths
from crosshair.path_cover import CoverageType
from crosshair.util import add_to_pypath
from crosshair.util import debug
from crosshair.util import set_debug
from crosshair.util import ErrorDuringImport
from crosshair.watcher import Watcher
from crosshair.core_and_libs import installed_plugins
class ExampleOutputFormat(enum.Enum):
    """Output style for the examples produced by the ``cover`` command."""

    # repr'd, ordered dictionaries of arguments
    ARGUMENT_DICTIONARY = "ARGUMENT_DICTIONARY"
    # expressions suitable for eval()
    EVAL_EXPRESSION = "EVAL_EXPRESSION"
    # stub pytest tests
    PYTEST = "PYTEST"
def analysis_kind(argstr: str) -> Sequence[AnalysisKind]:
    """Parse a comma-separated list of analysis kind names.

    Used as an argparse ``type=`` callable: a ValueError is turned into a
    usage error by argparse, so unknown kind names now carry a message that
    argparse will surface. Requesting the ``hypothesis`` kind additionally
    verifies that a sufficiently new hypothesis library is importable.
    """
    try:
        ret = [AnalysisKind[part.strip()] for part in argstr.split(",")]
    except KeyError as exc:
        # Keep raising ValueError (argparse contract), but include the
        # offending name instead of the previous message-less raise.
        raise ValueError(f"Unknown analysis kind: {exc.args[0]!r}")
    if AnalysisKind.hypothesis in ret:
        try:
            import hypothesis

            if hypothesis.__version_info__ < (6, 0, 0):
                raise Exception("CrossHair requires hypothesis version >= 6.0.0")
        except ImportError as e:
            raise Exception("Unable to import the hypothesis library") from e
    return ret
def command_line_parser() -> argparse.ArgumentParser:
    """Build the argparse parser for the ``crosshair`` command line.

    Defines the ``check``, ``watch``, ``diffbehavior`` and ``cover``
    subcommands; the chosen subcommand is stored in the ``action`` attribute
    of the parsed namespace.
    """
    # Options shared by the check and watch subcommands.
    common = argparse.ArgumentParser(
        add_help=False, formatter_class=argparse.RawTextHelpFormatter
    )
    common.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="Output additional debugging information on stderr",
    )
    common.add_argument(
        "--per_path_timeout",
        type=float,
        metavar="FLOAT",
        help="Maximum seconds to spend checking one execution path",
    )
    common.add_argument(
        "--per_condition_timeout",
        type=float,
        metavar="FLOAT",
        help="Maximum seconds to spend checking execution paths for one condition",
    )
    parser = argparse.ArgumentParser(
        prog="crosshair", description="CrossHair Analysis Tool"
    )
    subparsers = parser.add_subparsers(help="sub-command help", dest="action")
    # --- "check" subcommand ---
    check_parser = subparsers.add_parser(
        "check",
        help="Analyze a file or function",
        parents=[common],
        formatter_class=argparse.RawTextHelpFormatter,
        description=textwrap.dedent(
            """\
            The check command looks for counterexamples that break contracts.
            It outputs machine-readable messages in this format on stdout:
            <filename>:<line number>: error: <error message>
            It exits with one of the following codes:
            0 : No counterexamples are found
            1 : Counterexample(s) have been found
            2 : Other error
            """
        ),
    )
    check_parser.add_argument(
        "--report_all",
        action="store_true",
        help="Output analysis results for all postconditions (not just failing ones)",
    )
    check_parser.add_argument(
        "--report_verbose",
        dest="report_verbose",
        action="store_true",
        help="Output context and stack traces for counterexamples",
    )
    check_parser.add_argument(
        "target",
        metavar="TARGET",
        type=str,
        nargs="+",
        help=textwrap.dedent(
            """\
            A fully qualified module, class, or function, or
            a directory (which will be recursively analyzed), or
            a file path with an optional ":<line-number>" suffix.
            See https://crosshair.readthedocs.io/en/latest/contracts.html#targeting
            """
        ),
    )
    # --- "watch" subcommand ---
    watch_parser = subparsers.add_parser(
        "watch",
        help="Continuously watch and analyze a directory",
        parents=[common],
        formatter_class=argparse.RawTextHelpFormatter,
        description=textwrap.dedent(
            """\
            The watch command continuously looks for contract counterexamples.
            Type Ctrl-C to stop this command.
            """
        ),
    )
    watch_parser.add_argument(
        "directory",
        metavar="TARGET",
        type=str,
        nargs="+",
        help=textwrap.dedent(
            """\
            File or directory to watch. Directories will be recursively analyzed.
            See https://crosshair.readthedocs.io/en/latest/contracts.html#targeting
            """
        ),
    )
    # --analysis_kind applies to both check and watch.
    for subparser in (check_parser, watch_parser):
        subparser.add_argument(
            "--analysis_kind",
            type=analysis_kind,
            metavar="KIND",
            help=textwrap.dedent(
                """\
                Kind of contract to check.
                By default, the PEP316, deal, and icontract kinds are all checked.
                Multiple kinds (comma-separated) may be given.
                See https://crosshair.readthedocs.io/en/latest/kinds_of_contracts.html
                asserts : check assert statements
                PEP316 : check PEP316 contracts (docstring-based)
                icontract : check icontract contracts (decorator-based)
                deal : check deal contracts (decorator-based)
                hypothesis : check hypothesis tests
                """
            ),
        )
    # --- "diffbehavior" subcommand ---
    diffbehavior_parser = subparsers.add_parser(
        "diffbehavior",
        formatter_class=argparse.RawTextHelpFormatter,
        help="Find differences in the behavior of two functions",
        description=textwrap.dedent(
            """\
            Find differences in the behavior of two functions.
            See https://crosshair.readthedocs.io/en/latest/diff_behavior.html
            """
        ),
        parents=[common],
    )
    diffbehavior_parser.add_argument(
        "fn1",
        metavar="FUNCTION1",
        type=str,
        help='first fully-qualified function to compare (e.g. "mymodule.myfunc")',
    )
    diffbehavior_parser.add_argument(
        "fn2",
        metavar="FUNCTION2",
        type=str,
        help="second fully-qualified function to compare",
    )
    # --- "cover" subcommand ---
    cover_parser = subparsers.add_parser(
        "cover",
        formatter_class=argparse.RawTextHelpFormatter,
        help="Generate inputs for a function, attempting to exercise different code paths",
        description=textwrap.dedent(
            """\
            Generates inputs to a function, hopefully getting good line, branch, and path
            coverage.
            See https://crosshair.readthedocs.io/en/latest/cover.html
            """
        ),
        parents=[common],
    )
    cover_parser.add_argument(
        "fn",
        metavar="FUNCTION",
        type=str,
        help='A fully-qualified function to explore (e.g. "mymodule.myfunc")',
    )
    cover_parser.add_argument(
        "--example_output_format",
        type=lambda e: ExampleOutputFormat[e.upper()],  # type: ignore
        choices=ExampleOutputFormat.__members__.values(),
        metavar="FORMAT",
        default=ExampleOutputFormat.EVAL_EXPRESSION,
        help=textwrap.dedent(
            """\
            Determines how to output examples.
            argument_dictionary : Output arguments as repr'd, ordered dictionaries
            eval_expression : Output examples as expressions, suitable for eval()
            pytest : Output examples as stub pytest tests
            """
        ),
    )
    cover_parser.add_argument(
        "--coverage_type",
        type=lambda e: CoverageType[e.upper()],  # type: ignore
        choices=CoverageType.__members__.values(),
        metavar="TYPE",
        default=CoverageType.OPCODE,
        help=textwrap.dedent(
            """\
            Determines what kind of coverage to achieve.
            opcode : Cover as many opcodes of the function as possible.
            This is similar to "branch" coverage.
            path : Cover any possible execution path.
            There will usually be an infinite number of paths (e.g. loops are
            effectively unrolled). Use max_iterations and/or
            per_condition_timeout to bound results.
            Many path decisions are internal to CrossHair, so you may see
            more duplicative-ness in the output than you'd expect.
            """
        ),
    )
    return parser
def run_watch_loop(
    watcher: Watcher,
    max_watch_iterations: int = sys.maxsize,
    term_lines_rewritable: bool = True,
) -> None:
    """Drive the watch-mode analysis loop.

    Repeatedly runs analysis iterations, doubling the per-condition timeout
    between rounds, and re-renders accumulated counterexample messages when
    new ones appear. A file change detected by the watcher resets the loop.
    ``term_lines_rewritable`` selects between "\\r"-based status rewriting and
    append-only dots for terminals that mishandle carriage returns.
    """
    restart = True
    stats: Counter[str] = Counter()
    active_messages: Dict[Tuple[str, int], AnalysisMessage]
    for _ in range(max_watch_iterations):
        if restart:
            # Fresh round: clear state and start with a short timeout.
            clear_screen()
            print_divider("-")
            line = f" Analyzing {len(watcher._modtimes)} files."
            print(color(line, AnsiColor.OKBLUE), end="")
            max_condition_timeout = 0.5
            restart = False
            stats = Counter()
            active_messages = {}
        else:
            # Subsequent rounds dig deeper with exponentially longer timeouts.
            time.sleep(0.1)
            max_condition_timeout *= 2
        for curstats, messages in watcher.run_iteration(max_condition_timeout):
            messages = [m for m in messages if m.state > MessageType.PRE_UNSAT]
            stats.update(curstats)
            if messages_merged(active_messages, messages):
                # New message(s): re-render the full set of reports.
                linecache.checkcache()
                clear_screen()
                options = DEFAULT_OPTIONS.overlay(watcher._options)
                for message in active_messages.values():
                    lines = long_describe_message(message, options)
                    if lines is None:
                        continue
                    print_divider("-")
                    print(lines, end="")
                print_divider("-")
            else:
                if term_lines_rewritable:
                    print("\r", end="")
                else:
                    # Append a progress dot and skip the status-line rewrite.
                    print(".", end="")
                    continue
            # Rewrite the one-line status summary.
            num_files = len(watcher._modtimes)
            if len(watcher._paths) > 1:
                loc_desc = f"{num_files} files"
            else:
                path_parts = Path(next(iter(watcher._paths))).parts
                path_desc = path_parts[-1] if path_parts else "."
                if num_files > 1:
                    loc_desc = f'"{path_desc}" ({num_files} files)'
                else:
                    loc_desc = f'"{path_desc}"'
            if term_lines_rewritable:
                line = f' Analyzed {stats["num_paths"]} paths in {loc_desc}. '
            else:
                line = f" Analyzing paths in {loc_desc}: "
            print(color(line, AnsiColor.OKBLUE), end="")
        if watcher._change_flag:
            # Source changed on disk: restart the analysis from scratch.
            watcher._change_flag = False
            restart = True
            line = f" Restarting analysis over {len(watcher._modtimes)} files."
            print(color(line, AnsiColor.OKBLUE), end="")
def clear_screen():
    """Scroll prior output out of view by printing a screenful of newlines."""
    height = shutil.get_terminal_size().lines
    print("\n" * height, end="")
def print_divider(ch=" "):
    """Print a horizontal rule one character narrower than the terminal.

    Falls back to a width of 5 when no terminal size is available.
    """
    try:
        width = os.get_terminal_size().columns - 1
    except OSError:
        width = 5
    print(ch * width)
class AnsiColor(enum.Enum):
    """ANSI terminal escape codes used to colorize console output."""

    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # reset all attributes
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def color(text: str, *effects: AnsiColor) -> str:
    """Wrap *text* in the given ANSI effects, terminated by a reset code."""
    prefix = "".join(effect.value for effect in effects)
    return prefix + text + AnsiColor.ENDC.value
def messages_merged(
    messages: MutableMapping[Tuple[str, int], AnalysisMessage],
    new_messages: Iterable[AnalysisMessage],
) -> bool:
    """Merge new messages into the map keyed by (filename, line).

    Returns True iff at least one previously-unseen message was added.
    """
    added = False
    for msg in new_messages:
        key = (msg.filename, msg.line)
        if key in messages:
            continue
        messages[key] = msg
        added = True
    return added
def watch(
    args: argparse.Namespace,
    options: AnalysisOptionSet,
    max_watch_iterations=sys.maxsize,
) -> int:
    """Implement the ``crosshair watch`` subcommand.

    Returns 2 when no targets were given, otherwise 0 after the watch loop
    ends (normally via Ctrl-C, which is caught here).
    """
    if not args.directory:
        print("No files or directories given to watch", file=sys.stderr)
        return 2
    try:
        paths = [Path(d) for d in args.directory]
        watcher = Watcher(paths, options)
        watcher.check_changed()
        # Some terminals don't interpret \r correctly; we detect them here:
        term_lines_rewritable = "THONNY_USER_DIR" not in os.environ
        run_watch_loop(
            watcher, max_watch_iterations, term_lines_rewritable=term_lines_rewritable
        )
    except KeyboardInterrupt:
        pass
    # Shut down the watcher's worker pool before exiting.
    watcher._pool.terminate()
    print()
    print("I enjoyed working with you today!")
    return 0
def format_src_context(filename: str, lineno: int) -> str:
    """Return a small source listing around *lineno* of *filename*.

    Shows up to three lines of context on each side; the focused line is
    colorized and prefixed with ">", the others with "|". Lines that do not
    exist in the file are skipped.
    """
    amount = 3
    line_numbers = range(max(1, lineno - amount), lineno + amount + 1)
    # Bug fix: the header previously printed the literal "(unknown)" instead
    # of the file name that the rest of the function reads lines from.
    output = [f"{filename}:{lineno}:\n"]
    for curline in line_numbers:
        text = linecache.getline(filename, curline)
        if text == "":  # (actual empty lines have a newline)
            continue
        output.append(
            ">" + color(text, AnsiColor.WARNING) if lineno == curline else "|" + text
        )
    return "".join(output)
def describe_message(
    message: AnalysisMessage, options: AnalysisOptions
) -> Optional[str]:
    """Render *message* at the verbosity selected by ``options.report_verbose``."""
    renderer = (
        long_describe_message if options.report_verbose else short_describe_message
    )
    return renderer(message, options)
def long_describe_message(
    message: AnalysisMessage, options: AnalysisOptions
) -> Optional[str]:
    """Render a verbose, multi-line report for *message*.

    Returns None when the message is below PRE_UNSAT severity and
    ``report_all`` is off. The report contains the traceback, a colorized
    one-line summary, source context, and the message text.
    """
    tb, desc, state = message.traceback, message.message, message.state
    # Break long "... when ..." descriptions onto a second line.
    desc = desc.replace(" when ", "\nwhen ")
    context = format_src_context(message.filename, message.line)
    intro = ""
    if not options.report_all:
        if message.state <= MessageType.PRE_UNSAT:  # type: ignore
            return None
    # Map the message state to a human-friendly summary sentence.
    if state == MessageType.CONFIRMED:
        intro = "I was able to confirm your postcondition over all paths."
    elif state == MessageType.CANNOT_CONFIRM:
        intro = "I wasn't able to find a counterexample."
    elif message.state == MessageType.PRE_UNSAT:
        intro = "I am having trouble finding any inputs that meet your preconditions."
    elif message.state == MessageType.POST_ERR:
        intro = "I got an error while checking your postcondition."
    elif message.state == MessageType.EXEC_ERR:
        intro = "I found an exception while running your function."
    elif message.state == MessageType.POST_FAIL:
        intro = "I was able to make your postcondition return False."
    elif message.state == MessageType.SYNTAX_ERR:
        intro = "One of your conditions isn't a valid python expression."
    elif message.state == MessageType.IMPORT_ERR:
        intro = "I couldn't import a file."
    # Benign outcomes are green; problems are red.
    if message.state <= MessageType.CANNOT_CONFIRM:  # type: ignore
        intro = color(intro, AnsiColor.OKGREEN)
    else:
        intro = color(intro, AnsiColor.FAIL)
    return f"{tb}\n{intro}\n{context}\n{desc}\n"
def short_describe_message(
    message: AnalysisMessage, options: AnalysisOptions
) -> Optional[str]:
    """One-line, machine-readable rendering: "<file>:<line>: <level>: <desc>".

    Low-severity messages are suppressed (None) unless ``report_all`` is set.
    """
    state = message.state
    desc = message.message
    if state <= MessageType.PRE_UNSAT:  # type: ignore
        if not options.report_all:
            return None
        return "{}:{}: {}: {}".format(message.filename, message.line, "info", desc)
    if state == MessageType.POST_ERR:
        desc = "Error while evaluating post condition: " + desc
    return "{}:{}: {}: {}".format(message.filename, message.line, "error", desc)
def checked_load(qualname: str, stderr: TextIO) -> Optional[FunctionInfo]:
    """Load a fully-qualified function name, reporting problems to *stderr*.

    Returns the FunctionInfo on success, or None when the name cannot be
    imported, does not target a function, or has an undeterminable signature.
    """
    try:
        objs = list(load_files_or_qualnames([qualname]))
    except ErrorDuringImport as exc:
        # Prefer the underlying cause when the import machinery wrapped it.
        cause = exc.__cause__ if exc.__cause__ is not None else exc
        print(
            f'Unable to load "{qualname}": {type(cause).__name__}: {cause}',
            file=stderr,
        )
        return None
    obj = objs[0]
    if not isinstance(obj, FunctionInfo):
        print(f'"{qualname}" does not target a function.', file=stderr)
        return None
    if obj.get_callable() is None:
        print(f'Cannot determine signature of "{qualname}"', file=stderr)
        return None
    return obj
def diffbehavior(
    args: argparse.Namespace, options: AnalysisOptions, stdout: TextIO, stderr: TextIO
) -> int:
    """Implement ``crosshair diffbehavior``.

    Exit codes: 0 when no behavioral differences were found, 1 when at least
    one difference was found, 2 on load/analysis errors.
    """
    (fn_name1, fn_name2) = (args.fn1, args.fn2)
    fn1 = checked_load(fn_name1, stderr)
    fn2 = checked_load(fn_name2, stderr)
    if fn1 is None or fn2 is None:
        return 2
    options.stats = collections.Counter()
    diffs = diff_behavior(fn1, fn2, options)
    debug("stats", options.stats)
    if isinstance(diffs, str):
        # diff_behavior signals analysis errors as a string.
        print(diffs, file=stderr)
        return 2
    elif len(diffs) == 0:
        num_paths = options.stats["num_paths"]
        exhausted = options.stats["exhaustion"] > 0
        stdout.write(f"No differences found. (attempted {num_paths} iterations)\n")
        if exhausted:
            stdout.write("All paths exhausted, functions are likely the same!\n")
        else:
            stdout.write(
                "Consider trying longer with: --per_condition_timeout=<seconds>\n"
            )
        return 0
    else:
        # Print each differing input with both functions' outcomes aligned.
        width = max(len(fn_name1), len(fn_name2)) + 2
        for diff in diffs:
            inputs = ", ".join(f"{k}={v}" for k, v in diff.args.items())
            stdout.write(f"Given: ({inputs}),\n")
            result1, result2 = diff.result1, diff.result2
            differing_args = result1.get_differing_arg_mutations(result2)
            stdout.write(
                f"{fn_name1.rjust(width)} : {result1.describe(differing_args)}\n"
            )
            stdout.write(
                f"{fn_name2.rjust(width)} : {result2.describe(differing_args)}\n"
            )
        return 1
def cover(
    args: argparse.Namespace, options: AnalysisOptions, stdout: TextIO, stderr: TextIO
) -> int:
    """Implement ``crosshair cover``.

    Generates covering inputs for the target function and emits them in the
    requested format. Returns 2 when the target cannot be loaded; otherwise
    the exit code of the chosen output writer.
    """
    ctxfn = checked_load(args.fn, stderr)
    if ctxfn is None:
        return 2
    options.stats = collections.Counter()
    paths = path_cover(ctxfn, options, args.coverage_type)
    fn, _ = ctxfn.callable()
    example_output_format = args.example_output_format
    # Consistent elif dispatch (the last branch was previously a bare `if`,
    # breaking the parallel structure of the chain).
    if example_output_format == ExampleOutputFormat.ARGUMENT_DICTIONARY:
        return output_argument_dictionary_paths(fn, paths, stdout, stderr)
    elif example_output_format == ExampleOutputFormat.EVAL_EXPRESSION:
        return output_eval_exression_paths(fn, paths, stdout, stderr)
    elif example_output_format == ExampleOutputFormat.PYTEST:
        return output_pytest_paths(fn, paths, stdout, stderr)
    assert False, f"unexpected example output format: {example_output_format}"
def check(
    args: argparse.Namespace, options: AnalysisOptionSet, stdout: TextIO, stderr: TextIO
) -> int:
    """Implement ``crosshair check``.

    Analyzes every target and writes one line per reportable message to
    *stdout*. Returns 1 if any counterexample/problem was found, 0 if none,
    and 2 when targets could not be found or imported.
    """
    any_problems = False
    try:
        entities = list(load_files_or_qualnames(args.target))
    except FileNotFoundError as exc:
        print(f'File not found: "{exc.args[0]}"', file=stderr)
        return 2
    except ErrorDuringImport as exc:
        cause = exc.__cause__ if exc.__cause__ is not None else exc
        print(f"Could not import your code:\n", file=stderr)
        traceback.print_exception(type(cause), cause, cause.__traceback__, file=stderr)
        return 2
    full_options = DEFAULT_OPTIONS.overlay(report_verbose=False).overlay(options)
    for entity in entities:
        debug("Check ", getattr(entity, "__name__", str(entity)))
        for message in run_checkables(analyze_any(entity, options)):
            line = describe_message(message, full_options)
            if line is None:
                continue
            stdout.write(line + "\n")
            debug("Traceback for output message:\n", message.traceback)
            if message.state > MessageType.PRE_UNSAT:
                any_problems = True
    return 1 if any_problems else 0
def unwalled_main(cmd_args: Union[List[str], argparse.Namespace]) -> int:
    """Dispatch to the selected subcommand and return its exit code.

    Accepts either raw argv-style arguments or an already-parsed Namespace.
    Runs without the audit sandbox (callers engage it separately).
    """
    parser = command_line_parser()
    if isinstance(cmd_args, argparse.Namespace):
        args = cmd_args
    else:
        args = parser.parse_args(cmd_args)
    if not args.action:
        parser.print_help(sys.stderr)
        return 2
    set_debug(args.verbose)
    debug("Installed plugins:", installed_plugins)
    options = option_set_from_dict(args.__dict__)
    # fall back to current directory to look up modules
    with add_to_pypath(*([""] if sys.path and sys.path[0] != "" else [])):
        if args.action == "check":
            return check(args, options, sys.stdout, sys.stderr)
        elif args.action == "diffbehavior":
            defaults = DEFAULT_OPTIONS.overlay(
                AnalysisOptionSet(
                    per_condition_timeout=2.5,
                    per_path_timeout=30.0,  # mostly, we don't want to time out paths
                )
            )
            return diffbehavior(args, defaults.overlay(options), sys.stdout, sys.stderr)
        elif args.action == "cover":
            defaults = DEFAULT_OPTIONS.overlay(
                AnalysisOptionSet(
                    per_condition_timeout=2.5,
                    per_path_timeout=30.0,  # mostly, we don't want to time out paths
                )
            )
            return cover(args, defaults.overlay(options), sys.stdout, sys.stderr)
        elif args.action == "watch":
            return watch(args, options)
        else:
            print(f'Unknown action: "{args.action}"', file=sys.stderr)
            return 2
def mypy_and_check(cmd_args: Optional[List[str]] = None) -> None:
    """Entry point that runs mypy (when available) before ``crosshair check``.

    Exits early with mypy's status when mypy reports errors; otherwise
    engages the audit sandbox and exits with the check command's status.
    """
    if cmd_args is None:
        cmd_args = sys.argv[1:]
    cmd_args = ["check"] + cmd_args
    # Unrecognized options are passed through to mypy.
    check_args, mypy_args = command_line_parser().parse_known_args(cmd_args)
    set_debug(check_args.verbose)
    mypy_cmd_args = mypy_args + check_args.target
    debug("Running mypy with the following arguments:", " ".join(mypy_cmd_args))
    try:
        from mypy import api
    except ModuleNotFoundError:
        print("Unable to find mypy; skipping", file=sys.stderr)
    else:
        _mypy_out, mypy_err, mypy_ret = api.run(mypy_cmd_args)
        print(mypy_err, file=sys.stderr)
        if mypy_ret != 0:
            sys.exit(mypy_ret)
    engage_auditwall()
    debug("Running crosshair with these args:", check_args)
    sys.exit(unwalled_main(check_args))
def main(cmd_args: Optional[List[str]] = None) -> None:
    """CLI entry point: engage the audit sandbox, run, exit with the code."""
    args = sys.argv[1:] if cmd_args is None else cmd_args
    engage_auditwall()
    sys.exit(unwalled_main(args))
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
version.py
|
"""
A minimalistic version helper in the spirit of versioneer, that is able to run without build step using pkg_resources.
Developed by P Angerer, see https://github.com/flying-sheep/get_version.
"""
import re
import os
from pathlib import Path
from subprocess import run, PIPE, CalledProcessError
from typing import NamedTuple, List, Union, Optional
# Version pattern: release ("1.2.3"), optional ".devN" suffix, and an
# optional local part introduced by "+", "_" or "-" (dot-separated labels).
RE_VERSION = r"([\d.]+?)(?:\.dev(\d+))?(?:[_+-]([0-9a-zA-Z.]+))?"
# `git describe --long` pattern: optional "vX.Y-N-g" prefix, a 7-hex-digit
# commit hash, and an optional "-dirty" flag.
RE_GIT_DESCRIBE = r"v?(?:([\d.]+)-(\d+)-g)?([0-9a-f]{7})(-dirty)?"
# True when building on Read the Docs; used below to suppress the "dirty" label.
ON_RTD = os.environ.get("READTHEDOCS") == "True"
def match_groups(regex, target):
    """Match *regex* at the start of *target* and return the capture groups.

    Raises ``re.error`` when the pattern does not match.
    """
    m = re.match(regex, target)
    if m is not None:
        return m.groups()
    raise re.error(f"Regex does not match “{target}”. RE Pattern: {regex}", regex)
class Version(NamedTuple):
    """A parsed version: release, optional dev number, and local labels."""

    # release part, e.g. "1.2.3"
    release: str
    # dev-release number captured from a ".devN" suffix, or None
    dev: Optional[str]
    # local-version labels from a "+a.b"-style suffix (e.g. commit hash, "dirty")
    labels: List[str]

    @staticmethod
    def parse(ver):
        # Anchor RE_VERSION at the end so trailing junk fails the parse.
        release, dev, labels = match_groups(f"{RE_VERSION}$", ver)
        return Version(release, dev, labels.split(".") if labels else [])

    def __str__(self):
        # PEP 440-style rendering: "<release>[.devN][+label.label...]".
        release = self.release if self.release else "0.0"
        dev = f".dev{self.dev}" if self.dev else ""
        labels = f'+{".".join(self.labels)}' if self.labels else ""
        return f"{release}{dev}{labels}"
def get_version_from_dirname(name, parent):
    """Extracted sdist: infer the version from a "<name>-<version>" directory.

    Returns None when the directory name does not follow that pattern.
    """
    parent = parent.resolve()
    if not re.match(f"{name}-{RE_VERSION}$", parent.name):
        return None
    version_part = parent.name[len(name) + 1 :]
    return Version.parse(version_part)
def get_version_from_git(parent):
    """Git checkout: derive the version from ``git describe``.

    Returns None when *parent* is not the top level of a git repository (or
    git is unavailable). The commit hash is added as a label for untagged
    commits, and "dirty" is added for uncommitted changes (except on RTD).
    NOTE(review): the second `run(..., check=True)` can raise
    CalledProcessError uncaught — confirm that is acceptable here.
    """
    parent = parent.resolve()
    try:
        p = run(
            ["git", "rev-parse", "--show-toplevel"],
            cwd=str(parent),
            stdout=PIPE,
            stderr=PIPE,
            encoding="utf-8",
            check=True,
        )
    except (OSError, CalledProcessError):
        return None
    # Only trust the repo if *parent* itself is the repository root.
    if Path(p.stdout.rstrip("\r\n")).resolve() != parent.resolve():
        return None
    p = run(
        [
            "git",
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "v[0-9]*",
        ],
        cwd=str(parent),
        stdout=PIPE,
        stderr=PIPE,
        encoding="utf-8",
        check=True,
    )
    release, dev, hex_, dirty = match_groups(
        f"{RE_GIT_DESCRIBE}$", p.stdout.rstrip("\r\n")
    )
    labels = []
    if dev == "0":
        # Exactly on a tag: no dev suffix, no hash label.
        dev = None
    else:
        labels.append(hex_)
    if dirty and not ON_RTD:
        labels.append("dirty")
    return Version(release, dev, labels)
def get_version_from_metadata(name: str, parent: Optional[Path] = None):
    """Installed package: look up the version via pkg_resources metadata.

    Returns None when pkg_resources is unavailable, the distribution is not
    installed, or the installed distribution lives somewhere other than
    *parent* (in which case we would report the wrong package's version).
    """
    try:
        from pkg_resources import get_distribution, DistributionNotFound
    except ImportError:
        return None
    try:
        pkg = get_distribution(name)
    except DistributionNotFound:
        return None
    # For an installed package, the parent is the install location
    path_pkg = Path(pkg.location).resolve()
    if parent is not None and path_pkg != parent.resolve():
        # Distribution and package paths do not match: this is a different
        # install, so ignore it. (The previous code built an explanatory
        # message string here but never used it — dead code removed.)
        return None
    return Version.parse(pkg.version)
def get_version(package: Union[Path, str]) -> str:
    """Get the version of a package or module

    Pass a module path or package name.
    The former is recommended, since it also works for not yet installed packages.
    Supports getting the version from
    #. The directory name (as created by ``setup.py sdist``)
    #. The output of ``git describe``
    #. The package metadata of an installed package
    (This is the only possibility when passing a name)
    Args:
    package: package name or module path (``…/module.py`` or ``…/module/__init__.py``)
    """
    path = Path(package)
    if not path.suffix and len(path.parts) == 1:  # Is probably not a path
        v = get_version_from_metadata(package)
        if v:
            return str(v)
    if path.suffix != ".py":
        msg = f"“package” is neither the name of an installed module nor the path to a .py file."
        if path.suffix:
            msg += f" Unknown file suffix {path.suffix}"
        raise ValueError(msg)
    # Derive the distribution name and its parent directory from the path.
    if path.name == "__init__.py":
        name = path.parent.name
        parent = path.parent.parent
    else:
        name = path.with_suffix("").name
        parent = path.parent
    # Try each strategy in order; fall back to "0.0.0" when all fail.
    return str(
        get_version_from_dirname(name, parent)
        or get_version_from_git(parent)
        or get_version_from_metadata(name, parent)
        or "0.0.0"
    )
__version__ = get_version(__file__)
if __name__ == "__main__":
print(__version__)
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
cmd/liqo-webhook/opts.go
|
// Copyright 2019-2022 The Liqo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"github.com/liqotech/liqo/pkg/mutate"
)
const (
	// defaultCertFile is the fallback TLS certificate path used when the
	// LIQO_CERT environment variable is unset.
	defaultCertFile = "/etc/ssl/liqo/tls.crt"
	// defaultKeyFile is the fallback TLS key path used when the LIQO_KEY
	// environment variable is unset.
	defaultKeyFile = "/etc/ssl/liqo/tls.key"
)
// setOptions fills in the TLS key and certificate paths on the mutation
// config, preferring the LIQO_KEY / LIQO_CERT environment variables and
// falling back to the defaults under /etc/ssl/liqo.
func setOptions(c *mutate.MutationConfig) {
	c.KeyFile = os.Getenv("LIQO_KEY")
	if c.KeyFile == "" {
		c.KeyFile = defaultKeyFile
	}
	c.CertFile = os.Getenv("LIQO_CERT")
	if c.CertFile == "" {
		c.CertFile = defaultCertFile
	}
}
|
[
"\"LIQO_KEY\"",
"\"LIQO_CERT\""
] |
[] |
[
"LIQO_CERT",
"LIQO_KEY"
] |
[]
|
["LIQO_CERT", "LIQO_KEY"]
|
go
| 2 | 0 | |
appengine-ndb/ndb/query_test.py
|
"""Tests for query.py."""
import datetime
import os
import unittest
from .google_imports import datastore_errors
from .google_imports import namespace_manager
from .google_imports import users
from .google_test_imports import datastore_stub_util
from . import model
from . import query
from . import tasklets
from . import test_utils
class QueryTests(test_utils.NDBTest):
    def setUp(self):
        """Define the Foo model and seed three entities before each test."""
        super(QueryTests, self).setUp()
        # Create class inside tests because kinds are cleared every test.
        global Foo

        class Foo(model.Model):
            name = model.StringProperty()
            rate = model.IntegerProperty()
            tags = model.StringProperty(repeated=True)

        self.create_entities()
the_module = query
    def create_entities(self):
        """Store joe, jill and moe — the fixtures the query tests run against."""
        self.joe = Foo(name='joe', tags=['joe', 'jill', 'hello'], rate=1)
        self.joe.put()
        self.jill = Foo(name='jill', tags=['jack', 'jill'], rate=2)
        self.jill.put()
        self.moe = Foo(name='moe', rate=1)
        self.moe.put()
    def testBasicQuery(self):
        """Range filters on a property; a bare filter() call leaves the query unchanged."""
        q = query.Query(kind='Foo')
        q = q.filter(Foo.name >= 'joe').filter(Foo.name <= 'moe').filter()
        res = list(q)
        # 'jill' sorts below 'joe', so only joe and moe match the range.
        self.assertEqual(res, [self.joe, self.moe])

    def testOrderedQuery(self):
        """Chained order() calls compose; a bare order() call leaves the query unchanged."""
        q = query.Query(kind='Foo')
        q = q.order(Foo.rate).order().order(-Foo.name)
        res = list(q)
        # rate ascending, then name descending breaks the joe/moe tie (rate=1).
        self.assertEqual(res, [self.moe, self.joe, self.jill])
    def testQueryAttributes(self):
        """kind/ancestor/filters/orders accessors reflect how the query was built."""
        q = query.Query(kind='Foo')
        self.assertEqual(q.kind, 'Foo')
        self.assertEqual(q.ancestor, None)
        self.assertEqual(q.filters, None)
        self.assertEqual(q.orders, None)
        # Ancestor is preserved as given.
        key = model.Key('Barba', 'papa')
        q = query.Query(kind='Foo', ancestor=key)
        self.assertEqual(q.kind, 'Foo')
        self.assertEqual(q.ancestor, key)
        self.assertEqual(q.filters, None)
        self.assertEqual(q.orders, None)
        # A single equality filter becomes a FilterNode.
        q = q.filter(Foo.rate == 1)
        self.assertEqual(q.kind, 'Foo')
        self.assertEqual(q.ancestor, key)
        self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
        self.assertEqual(q.orders, None)
        # A descending order shows up via _orders_to_orderings().
        q = q.order(-Foo.name)
        self.assertEqual(q.kind, 'Foo')
        self.assertEqual(q.ancestor, key)
        self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
        expected_order = [('name', query._DESC)]
        self.assertEqual(query._orders_to_orderings(q.orders), expected_order)
    def testQueryRepr(self):
        """repr() is stable for simple queries and safe for complex ones."""
        q = Foo.query()
        self.assertEqual(repr(q), "Query(kind='Foo')")
        q = Foo.query(ancestor=model.Key('Bar', 1))
        self.assertEqual(repr(q), "Query(kind='Foo', ancestor=Key('Bar', 1))")
        # Let's not specify what it should show for filters and orders,
        # just test that it doesn't blow up.
        q1 = q.filter(Foo.rate == 1, Foo.name == 'x')
        repr(q1)
        q2 = q1.order(-Foo.rate)
        repr(q2)
        # App and namespace.
        q3 = Foo.query(app='a', namespace='ns')
        self.assertEqual(repr(q3), "Query(kind='Foo', app='a', namespace='ns')")
def testRunToQueue(self):
qry = Foo.query()
queue = tasklets.MultiFuture()
qry.run_to_queue(queue, self.conn).check_success()
results = queue.get_result()
self.assertEqual(len(results), 3)
self.assertEqual(results[0][2], self.joe)
self.assertEqual(results[1][2], self.jill)
self.assertEqual(results[2][2], self.moe)
def testRunToQueueError(self):
self.ExpectWarnings()
qry = Foo.query(Foo.name > '', Foo.rate > 0)
queue = tasklets.MultiFuture()
fut = qry.run_to_queue(queue, self.conn)
self.assertRaises(datastore_errors.BadRequestError, fut.check_success)
self.assertRaises(datastore_errors.BadRequestError, queue.check_success)
def testModernQuerySyntax(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
@classmethod
def seniors(cls, min_age, min_rank):
q = cls.query().filter(cls.age >= min_age, cls.rank <= min_rank)
q = q.order(cls.name, -cls.age)
return q
q = Employee.seniors(42, 5)
self.assertEqual(q.filters,
query.ConjunctionNode(
query.FilterNode('Age', '>=', 42),
query.FilterNode('rank', '<=', 5)))
self.assertEqual(query._orders_to_orderings(q.orders),
[('name', query._ASC), ('Age', query._DESC)])
def testAndQuery(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
q = Employee.query().filter(query.AND(Employee.age >= 42))
self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
q = Employee.query(query.AND(Employee.age >= 42, Employee.rank <= 5))
self.assertEqual(q.filters,
query.ConjunctionNode(
query.FilterNode('Age', '>=', 42),
query.FilterNode('rank', '<=', 5)))
def testOrQuery(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
q = Employee.query().filter(query.OR(Employee.age >= 42))
self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
q = Employee.query(query.OR(Employee.age < 42, Employee.rank > 5))
self.assertEqual(q.filters,
query.DisjunctionNode(
query.FilterNode('Age', '<', 42),
query.FilterNode('rank', '>', 5)))
def testEmptyInFilter(self):
self.ExpectWarnings()
class Employee(model.Model):
name = model.StringProperty()
for arg in [], (), set(), frozenset():
q = Employee.query(Employee.name.IN(arg))
self.assertEqual(q.filters, query.FalseNode())
self.assertNotEqual(q.filters, 42)
f = iter(q).has_next_async()
self.assertRaises(datastore_errors.BadQueryError, f.check_success)
def testSingletonInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['xyzzy']))
self.assertEqual(q.filters, query.FilterNode('name', '=', 'xyzzy'))
self.assertNotEqual(q.filters, 42)
e = Employee(name='xyzzy')
e.put()
self.assertEqual(q.get(), e)
def testInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['a', 'b']))
self.assertEqual(q.filters,
query.DisjunctionNode(
query.FilterNode('name', '=', 'a'),
query.FilterNode('name', '=', 'b')))
a = Employee(name='a')
a.put()
b = Employee(name='b')
b.put()
self.assertEqual(list(q), [a, b])
def testInFilterArgTypes(self):
class Employee(model.Model):
name = model.StringProperty()
a = Employee(name='a')
a.put()
b = Employee(name='b')
b.put()
for arg in ('a', 'b'), set(['a', 'b']), frozenset(['a', 'b']):
q = Employee.query(Employee.name.IN(arg))
self.assertEqual(list(q), [a, b])
def testInFilterWithNone(self):
class Employee(model.Model):
# Try a few different property types, to get a good mix of what
# used to fail.
name = model.StringProperty()
boss = model.KeyProperty()
age = model.IntegerProperty()
date = model.DateProperty()
a = Employee(name='a', age=42L)
a.put()
bosskey = model.Key(Employee, 'x')
b = Employee(boss=bosskey, date=datetime.date(1996, 1, 31))
b.put()
keys = set([a.key, b.key])
q1 = Employee.query(Employee.name.IN(['a', None]))
self.assertEqual(set(e.key for e in q1), keys)
q2 = Employee.query(Employee.boss.IN([bosskey, None]))
self.assertEqual(set(e.key for e in q2), keys)
q3 = Employee.query(Employee.age.IN([42, None]))
self.assertEqual(set(e.key for e in q3), keys)
q4 = Employee.query(Employee.date.IN([datetime.date(1996, 1, 31), None]))
self.assertEqual(set(e.key for e in q4), keys)
def testQueryExceptions(self):
self.ExpectWarnings()
q = Foo.query(Foo.name > '', Foo.rate > 0)
f = q.fetch_async()
self.assertRaises(datastore_errors.BadRequestError, f.check_success)
def testQueryUnindexedFails(self):
# Shouldn't be able to query for unindexed properties
class SubModel(model.Model):
booh = model.IntegerProperty(indexed=False)
class Emp(model.Model):
name = model.StringProperty()
text = model.TextProperty()
blob = model.BlobProperty()
sub = model.StructuredProperty(SubModel)
struct = model.StructuredProperty(Foo, indexed=False)
local = model.LocalStructuredProperty(Foo)
Emp.query(Emp.name == 'a').fetch() # Should pass
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.text == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.text.IN(['a', 'b']))
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.blob == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.sub == SubModel(booh=42))
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.sub.booh == 42)
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.struct == Foo(name='a'))
# TODO: Make this fail? See issue 89. http://goo.gl/K4gbY
# Currently StructuredProperty(..., indexed=False) has no effect.
## self.assertRaises(datastore_errors.BadFilterError,
## lambda: Emp.struct.name == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.local == Foo(name='a'))
def testProjectionQuery(self):
self.ExpectWarnings()
class Foo(model.Model):
p = model.IntegerProperty('pp') # Also check renaming
q = model.IntegerProperty(required=True)
r = model.IntegerProperty(repeated=True)
d = model.IntegerProperty(default=42)
key = Foo(p=1, q=2, r=[3, 4]).put()
q = Foo.query(Foo.p >= 0)
ent = q.get(projection=[Foo.p, 'q'])
self.assertEqual(ent._projection, ('pp', 'q'))
self.assertEqual(ent.p, 1)
self.assertEqual(ent.q, 2)
self.assertRaises(model.UnprojectedPropertyError, lambda: ent.r)
self.assertRaises(model.UnprojectedPropertyError, lambda: ent.d)
ents = q.fetch(projection=['pp', 'r'])
self.assertEqual(ents, [Foo(p=1, r=[3], key=key, projection=('pp', 'r')),
Foo(p=1, r=[4], key=key, projection=['pp', 'r'])])
self.assertRaises(datastore_errors.BadArgumentError, q.get, projection=[42])
def testProjectionQuery_AllTypes(self):
class Foo(model.Model):
abool = model.BooleanProperty()
aint = model.IntegerProperty()
afloat = model.FloatProperty()
astring = model.StringProperty()
ablob = model.BlobProperty(indexed=True)
akey = model.KeyProperty()
auser = model.UserProperty()
apoint = model.GeoPtProperty()
adatetime = model.DateTimeProperty()
adate = model.DateProperty()
atime = model.TimeProperty()
boo = Foo(abool=True,
aint=42,
afloat=3.14,
astring='foo',
ablob='bar',
akey=model.Key(Foo, 'ref'),
auser=users.User('[email protected]'),
apoint=model.GeoPt(52.35, 4.9166667),
adatetime=datetime.datetime(2012, 5, 1, 8, 19, 42),
adate=datetime.date(2012, 5, 1),
atime=datetime.time(8, 19, 42),
)
boo.put()
qry = Foo.query()
for prop in Foo._properties.itervalues():
ent = qry.get(projection=[prop._name])
self.assertEqual(getattr(ent, prop._code_name),
getattr(boo, prop._code_name))
for otherprop in Foo._properties.itervalues():
if otherprop is not prop:
self.assertRaises(model.UnprojectedPropertyError,
getattr, ent, otherprop._code_name)
def testProjectionQuery_ComputedProperties(self):
class Foo(model.Model):
a = model.StringProperty()
b = model.StringProperty()
c = model.ComputedProperty(lambda ent: '<%s.%s>' % (ent.a, ent.b))
d = model.ComputedProperty(lambda ent: '<%s>' % (ent.a,))
foo = Foo(a='a', b='b')
foo.put()
self.assertEqual((foo.a, foo.b, foo.c, foo.d), ('a', 'b', '<a.b>', '<a>'))
qry = Foo.query()
x = qry.get(projection=['a', 'b'])
self.assertEqual((x.a, x.b, x.c, x.d), ('a', 'b', '<a.b>', '<a>'))
y = qry.get(projection=['a'])
self.assertEqual((y.a, y.d), ('a', '<a>'))
self.assertRaises(model.UnprojectedPropertyError, lambda: y.b)
self.assertRaises(model.UnprojectedPropertyError, lambda: y.c)
z = qry.get(projection=['b'])
self.assertEqual((z.b,), ('b',))
p = qry.get(projection=['c', 'd'])
self.assertEqual((p.c, p.d), ('<a.b>', '<a>'))
def testProjectionQuery_StructuredProperties(self):
class Inner(model.Model):
foo = model.StringProperty()
bar = model.StringProperty()
beh = model.StringProperty()
class Middle(model.Model):
baz = model.StringProperty()
inner = model.StructuredProperty(Inner)
inners = model.StructuredProperty(Inner, repeated=True)
class Outer(model.Model):
name = model.StringProperty()
middle = model.StructuredProperty(Middle, 'mid')
one = Outer(name='one',
middle=Middle(baz='one',
inner=Inner(foo='foo', bar='bar'),
inners=[Inner(foo='a', bar='b'),
Inner(foo='c', bar='d')]))
one.put()
two = Outer(name='two',
middle=Middle(baz='two',
inner=Inner(foo='x', bar='y'),
inners=[Inner(foo='p', bar='q')]))
two.put()
q = Outer.query()
[x, y] = q.fetch(projection=[Outer.name, Outer.middle.baz])
self.assertEqual(x.middle.baz, 'one')
self.assertEqual(x.middle._projection, ('baz',))
self.assertEqual(x,
Outer(key=one.key, name='one',
middle=Middle(baz='one', projection=['baz']),
projection=['mid.baz', 'name']))
self.assertEqual(y,
Outer(key=two.key, name='two',
middle=Middle(baz='two', projection=['baz']),
projection=['mid.baz', 'name']))
self.assertRaises(model.UnprojectedPropertyError, lambda: x.middle.inner)
self.assertRaises(model.ReadonlyPropertyError,
setattr, x, 'middle', None)
self.assertRaises(model.ReadonlyPropertyError,
setattr, x, 'middle', x.middle)
self.assertRaises(model.ReadonlyPropertyError,
setattr, x.middle, 'inner', None)
self.assertRaises(model.ReadonlyPropertyError,
setattr, x.middle, 'inner',
Inner(foo='', projection=['foo']))
x = q.get(projection=[Outer.middle.inner.foo, 'mid.inner.bar'])
self.assertEqual(x.middle.inner.foo, 'foo')
self.assertEqual(x.middle.inner._projection, ('bar', 'foo'))
self.assertEqual(x.middle._projection, ('inner.bar', 'inner.foo'))
self.assertEqual(x._projection, ('mid.inner.bar', 'mid.inner.foo'))
self.assertEqual(x,
Outer(key=one.key,
projection=['mid.inner.bar', 'mid.inner.foo'],
middle=Middle(projection=['inner.bar', 'inner.foo'],
inner=Inner(projection=['bar', 'foo'],
foo='foo', bar='bar'))))
self.assertRaises(model.UnprojectedPropertyError,
lambda: x.middle.inner.beh)
self.assertRaises(model.ReadonlyPropertyError,
setattr, x.middle.inner, 'foo', '')
self.assertRaises(model.ReadonlyPropertyError,
setattr, x.middle.inner, 'beh', '')
xs = q.fetch(projection=[Outer.middle.inners.foo])
self.assertEqual(xs[0],
Outer(key=one.key,
middle=Middle(inners=[Inner(foo='a',
_projection=('foo',))],
_projection=('inners.foo',)),
_projection=('mid.inners.foo',)))
self.assertEqual(len(xs), 3)
for x, foo in zip(xs, ['a', 'c', 'p']):
self.assertEqual(len(x.middle.inners), 1)
self.assertEqual(x.middle.inners[0].foo, foo)
def testFilterRepr(self):
class Employee(model.Model):
name = model.StringProperty()
f = (Employee.name == 'xyzzy')
self.assertEqual(repr(f), "FilterNode('name', '=', 'xyzzy')")
def testNodeComparisons(self):
a = query.FilterNode('foo', '=', 1)
b = query.FilterNode('foo', '=', 1)
c = query.FilterNode('foo', '=', 2)
d = query.FilterNode('foo', '<', 1)
# Don't use assertEqual/assertNotEqual; we want to be sure that
# __eq__ or __ne__ is really called here!
self.assertTrue(a == b)
self.assertTrue(a != c)
self.assertTrue(b != d)
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: a <= b)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: a >= b)
x = query.AND(a, b, c)
y = query.AND(a, b, c)
z = query.AND(a, d)
self.assertTrue(x == y)
self.assertTrue(x != z)
def testQueryForStructuredProperty(self):
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
b1 = Bar(name='b1', foo=Foo(name='nest', rate=1, tags=['tag1', 'tag2']))
b1.put()
b2 = Bar(name='b2', foo=Foo(name='best', rate=2, tags=['tag2', 'tag3']))
b2.put()
b3 = Bar(name='b3', foo=Foo(name='rest', rate=2, tags=['tag2']))
b3.put()
q1 = Bar.query().order(Bar.name)
self.assertEqual(q1.fetch(10), [b1, b2, b3])
q2 = Bar.query().filter(Bar.foo.rate >= 2)
self.assertEqual(q2.fetch(10), [b2, b3])
q3 = q2.order(Bar.foo.rate, -Bar.foo.name, +Bar.foo.rate)
self.assertEqual(q3.fetch(10), [b3, b2])
def testQueryForStructuredPropertyErrors(self):
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
# Can't use inequalities.
self.assertRaises(datastore_errors.BadFilterError,
lambda: Bar.foo < Foo())
self.assertRaises(datastore_errors.BadFilterError,
lambda: Bar.foo != Foo())
# Can't use an empty value.
self.assertRaises(datastore_errors.BadFilterError,
lambda: Bar.foo == Foo())
def testQueryForStructuredPropertyIn(self):
self.ExpectWarnings()
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
a = Bar(name='a', foo=Foo(name='a'))
a.put()
b = Bar(name='b', foo=Foo(name='b'))
b.put()
self.assertEqual(
Bar.query(Bar.foo.IN((Foo(name='a'), Foo(name='b')))).fetch(),
[a, b])
self.assertEqual(Bar.query(Bar.foo.IN([Foo(name='a')])).fetch(), [a])
# An IN query with empty argument can be constructed but not executed.
q = Bar.query(Bar.foo.IN(set()))
self.assertRaises(datastore_errors.BadQueryError, q.fetch)
# Passing a non-sequence argument should fail.
self.assertRaises(datastore_errors.BadArgumentError,
Bar.foo.IN, 42)
self.assertRaises(datastore_errors.BadArgumentError,
Bar.foo.IN, None)
self.assertRaises(datastore_errors.BadArgumentError,
Bar.foo.IN, 'not a sequence')
def testQueryForNestedStructuredProperty(self):
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
class Bak(model.Model):
bar = model.StructuredProperty(Bar)
class Baz(model.Model):
bar = model.StructuredProperty(Bar)
bak = model.StructuredProperty(Bak)
rank = model.IntegerProperty()
b1 = Baz(bar=Bar(foo=Foo(name='a')))
b1.put()
b2 = Baz(bar=Bar(foo=Foo(name='b')), bak=Bak(bar=Bar(foo=Foo(name='c'))))
b2.put()
q1 = Baz.query().filter(Baz.bar.foo.name >= 'a')
self.assertEqual(q1.fetch(10), [b1, b2])
q2 = Baz.query().filter(Baz.bak.bar.foo.name >= 'a')
self.assertEqual(q2.fetch(10), [b2])
def testQueryForWholeStructure(self):
class Employee(model.Model):
name = model.StringProperty()
rank = model.IntegerProperty()
class Manager(Employee):
report = model.StructuredProperty(Employee, repeated=True)
reports_a = []
for i in range(3):
e = Employee(name=str(i), rank=i)
e.put()
e.key = None
reports_a.append(e)
reports_b = []
for i in range(3, 6):
e = Employee(name=str(i), rank=0)
e.put()
e.key = None
reports_b.append(e)
mgr_a = Manager(name='a', report=reports_a)
mgr_a.put()
mgr_b = Manager(name='b', report=reports_b)
mgr_b.put()
mgr_c = Manager(name='c', report=reports_a + reports_b)
mgr_c.put()
res = list(Manager.query(Manager.report == Employee(name='1', rank=1)))
self.assertEqual(res, [mgr_a, mgr_c])
res = list(Manager.query(Manager.report == Employee(rank=0)))
self.assertEqual(res, [mgr_a, mgr_b, mgr_c])
res = list(Manager.query(Manager.report == Employee(rank=0, name='3')))
self.assertEqual(res, [mgr_b, mgr_c])
res = list(Manager.query(Manager.report == Employee(rank=0, name='1')))
self.assertEqual(res, [])
res = list(Manager.query(Manager.report == Employee(rank=0, name='0'),
Manager.report == Employee(rank=1, name='1')))
self.assertEqual(res, [mgr_a, mgr_c])
q = Manager.query(Manager.report == Employee(rank=2, name='2'))
res = list(q)
self.assertEqual(res, [mgr_a, mgr_c])
res = list(q.iter(offset=1))
self.assertEqual(res, [mgr_c])
res = list(q.iter(limit=1))
self.assertEqual(res, [mgr_a])
def testQueryForWholeStructureCallsDatastoreType(self):
# See issue 87. http://goo.gl/Tl5Ed
class Event(model.Model):
what = model.StringProperty()
when = model.DateProperty() # Has non-trivial _datastore_type().
class Outer(model.Model):
who = model.StringProperty()
events = model.StructuredProperty(Event, repeated=True)
q = Outer.query(Outer.events == Event(what='stuff',
when=datetime.date.today()))
q.fetch() # Failed before the fix.
def testQueryForWholeNestedStructure(self):
class A(model.Model):
a1 = model.StringProperty()
a2 = model.StringProperty()
class B(model.Model):
b1 = model.StructuredProperty(A)
b2 = model.StructuredProperty(A)
class C(model.Model):
c = model.StructuredProperty(B)
x = C(c=B(b1=A(a1='a1', a2='a2'), b2=A(a1='a3', a2='a4')))
x.put()
q = C.query(C.c == x.c)
self.assertEqual(q.get(), x)
def testQueryForWholeStructureNone(self):
class X(model.Model):
name = model.StringProperty()
class Y(model.Model):
x = model.StructuredProperty(X)
y = Y(x=None)
y.put()
q = Y.query(Y.x == None)
self.assertEqual(q.fetch(), [y])
def testQueryAncestorConsistentWithAppId(self):
class Employee(model.Model):
pass
a = model.Key(Employee, 1)
self.assertEqual(a.app(), self.APP_ID) # Just checkin'.
Employee.query(ancestor=a, app=a.app()).fetch() # Shouldn't fail.
self.assertRaises(Exception, Employee.query, ancestor=a, app='notthisapp')
def testQueryAncestorConsistentWithNamespace(self):
class Employee(model.Model):
pass
a = model.Key(Employee, 1, namespace='ns')
self.assertEqual(a.namespace(), 'ns') # Just checkin'.
Employee.query(ancestor=a, namespace='ns').fetch()
Employee.query(ancestor=a, namespace=None).fetch()
self.assertRaises(Exception,
Employee.query, ancestor=a, namespace='another')
self.assertRaises(Exception,
Employee.query, ancestor=a, namespace='')
# And again with the default namespace.
b = model.Key(Employee, 1)
self.assertEqual(b.namespace(), '') # Just checkin'.
Employee.query(ancestor=b, namespace='')
Employee.query(ancestor=b, namespace=None)
self.assertRaises(Exception,
Employee.query, ancestor=b, namespace='ns')
# Finally some queries with a namespace but no ancestor.
Employee.query(namespace='').fetch()
Employee.query(namespace='ns').fetch()
def testQueryWithNamespace(self):
class Employee(model.Model):
pass
k = model.Key(Employee, None, namespace='ns')
e = Employee(key=k)
e.put()
self.assertEqual(Employee.query().fetch(), [])
self.assertEqual(Employee.query(namespace='ns').fetch(), [e])
def testQueryFilterAndOrderPreserveNamespace(self):
class Employee(model.Model):
name = model.StringProperty()
q1 = Employee.query(namespace='ns')
q2 = q1.filter(Employee.name == 'Joe')
self.assertEqual(q2.namespace, 'ns')
# Ditto for order()
q3 = q2.order(Employee.name)
self.assertEqual(q3.namespace, 'ns')
def testMultiQuery(self):
q1 = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
q2 = query.Query(kind='Foo').filter(Foo.tags == 'joe').order(Foo.name)
qq = query._MultiQuery([q1, q2])
res = list(qq)
self.assertEqual(res, [self.jill, self.joe])
def testIterAsync(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
@tasklets.synctasklet
def foo():
it = iter(q)
res = []
while (yield it.has_next_async()):
val = it.next()
res.append(val)
self.assertEqual(res, [self.jill, self.joe])
foo()
def testMap(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
callback = lambda e: e.name
@tasklets.tasklet
def callback_async(e):
yield tasklets.sleep(0.01)
raise tasklets.Return(e.name)
self.assertEqual(q.map(callback), ['jill', 'joe'])
self.assertEqual(q.map(callback_async), ['jill', 'joe'])
# TODO: Test map() with esoteric argument combinations
# e.g. keys_only, produce_cursors, and merge_future.
def testMapAsync(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
callback = lambda e: e.name
@tasklets.tasklet
def callback_async(e):
yield tasklets.sleep(0.01)
raise tasklets.Return(e.name)
@tasklets.synctasklet
def foo():
fut = q.map_async(callback)
res = yield fut
self.assertEqual(res, ['jill', 'joe'])
fut = q.map_async(callback_async)
res = yield fut
self.assertEqual(res, ['jill', 'joe'])
foo()
def testFetch(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.fetch(10), [self.jill, self.joe])
self.assertEqual(q.fetch(2), [self.jill, self.joe])
self.assertEqual(q.fetch(1), [self.jill])
def testFetchAsync(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
@tasklets.synctasklet
def foo():
res = yield q.fetch_async(10)
self.assertEqual(res, [self.jill, self.joe])
res = yield q.fetch_async(2)
self.assertEqual(res, [self.jill, self.joe])
res = yield q.fetch_async(1)
self.assertEqual(res, [self.jill])
foo()
def testFetchEmpty(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
self.assertEqual(q.fetch(1), [])
def testFetchKeysOnly(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.fetch(10, keys_only=True),
[self.jill.key, self.joe.key])
def testGet(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.get(), self.jill)
def testGetEmpty(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
self.assertEqual(q.get(), None)
def testGetKeysOnly(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.get(keys_only=True), self.jill.key)
def testCursors(self):
q = query.Query(kind='Foo')
it = q.iter(produce_cursors=True)
expected = [self.joe, self.jill, self.moe]
self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
before = []
after = []
for i, ent in enumerate(it):
self.assertEqual(ent, expected[i])
before.append(it.cursor_before())
after.append(it.cursor_after())
before.append(it.cursor_before())
after.append(it.cursor_after())
self.assertEqual(before[1], after[0])
self.assertEqual(before[2], after[1])
self.assertEqual(before[3], after[2])
self.assertEqual(before[3], after[3]) # !!!
def testCursorsKeysOnly(self):
q = query.Query(kind='Foo')
it = q.iter(produce_cursors=True, keys_only=True)
expected = [self.joe.key, self.jill.key, self.moe.key]
self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
before = []
after = []
for i, ent in enumerate(it):
self.assertEqual(ent, expected[i])
before.append(it.cursor_before())
after.append(it.cursor_after())
before.append(it.cursor_before())
after.append(it.cursor_after())
self.assertEqual(before[1], after[0])
self.assertEqual(before[2], after[1])
self.assertEqual(before[3], after[2])
self.assertEqual(before[3], after[3]) # !!!
def testCursorsEfficientPaging(self):
# We want to read a 'page' of data, get the cursor just past the
# page, and know whether there is another page, all with a single
# RPC. To do this, set limit=pagesize+1, batch_size=pagesize.
q = query.Query(kind='Foo')
cursors = {}
mores = {}
for pagesize in [1, 2, 3, 4]:
it = q.iter(produce_cursors=True, limit=pagesize + 1, batch_size=pagesize)
todo = pagesize
for _ in it:
todo -= 1
if todo <= 0:
break
cursors[pagesize] = it.cursor_after()
mores[pagesize] = it.probably_has_next()
self.assertEqual(mores, {1: True, 2: True, 3: False, 4: False})
self.assertEqual(cursors[3], cursors[4])
# TODO: Assert that only one RPC call was made.
def create_index(self):
ci = datastore_stub_util.datastore_pb.CompositeIndex()
ci.set_app_id(os.environ['APPLICATION_ID'])
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(0)
index.set_entity_type('Foo')
property = index.add_property()
property.set_name('name')
property.set_direction(property.DESCENDING)
property = index.add_property()
property.set_name('tags')
property.set_direction(property.ASCENDING)
stub = self.testbed.get_stub('datastore_v3')
stub.CreateIndex(ci)
def testIndexListPremature(self):
# Before calling next() we don't have the information.
self.create_index()
q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
qi = q.iter()
self.assertEqual(qi.index_list(), None)
def testIndexListEmpty(self):
# A simple query requires no composite indexes.
q = Foo.query(Foo.name == 'joe', Foo.tags == 'joe')
qi = q.iter()
qi.next()
self.assertEqual(qi.index_list(), [])
def testIndexListNontrivial(self):
# Test a non-trivial query.
q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
qi = q.iter()
qi.next()
properties=[model.IndexProperty(name='tags', direction='asc'),
model.IndexProperty(name='name', direction='asc')]
self.assertEqual(qi.index_list(),
[model.IndexState(
definition=model.Index(kind='Foo',
properties=properties,
ancestor=False),
state='serving',
id=0)])
def testIndexListExhausted(self):
# Test that the information is preserved after the iterator is
# exhausted.
q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
qi = q.iter()
list(qi)
properties=[model.IndexProperty(name='tags', direction='asc'),
model.IndexProperty(name='name', direction='asc')]
self.assertEqual(qi.index_list(),
[model.IndexState(
definition=model.Index(kind='Foo',
properties=properties,
ancestor=False),
state='serving',
id=0)])
def testIndexListWithIndexAndOrder(self):
# Test a non-trivial query with sort order and an actual composite
# index present.
self.create_index()
q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
q = q.order(-Foo.name, Foo.tags)
qi = q.iter()
qi.next()
# TODO: This is a little odd, because that's not exactly the index
# we created...?
properties=[model.IndexProperty(name='tags', direction='asc'),
model.IndexProperty(name='name', direction='desc')]
self.assertEqual(qi.index_list(),
[model.IndexState(
definition=model.Index(kind='Foo',
properties=properties,
ancestor=False),
state='serving',
id=0)])
def testIndexListMultiQuery(self):
self.create_index()
q = Foo.query(query.OR(Foo.name == 'joe', Foo.name == 'jill'))
qi = q.iter()
qi.next()
self.assertEqual(qi.index_list(), None)
def testCount(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.count(10), 2)
self.assertEqual(q.count(1), 1)
def testCountAsync(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
@tasklets.synctasklet
def foo():
res = yield q.count_async(10)
self.assertEqual(res, 2)
res = yield q.count_async(1)
self.assertEqual(res, 1)
foo()
def testCountEmpty(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
self.assertEqual(q.count(1), 0)
def testCountPostFilter(self):
class Froo(model.Model):
name = model.StringProperty()
rate = model.IntegerProperty()
age = model.IntegerProperty()
class Bar(model.Model):
name = model.StringProperty()
froo = model.StructuredProperty(Froo, repeated=True)
b1 = Bar(name='b1', froo=[Froo(name='a', rate=1)])
b1.put()
b2 = Bar(name='b2', froo=[Froo(name='a', rate=1)])
b2.put()
q = Bar.query(Bar.froo == Froo(name='a', rate=1))
self.assertEqual(q.count(3), 2)
self.assertEqual(q.count(2), 2)
self.assertEqual(q.count(1), 1)
def testCountDisjunction(self):
q = Foo.query(Foo.name.IN(['joe', 'jill']))
self.assertEqual(q.count(3), 2)
self.assertEqual(q.count(2), 2)
self.assertEqual(q.count(1), 1)
def testFetchPage(self):
# This test implicitly also tests fetch_page_async().
q = query.Query(kind='Foo')
page_size = 1
res, curs, more = q.fetch_page(page_size)
self.assertEqual(res, [self.joe])
self.assertTrue(more)
res, curs, more = q.fetch_page(page_size, start_cursor=curs)
self.assertEqual(res, [self.jill])
self.assertTrue(more)
res, curs, more = q.fetch_page(page_size, start_cursor=curs)
self.assertEqual(res, [self.moe])
self.assertFalse(more)
res, curs, more = q.fetch_page(page_size, start_cursor=curs)
self.assertEqual(res, [])
self.assertFalse(more)
page_size = 2
res, curs, more = q.fetch_page(page_size)
self.assertEqual(res, [self.joe, self.jill])
self.assertTrue(more)
res, curs, more = q.fetch_page(page_size, start_cursor=curs)
self.assertEqual(res, [self.moe])
self.assertFalse(more)
res, curs, more = q.fetch_page(page_size, start_cursor=curs)
self.assertEqual(res, [])
self.assertFalse(more)
page_size = 3
res, curs, more = q.fetch_page(page_size)
self.assertEqual(res, [self.joe, self.jill, self.moe])
self.assertFalse(more)
res, curs, more = q.fetch_page(page_size, start_cursor=curs)
self.assertEqual(res, [])
self.assertFalse(more)
page_size = 4
res, curs, more = q.fetch_page(page_size)
self.assertEqual(res, [self.joe, self.jill, self.moe])
self.assertFalse(more)
res, curs, more = q.fetch_page(page_size, start_cursor=curs)
self.assertEqual(res, [])
self.assertFalse(more)
def testMultiQueryIterator(self):
q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
q = q.order(Foo.name)
@tasklets.synctasklet
def foo():
it = iter(q)
res = []
while (yield it.has_next_async()):
val = it.next()
res.append(val)
self.assertEqual(res, [self.jill, self.joe])
foo()
def testMultiQueryIteratorUnordered(self):
q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
@tasklets.synctasklet
def foo():
it = iter(q)
res = []
while (yield it.has_next_async()):
val = it.next()
res.append(val)
self.assertEqual(set(r._key for r in res),
set([self.jill._key, self.joe._key]))
foo()
def testMultiQueryFetch(self):
q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(-Foo.name)
expected = [self.joe, self.jill]
self.assertEqual(q.fetch(10), expected)
self.assertEqual(q.fetch(None), expected)
self.assertEqual(q.fetch(), expected)
self.assertEqual(q.fetch(2), expected)
self.assertEqual(q.fetch(1), expected[:1])
self.assertEqual(q.fetch(10, offset=1), expected[1:])
self.assertEqual(q.fetch(1, offset=1), expected[1:])
self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])
def testMultiQueryFetchUnordered(self):
q = Foo.query(Foo.tags.IN(['joe', 'jill']))
expected = [self.joe, self.jill]
self.assertEqual(q.fetch(10), expected)
self.assertEqual(q.fetch(None), expected)
self.assertEqual(q.fetch(), expected)
self.assertEqual(q.fetch(2), expected)
self.assertEqual(q.fetch(1), expected[:1])
self.assertEqual(q.fetch(10, offset=1), expected[1:])
self.assertEqual(q.fetch(1, offset=1), expected[1:])
self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])
def testMultiQueryCount(self):
q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(Foo.name)
self.assertEqual(q.count(10), 2)
self.assertEqual(q.count(None), 2)
self.assertEqual(q.count(), 2)
self.assertEqual(q.count(2), 2)
self.assertEqual(q.count(1), 1)
self.assertEqual(q.count(10, keys_only=True), 2)
self.assertEqual(q.count(keys_only=True), 2)
def testMultiQueryCountUnordered(self):
q = Foo.query(Foo.tags.IN(['joe', 'jill']))
self.assertEqual(q.count(10), 2)
self.assertEqual(q.count(None), 2)
self.assertEqual(q.count(), 2)
self.assertEqual(q.count(10, keys_only=True), 2)
self.assertEqual(q.count(keys_only=True), 2)
def testMultiQueryCursors(self):
self.ExpectWarnings()
q = Foo.query(Foo.tags.IN(['joe', 'jill']))
self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
q = q.order(Foo.tags)
self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
q = q.order(Foo.key)
expected = q.fetch()
self.assertEqual(len(expected), 2)
res, curs, more = q.fetch_page(1, keys_only=True)
self.assertEqual(res, [expected[0].key])
self.assertTrue(curs is not None)
self.assertTrue(more)
res, curs, more = q.fetch_page(1, keys_only=False, start_cursor=curs)
self.assertEqual(res, [expected[1]])
self.assertTrue(curs is not None)
self.assertFalse(more)
res, curs, more = q.fetch_page(1, start_cursor=curs)
self.assertEqual(res, [])
self.assertTrue(curs is None)
self.assertFalse(more)
def testMultiQueryWithAndWithoutAncestor(self):
class Benjamin(model.Model):
name = model.StringProperty()
ben = Benjamin(name='ben', parent=self.moe.key)
ben.put()
benji = Benjamin(name='benji')
benji.put()
bq = Benjamin.query()
baq = Benjamin.query(ancestor=self.moe.key)
mq = query._MultiQuery([bq, baq])
res = list(mq)
self.assertEqual(res, [benji, ben])
def testNotEqualOperator(self):
q = query.Query(kind='Foo').filter(Foo.rate != 2)
res = list(q)
self.assertEqual(res, [self.joe, self.moe])
def testInOperator(self):
q = query.Query(kind='Foo').filter(Foo.tags.IN(('jill', 'hello')))
res = list(q)
self.assertEqual(res, [self.joe, self.jill])
def testFullDistributiveLaw(self):
q = query.Query(kind='Foo').filter(Foo.tags.IN(['jill', 'hello']))
q = q.filter(Foo.rate.IN([1, 2]))
DisjunctionNode = query.DisjunctionNode
ConjunctionNode = query.ConjunctionNode
FilterNode = query.FilterNode
expected = DisjunctionNode(
ConjunctionNode(FilterNode('tags', '=', 'jill'),
FilterNode('rate', '=', 1)),
ConjunctionNode(FilterNode('tags', '=', 'jill'),
FilterNode('rate', '=', 2)),
ConjunctionNode(FilterNode('tags', '=', 'hello'),
FilterNode('rate', '=', 1)),
ConjunctionNode(FilterNode('tags', '=', 'hello'),
FilterNode('rate', '=', 2)))
self.assertEqual(q.filters, expected)
def testHalfDistributiveLaw(self):
DisjunctionNode = query.DisjunctionNode
ConjunctionNode = query.ConjunctionNode
FilterNode = query.FilterNode
filters = ConjunctionNode(
FilterNode('tags', 'in', ['jill', 'hello']),
ConjunctionNode(FilterNode('rate', '=', 1),
FilterNode('name', '=', 'moe')))
expected = DisjunctionNode(
ConjunctionNode(FilterNode('tags', '=', 'jill'),
FilterNode('rate', '=', 1),
FilterNode('name', '=', 'moe')),
ConjunctionNode(FilterNode('tags', '=', 'hello'),
FilterNode('rate', '=', 1),
FilterNode('name', '=', 'moe')))
self.assertEqual(filters, expected)
def testKeyFilter(self):
class MyModel(model.Model):
number = model.IntegerProperty()
k1 = model.Key('MyModel', 'foo-1')
m1 = MyModel(key=k1)
m1.put()
k2 = model.Key('MyModel', 'foo-2')
m2 = MyModel(key=k2)
m2.put()
q = MyModel.query(MyModel.key == k1)
res = q.get()
self.assertEqual(res, m1)
q = MyModel.query(MyModel.key > k1)
res = q.get()
self.assertEqual(res, m2)
q = MyModel.query(MyModel.key < k2)
res = q.get()
self.assertEqual(res, m1)
def testUnicode(self):
class MyModel(model.Model):
n = model.IntegerProperty(u'\u4321')
@classmethod
def _get_kind(cls):
return u'\u1234'.encode('utf-8')
a = MyModel(n=42)
k = a.put()
b = k.get()
self.assertEqual(a, b)
self.assertFalse(a is b)
# So far so good, now try queries
res = MyModel.query(MyModel.n == 42).fetch()
self.assertEqual(res, [a])
def testBlobQuery(self):
class MyModel(model.Model):
b = model.BlobProperty(indexed=True)
a = MyModel(b='\xff\x00')
a.put()
q = MyModel.query(MyModel.b == '\xff\x00')
it = iter(q)
b = it.next()
self.assertEqual(a, b)
def testKindlessQuery(self):
class ParentModel(model.Model):
a = model.StringProperty()
class ChildModel(model.Model):
b = model.StringProperty()
p = ParentModel(a= "Test1")
p.put()
c = ChildModel(parent=p.key, b="Test2")
c.put()
q = query.Query(ancestor=p.key)
self.assertEqual(q.count(), 2)
l = q.fetch()
self.assertTrue(c in l)
self.assertTrue(p in l)
def testExpandoQueries(self):
class Foo(model.Expando):
pass
testdata = {'int': 42,
'float': 3.14,
'string': 'hello',
'bool': True,
# Don't call this 'key'; it interferes with the built-in
# key attribute (the entity's key).
'akey': model.Key('Foo', 1),
'point': model.GeoPt(52.35, 4.9166667),
'user': users.User('[email protected]', 'example.com', '123'),
'blobkey': model.BlobKey('blah'),
'none': None,
}
for name, value in testdata.iteritems():
foo = Foo()
setattr(foo, name, value)
foo.put()
qry = Foo.query(query.FilterNode(name, '=', value))
res = qry.get()
self.assertTrue(res is not None, name)
self.assertEqual(getattr(res, name), value)
res.key.delete()
def testQueryCacheInteraction(self):
class Bar(model.Model):
name = model.StringProperty()
ctx = tasklets.get_context()
ctx.set_cache_policy(True)
a = Bar(name='a')
a.put()
b = a.key.get()
self.assertTrue(b is a) # Just verifying that the cache is on.
b = Bar.query().get()
self.assertTrue(b is a)
a.name = 'x' # Modify, but don't write.
b = Bar.query().get()
self.assertTrue(b is a)
self.assertEqual(a.name, 'x')
b = Bar.query().get(use_cache=False) # Skip the cache.
self.assertFalse(b is a)
self.assertEqual(b.name, 'a')
a.key = None # Invalidate cache by resetting key.
b = Bar.query().get()
self.assertFalse(b is a)
self.assertEqual(a.name, 'x')
self.assertEqual(b.name, 'a')
def testGqlMinimal(self):
qry = query.gql('SELECT * FROM Foo')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, None)
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlAncestor(self):
key = model.Key('Foo', 42)
qry = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS KEY('%s')" %
key.urlsafe())
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, key)
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlAncestorWithParameter(self):
qry = query.gql('SELECT * FROM Foo WHERE ANCESTOR IS :1')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, query.Parameter(1))
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlFilter(self):
qry = query.gql("SELECT * FROM Foo WHERE name = 'joe' AND rate = 1")
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, None)
self.assertEqual(qry.filters,
query.ConjunctionNode(
query.FilterNode('name', '=', 'joe'),
query.FilterNode('rate', '=', 1)))
self.assertEqual(qry.orders, None)
def testGqlOrder(self):
qry = query.gql('SELECT * FROM Foo ORDER BY name')
self.assertEqual(query._orders_to_orderings(qry.orders),
[('name', query._ASC)])
def testGqlOffset(self):
qry = query.gql('SELECT * FROM Foo OFFSET 2')
self.assertEqual(qry.default_options.offset, 2)
def testGqlLimit(self):
qry = query.gql('SELECT * FROM Foo LIMIT 2')
self.assertEqual(qry.default_options.limit, 2)
def testGqlParameters(self):
qry = query.gql('SELECT * FROM Foo WHERE name = :1 AND rate = :foo')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, None)
self.assertEqual(qry.filters,
query.ConjunctionNode(
query.ParameterNode(Foo.name, '=',
query.Parameter(1)),
query.ParameterNode(Foo.rate, '=',
query.Parameter('foo'))))
self.assertEqual(qry.orders, None)
def testGqlBindParameters(self):
pqry = query.gql('SELECT * FROM Foo WHERE name = :1')
qry = pqry.bind('joe')
self.assertEqual(list(qry), [self.joe])
qry = pqry.bind('jill')
self.assertEqual(list(qry), [self.jill])
def testGqlUnresolvedParameters(self):
self.ExpectErrors()
qry = query.gql(
'SELECT * FROM Foo WHERE name = :1')
self.assertRaises(datastore_errors.BadArgumentError, qry.fetch)
self.assertRaises(datastore_errors.BadArgumentError, qry.count)
self.assertRaises(datastore_errors.BadArgumentError, list, qry)
self.assertRaises(datastore_errors.BadArgumentError, qry.iter)
def checkGql(self, expected, gql, args=(), kwds={},
fetch=lambda q: list(q)):
actual = fetch(query.gql(gql).bind(*args, **kwds))
self.assertEqual(expected, actual)
def testGqlBasicQueries(self):
self.checkGql([self.joe, self.jill, self.moe], "SELECT * FROM Foo")
def testGqlKeyQueries(self):
self.checkGql([self.joe.key, self.jill.key, self.moe.key],
"SELECT __key__ FROM Foo")
def testGqlOperatorQueries(self):
self.checkGql([self.joe], "SELECT * FROM Foo WHERE name = 'joe'")
self.checkGql([self.moe], "SELECT * FROM Foo WHERE name > 'joe'")
self.checkGql([self.jill], "SELECT * FROM Foo WHERE name < 'joe'")
self.checkGql([self.joe, self.moe],
"SELECT * FROM Foo WHERE name >= 'joe'")
self.checkGql([self.jill, self.joe],
"SELECT * FROM Foo WHERE name <= 'joe'")
self.checkGql([self.jill, self.moe],
"SELECT * FROM Foo WHERE name != 'joe'")
# NOTE: The ordering on these is questionable:
self.checkGql([self.joe, self.jill],
"SELECT * FROM Foo WHERE name IN ('joe', 'jill')")
self.checkGql([self.jill, self.joe],
"SELECT * FROM Foo WHERE name IN ('jill', 'joe')")
def testGqlOrderQueries(self):
self.checkGql([self.jill, self.joe, self.moe],
"SELECT * FROM Foo ORDER BY name")
self.checkGql([self.moe, self.joe, self.jill],
"SELECT * FROM Foo ORDER BY name DESC")
self.checkGql([self.joe, self.jill, self.moe],
"SELECT * FROM Foo ORDER BY __key__ ASC")
self.checkGql([self.moe, self.jill, self.joe],
"SELECT * FROM Foo ORDER BY __key__ DESC")
self.checkGql([self.jill, self.joe, self.moe],
"SELECT * FROM Foo ORDER BY rate DESC, name")
def testGqlOffsetQuery(self):
self.checkGql([self.jill, self.moe], "SELECT * FROM Foo OFFSET 1")
def testGqlLimitQuery(self):
self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2")
def testGqlLimitOffsetQuery(self):
self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1")
def testGqlLimitOffsetQueryUsingFetch(self):
self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1",
fetch=lambda q: q.fetch())
# XXX TODO: Make this work:
## def testGqlLimitQueryUsingFetch(self):
## self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2",
## fetch=lambda q: q.fetch(3))
def testGqlOffsetQueryUsingFetchPage(self):
q = query.gql("SELECT * FROM Foo LIMIT 2")
res1, cur1, more1 = q.fetch_page(1)
self.assertEqual([self.joe], res1)
self.assertEqual(True, more1)
res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1)
self.assertEqual([self.jill], res2)
# XXX TODO: Gotta make this work:
## self.assertEqual(False, more2)
## res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2)
## self.assertEqual([], res3)
## self.assertEqual(False, more3)
## self.assertEqual(None, cur3)
def testGqlLimitQueryUsingFetchPage(self):
q = query.gql("SELECT * FROM Foo OFFSET 1")
res1, cur1, more1 = q.fetch_page(1)
self.assertEqual([self.jill], res1)
self.assertEqual(True, more1)
# NOTE: Without offset=0, the following break.
res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1, offset=0)
self.assertEqual([self.moe], res2)
self.assertEqual(False, more2)
res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2, offset=0)
self.assertEqual([], res3)
self.assertEqual(False, more3)
self.assertEqual(None, cur3)
def testGqlParameterizedAncestor(self):
q = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS :1")
self.assertEqual([self.moe], q.bind(self.moe.key).fetch())
def testGqlParameterizedInClause(self):
# NOTE: The ordering on these is questionable:
q = query.gql("SELECT * FROM Foo WHERE name IN :1")
self.assertEqual([self.jill, self.joe], q.bind(('jill', 'joe')).fetch())
# Exercise the LIST function.
q = query.gql("SELECT * FROM Foo WHERE name IN (:a, :b)")
self.assertEqual([self.jill, self.joe], q.bind(a='jill', b='joe').fetch())
# Generate OR/AND nodes containing parameter nodes.
q = query.gql("SELECT * FROM Foo WHERE name = :1 AND rate in (1, 2)")
self.assertEqual([self.jill], q.bind('jill').fetch())
def testGqlKeyFunction(self):
class Bar(model.Model):
ref = model.KeyProperty(kind=Foo)
noref = Bar()
noref.put()
joeref = Bar(ref=self.joe.key)
joeref.put()
moeref = Bar(ref=self.moe.key)
moeref.put()
self.assertEqual(
[noref],
Bar.gql("WHERE ref = NULL").fetch())
self.assertEqual(
[noref],
Bar.gql("WHERE ref = :1").bind(None).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = :1").bind(self.joe.key).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY('%s')" % self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY('Foo', %s)" % self.joe.key.id()).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY(:1)").bind(self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY('Foo', :1)").bind(self.joe.key.id()).fetch())
def testGqlKeyFunctionAncestor(self):
class Bar(model.Model):
pass
nobar = Bar()
nobar.put()
joebar = Bar(parent=self.joe.key)
joebar.put()
moebar = Bar(parent=self.moe.key)
moebar.put()
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS KEY('%s')" % self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS :1").bind(self.joe.key).fetch())
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS KEY(:1)").bind(self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS KEY('Foo', :1)")
.bind(self.joe.key.id()).fetch())
def testGqlAncestorFunctionError(self):
self.assertRaises(TypeError,
query.gql, 'SELECT * FROM Foo WHERE ANCESTOR IS USER(:1)')
def testGqlOtherFunctions(self):
class Bar(model.Model):
auser = model.UserProperty()
apoint = model.GeoPtProperty()
adatetime = model.DateTimeProperty()
adate = model.DateProperty()
atime = model.TimeProperty()
abar = Bar(
auser=users.User('[email protected]'),
apoint=model.GeoPt(52.35, 4.9166667),
adatetime=datetime.datetime(2012, 2, 1, 14, 54, 0),
adate=datetime.date(2012, 2, 2),
atime=datetime.time(14, 54, 0),
)
abar.put()
bbar = Bar()
bbar.put()
self.assertEqual(
[abar.key],
query.gql("SELECT __key__ FROM Bar WHERE auser=USER(:1)")
.bind('[email protected]').fetch())
self.assertEqual(
[abar.key],
query.gql("SELECT __key__ FROM Bar WHERE apoint=GEOPT(:1, :2)")
.bind(52.35, 4.9166667).fetch())
self.assertEqual(
[abar.key],
query.gql("SELECT __key__ FROM Bar WHERE adatetime=DATETIME(:1)")
.bind('2012-02-01 14:54:00').fetch())
self.assertEqual(
[abar.key],
query.gql("SELECT __key__ FROM Bar WHERE adate=DATE(:1, :2, :2)")
.bind(2012, 2).fetch())
self.assertEqual(
[abar.key],
query.gql("SELECT __key__ FROM Bar WHERE atime=TIME(:hour, :min, :sec)")
.bind(hour=14, min=54, sec=0).fetch())
def testGqlStructuredPropertyQuery(self):
class Bar(model.Model):
foo = model.StructuredProperty(Foo)
barf = Bar(foo=Foo(name='one', rate=3, tags=['a', 'b']))
barf.put()
barg = Bar(foo=Foo(name='two', rate=4, tags=['b', 'c']))
barg.put()
barh = Bar()
barh.put()
# TODO: Once SDK 1.6.3 is released, drop quotes around foo.name.
q = Bar.gql("WHERE \"foo.name\" = 'one'")
self.assertEqual([barf], q.fetch())
q = Bar.gql("WHERE foo = :1").bind(Foo(name='two', rate=4))
self.assertEqual([barg], q.fetch())
q = Bar.gql("WHERE foo = NULL")
self.assertEqual([barh], q.fetch())
q = Bar.gql("WHERE foo = :1")
self.assertEqual([barh], q.bind(None).fetch())
def testGqlExpandoProperty(self):
class Bar(model.Expando):
pass
babar = Bar(name='Babar')
babar.put()
bare = Bar(nude=42)
bare.put()
q = Bar.gql("WHERE name = 'Babar'")
self.assertEqual([babar], q.fetch())
q = Bar.gql("WHERE nude = :1")
self.assertEqual([bare], q.bind(42).fetch())
def testGqlExpandoInStructure(self):
class Bar(model.Expando):
pass
class Baz(model.Model):
bar = model.StructuredProperty(Bar)
bazar = Baz(bar=Bar(bow=1, wow=2))
bazar.put()
bazone = Baz()
bazone.put()
q = Baz.gql("WHERE \"bar.bow\" = 1")
self.assertEqual([bazar], q.fetch())
def testGqlKindlessQuery(self):
results = query.gql('SELECT *').fetch()
self.assertEqual([self.joe, self.jill, self.moe], results)
def testGqlSubclass(self):
# You can pass gql() a subclass of Query and it'll use that.
class MyQuery(query.Query):
pass
q = query._gql("SELECT * FROM Foo WHERE name = :1", query_class=MyQuery)
self.assertTrue(isinstance(q, MyQuery))
# And bind() preserves the class.
qb = q.bind('joe')
self.assertTrue(isinstance(qb, MyQuery))
# .filter() also preserves the class, as well as default_options.
qf = q.filter(Foo.rate == 1)
self.assertTrue(isinstance(qf, MyQuery))
self.assertEqual(qf.default_options, q.default_options)
# Same for .options().
qo = q.order(-Foo.name)
self.assertTrue(isinstance(qo, MyQuery))
self.assertEqual(qo.default_options, q.default_options)
def testGqlUnusedBindings(self):
# Only unused positional bindings raise an error.
q = Foo.gql("WHERE ANCESTOR IS :1 AND rate >= :2")
qb = q.bind(self.joe.key, 2, foo=42) # Must not fail
self.assertRaises(datastore_errors.BadArgumentError, q.bind)
self.assertRaises(datastore_errors.BadArgumentError, q.bind, self.joe.key)
self.assertRaises(datastore_errors.BadArgumentError, q.bind,
self.joe.key, 2, 42)
def testGqlWithBind(self):
q = Foo.gql("WHERE name = :1", 'joe')
self.assertEqual([self.joe], q.fetch())
def testGqlAnalyze(self):
q = Foo.gql("WHERE name = 'joe'")
self.assertEqual([], q.analyze())
q = Foo.gql("WHERE name = :1 AND rate = :2")
self.assertEqual([1, 2], q.analyze())
q = Foo.gql("WHERE name = :foo AND rate = :bar")
self.assertEqual(['bar', 'foo'], q.analyze())
q = Foo.gql("WHERE tags = :1 AND name = :foo AND rate = :bar")
self.assertEqual([1, 'bar', 'foo'], q.analyze())
def testGqlProjection(self):
q = query.gql("SELECT name, tags FROM Foo WHERE name < 'joe' ORDER BY name")
self.assertEqual(q.fetch(), [Foo(name='jill', tags=['jack'],
key=self.jill.key,
projection=['name', 'tags']),
Foo(name='jill', tags=['jill'],
key=self.jill.key,
projection=('name', 'tags'))])
def testAsyncNamespace(self):
# Test that async queries pick up the namespace when the
# foo_async() call is made, not later.
# See issue 168. http://goo.gl/aJp7i
namespace_manager.set_namespace('mission')
barney = Foo(name='Barney')
barney.put()
willy = Foo(name='Willy')
willy.put()
q1 = Foo.query()
qm = Foo.query(Foo.name.IN(['Barney', 'Willy'])).order(Foo._key)
# Test twice: once with a simple query, once with a MultiQuery.
for q in q1, qm:
# Test fetch_async().
namespace_manager.set_namespace('mission')
fut = q.fetch_async()
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, [barney, willy])
# Test map_async().
namespace_manager.set_namespace('mission')
fut = q.map_async(None)
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, [barney, willy])
# Test get_async().
namespace_manager.set_namespace('mission')
fut = q.get_async()
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, barney)
# Test count_async().
namespace_manager.set_namespace('mission')
fut = q.count_async()
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, 2)
# Test fetch_page_async().
namespace_manager.set_namespace('mission')
fut = q.fetch_page_async(2)
namespace_manager.set_namespace('impossible')
res, cur, more = fut.get_result()
self.assertEqual(res, [barney, willy])
self.assertEqual(more, False)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPLICATION_ID"
] |
[]
|
["APPLICATION_ID"]
|
python
| 1 | 0 | |
cmd/githubCreateIssue_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type githubCreateIssueOptions struct {
APIURL string `json:"apiUrl,omitempty"`
Body string `json:"body,omitempty"`
BodyFilePath string `json:"bodyFilePath,omitempty"`
Owner string `json:"owner,omitempty"`
Repository string `json:"repository,omitempty"`
Title string `json:"title,omitempty"`
Token string `json:"token,omitempty"`
}
// GithubCreateIssueCommand Create a new GitHub issue.
func GithubCreateIssueCommand() *cobra.Command {
const STEP_NAME = "githubCreateIssue"
metadata := githubCreateIssueMetadata()
var stepConfig githubCreateIssueOptions
var startTime time.Time
var logCollector *log.CollectorHook
var createGithubCreateIssueCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Create a new GitHub issue.",
Long: `This step allows you to create a new GitHub issue.
You will be able to use this step for example for regular jobs to report into your repository in case of new security findings.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Token)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
githubCreateIssue(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addGithubCreateIssueFlags(createGithubCreateIssueCmd, &stepConfig)
return createGithubCreateIssueCmd
}
func addGithubCreateIssueFlags(cmd *cobra.Command, stepConfig *githubCreateIssueOptions) {
cmd.Flags().StringVar(&stepConfig.APIURL, "apiUrl", `https://api.github.com`, "Set the GitHub API url.")
cmd.Flags().StringVar(&stepConfig.Body, "body", os.Getenv("PIPER_body"), "Defines the content of the issue, e.g. using markdown syntax.")
cmd.Flags().StringVar(&stepConfig.BodyFilePath, "bodyFilePath", os.Getenv("PIPER_bodyFilePath"), "Defines the path to a file containing the markdown content for the issue. This can be used instead of [`body`](#body)")
cmd.Flags().StringVar(&stepConfig.Owner, "owner", os.Getenv("PIPER_owner"), "Name of the GitHub organization.")
cmd.Flags().StringVar(&stepConfig.Repository, "repository", os.Getenv("PIPER_repository"), "Name of the GitHub repository.")
cmd.Flags().StringVar(&stepConfig.Title, "title", os.Getenv("PIPER_title"), "Defines the title for the Issue.")
cmd.Flags().StringVar(&stepConfig.Token, "token", os.Getenv("PIPER_token"), "GitHub personal access token as per https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line.")
cmd.MarkFlagRequired("apiUrl")
cmd.MarkFlagRequired("owner")
cmd.MarkFlagRequired("repository")
cmd.MarkFlagRequired("title")
cmd.MarkFlagRequired("token")
}
// retrieve step metadata
func githubCreateIssueMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "githubCreateIssue",
Aliases: []config.Alias{},
Description: "Create a new GitHub issue.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "githubTokenCredentialsId", Description: "Jenkins 'Secret text' credentials ID containing token to authenticate to GitHub.", Type: "jenkins"},
},
Parameters: []config.StepParameters{
{
Name: "apiUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubApiUrl"}},
Default: `https://api.github.com`,
},
{
Name: "body",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_body"),
},
{
Name: "bodyFilePath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_bodyFilePath"),
},
{
Name: "owner",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "github/owner",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubOrg"}},
Default: os.Getenv("PIPER_owner"),
},
{
Name: "repository",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "github/repository",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubRepo"}},
Default: os.Getenv("PIPER_repository"),
},
{
Name: "title",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_title"),
},
{
Name: "token",
ResourceRef: []config.ResourceReference{
{
Name: "githubTokenCredentialsId",
Type: "secret",
},
{
Name: "githubVaultSecretName",
Type: "vaultSecret",
Default: "github",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubToken"}, {Name: "access_token"}},
Default: os.Getenv("PIPER_token"),
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_body\"",
"\"PIPER_bodyFilePath\"",
"\"PIPER_owner\"",
"\"PIPER_repository\"",
"\"PIPER_title\"",
"\"PIPER_token\"",
"\"PIPER_body\"",
"\"PIPER_bodyFilePath\"",
"\"PIPER_owner\"",
"\"PIPER_repository\"",
"\"PIPER_title\"",
"\"PIPER_token\""
] |
[] |
[
"PIPER_title",
"PIPER_body",
"PIPER_token",
"PIPER_bodyFilePath",
"PIPER_repository",
"PIPER_owner"
] |
[]
|
["PIPER_title", "PIPER_body", "PIPER_token", "PIPER_bodyFilePath", "PIPER_repository", "PIPER_owner"]
|
go
| 6 | 0 | |
pkg/query-service/app/server.go
|
package app
import (
"context"
"fmt"
"net"
"net/http"
_ "net/http/pprof" // http profiler
"os"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"go.signoz.io/query-service/app/clickhouseReader"
"go.signoz.io/query-service/app/dashboards"
"go.signoz.io/query-service/constants"
"go.signoz.io/query-service/dao"
"go.signoz.io/query-service/healthcheck"
"go.signoz.io/query-service/telemetry"
"go.signoz.io/query-service/utils"
"go.uber.org/zap"
)
type ServerOptions struct {
HTTPHostPort string
PrivateHostPort string
}
// Server runs HTTP, Mux and a grpc server
type Server struct {
// logger *zap.Logger
// tracer opentracing.Tracer // TODO make part of flags.Service
serverOptions *ServerOptions
// public http router
httpConn net.Listener
httpServer *http.Server
// private http
privateConn net.Listener
privateHTTP *http.Server
unavailableChannel chan healthcheck.Status
}
// HealthCheckStatus returns health check status channel a client can subscribe to
func (s Server) HealthCheckStatus() chan healthcheck.Status {
return s.unavailableChannel
}
// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
if err := dao.InitDao("sqlite", constants.RELATIONAL_DATASOURCE_PATH); err != nil {
return nil, err
}
localDB, err := dashboards.InitDB(constants.RELATIONAL_DATASOURCE_PATH)
if err != nil {
return nil, err
}
localDB.SetMaxOpenConns(10)
var reader Reader
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
clickhouseReader := clickhouseReader.NewReader(localDB)
go clickhouseReader.Start()
reader = clickhouseReader
} else {
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
}
apiHandler, err := NewAPIHandler(&reader, dao.DB())
if err != nil {
return nil, err
}
s := &Server{
// logger: logger,
// tracer: tracer,
serverOptions: serverOptions,
unavailableChannel: make(chan healthcheck.Status),
}
httpServer, err := s.createPublicServer(apiHandler)
if err != nil {
return nil, err
}
s.httpServer = httpServer
privateServer, err := s.createPrivateServer(apiHandler)
if err != nil {
return nil, err
}
s.privateHTTP = privateServer
return s, nil
}
func (s *Server) createPrivateServer(api *APIHandler) (*http.Server, error) {
r := NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddlewarePrivate)
api.RegisterPrivateRoutes(r)
c := cors.New(cors.Options{
//todo(amol): find out a way to add exact domain or
// ip here for alert manager
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
})
handler := c.Handler(r)
handler = handlers.CompressHandler(handler)
return &http.Server{
Handler: handler,
}, nil
}
func (s *Server) createPublicServer(api *APIHandler) (*http.Server, error) {
r := NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
api.RegisterRoutes(r)
api.RegisterMetricsRoutes(r)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
})
handler := c.Handler(r)
handler = handlers.CompressHandler(handler)
return &http.Server{
Handler: handler,
}, nil
}
// loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path, "\ttimeTaken: ", time.Now().Sub(startTime))
})
}
// loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Now().Sub(startTime))
})
}
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
// we default to that status code.
return &loggingResponseWriter{w, http.StatusOK}
}
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
}
})
}
func setTimeoutMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), constants.ContextTimeout*time.Second)
defer cancel()
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}
// initListeners initialises listeners of the server
func (s *Server) initListeners() error {
// listen on public port
var err error
publicHostPort := s.serverOptions.HTTPHostPort
if publicHostPort == "" {
return fmt.Errorf("constants.HTTPHostPort is required")
}
s.httpConn, err = net.Listen("tcp", publicHostPort)
if err != nil {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
if privateHostPort == "" {
return fmt.Errorf("constants.PrivateHostPort is required")
}
s.privateConn, err = net.Listen("tcp", privateHostPort)
if err != nil {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
return nil
}
// Start initialises the listeners and then serves the public HTTP API, the
// pprof debug endpoint, and the private HTTP API, each on its own goroutine.
// It returns right after launching them; serve failures are signalled on
// s.unavailableChannel.
func (s *Server) Start() error {
	err := s.initListeners()
	if err != nil {
		return err
	}

	var httpPort int
	if port, err := utils.GetPort(s.httpConn.Addr()); err == nil {
		httpPort = port
	}

	go func() {
		// Use the sugared key/value API: passing zap.Field values to the
		// sugared Info/Error just Sprint-dumps them.
		zap.S().Infow("Starting HTTP server", "port", httpPort, "addr", s.serverOptions.HTTPHostPort)
		switch err := s.httpServer.Serve(s.httpConn); err {
		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
			// normal exit, nothing to do
		default:
			zap.S().Errorw("Could not start HTTP server", "error", err)
		}
		s.unavailableChannel <- healthcheck.Unavailable
	}()

	go func() {
		zap.S().Infow("Starting pprof server", "addr", constants.DebugHttpPort)
		// Goroutine-local error: the previous code assigned to the enclosing
		// function's err from inside this goroutine, racing with the caller.
		if err := http.ListenAndServe(constants.DebugHttpPort, nil); err != nil {
			zap.S().Errorw("Could not start pprof server", "error", err)
		}
	}()

	var privatePort int
	if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
		privatePort = port
	}

	go func() {
		zap.S().Infow("Starting Private HTTP server", "port", privatePort, "addr", s.serverOptions.PrivateHostPort)
		switch err := s.privateHTTP.Serve(s.privateConn); err {
		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
			// normal exit, nothing to do
			zap.S().Info("private http server closed")
		default:
			zap.S().Errorw("Could not start private HTTP server", "error", err)
		}
		s.unavailableChannel <- healthcheck.Unavailable
	}()

	return nil
}
|
[
"\"STORAGE\""
] |
[] |
[
"STORAGE"
] |
[]
|
["STORAGE"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"gorm.io/gorm"
)
// Response is the JSON envelope the API handlers marshal into replies.
type Response struct {
	StatusCode int    `json:"status-code"`
	Message    string `json:"message"`
	Data       string `json:"data"`
}

// Account is the GORM model describing a stored user account.
type Account struct {
	gorm.Model
	UniqueID string
	Token    string
	Email    string
	Username string
	// NOTE(review): nothing visible here hashes this value — confirm the
	// handlers hash Password before it is persisted.
	Password string
}
// main wires up the HTTP routes and starts the API server. The listen port
// is taken from the PORT environment variable, defaulting to 8000.
func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = "8000"
	}

	http.HandleFunc("/", baseHandler)
	http.HandleFunc("/api/login", loginHandler)
	http.HandleFunc("/api/register", registerHandler)
	http.HandleFunc("/api/forgot-password", forgotPasswordHandler)

	log.Printf("Server listens to port %s", port)
	// ListenAndServe always returns a non-nil error once the server stops;
	// the previous code discarded it silently.
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
soracom/generated/cmd/sigfox_devices_unset_group.go
|
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// SigfoxDevicesUnsetGroupCmdDeviceId holds value of 'device_id' option
var SigfoxDevicesUnsetGroupCmdDeviceId string

// init registers the --device-id flag and attaches the unset-group
// subcommand to the sigfox-devices command tree.
func init() {
	SigfoxDevicesUnsetGroupCmd.Flags().StringVar(&SigfoxDevicesUnsetGroupCmdDeviceId, "device-id", "", TRAPI("Device ID of the target Sigfox device."))
	SigfoxDevicesCmd.AddCommand(SigfoxDevicesUnsetGroupCmd)
}

// SigfoxDevicesUnsetGroupCmd defines 'unset-group' subcommand.
// It authenticates, posts to /sigfox_devices/{device_id}/unset_group and
// prints the response body (raw, or pretty-printed JSON by default).
var SigfoxDevicesUnsetGroupCmd = &cobra.Command{
	Use:   "unset-group",
	Short: TRAPI("/sigfox_devices/{device_id}/unset_group:post:summary"),
	Long:  TRAPI(`/sigfox_devices/{device_id}/unset_group:post:description`),
	RunE: func(cmd *cobra.Command, args []string) error {
		opt := &apiClientOptions{
			BasePath: "/v1",
			Language: getSelectedLanguage(),
		}

		ac := newAPIClient(opt)
		// SORACOM_VERBOSE (any non-empty value) turns on verbose API tracing.
		if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
			ac.SetVerbose(true)
		}

		err := authHelper(ac, cmd, args)
		if err != nil {
			cmd.SilenceUsage = true
			return err
		}

		param, err := collectSigfoxDevicesUnsetGroupCmdParams(ac)
		if err != nil {
			return err
		}

		body, err := ac.callAPI(param)
		if err != nil {
			cmd.SilenceUsage = true
			return err
		}

		// An empty body is a successful no-output response.
		if body == "" {
			return nil
		}

		if rawOutput {
			_, err = os.Stdout.Write([]byte(body))
		} else {
			return prettyPrintStringAsJSON(body)
		}
		return err
	},
}
// collectSigfoxDevicesUnsetGroupCmdParams validates the flag values and
// assembles the request parameters for the unset-group API call. It fails
// when the required --device-id flag was not provided.
func collectSigfoxDevicesUnsetGroupCmdParams(ac *apiClient) (*apiParams, error) {
	if SigfoxDevicesUnsetGroupCmdDeviceId == "" {
		return nil, fmt.Errorf("required parameter '%s' is not specified", "device-id")
	}

	return &apiParams{
		method: "POST",
		path:   buildPathForSigfoxDevicesUnsetGroupCmd("/sigfox_devices/{device_id}/unset_group"),
		query:  buildQueryForSigfoxDevicesUnsetGroupCmd(),

		noRetryOnError: noRetryOnError,
	}, nil
}

// buildPathForSigfoxDevicesUnsetGroupCmd substitutes the URL-escaped device
// ID into the {device_id} placeholder of the endpoint path template.
func buildPathForSigfoxDevicesUnsetGroupCmd(path string) string {
	escapedDeviceId := url.PathEscape(SigfoxDevicesUnsetGroupCmdDeviceId)

	path = strReplace(path, "{"+"device_id"+"}", escapedDeviceId, -1)

	return path
}

// buildQueryForSigfoxDevicesUnsetGroupCmd returns the query parameters for
// this endpoint (none — always empty).
func buildQueryForSigfoxDevicesUnsetGroupCmd() url.Values {
	result := url.Values{}

	return result
}
|
[
"\"SORACOM_VERBOSE\""
] |
[] |
[
"SORACOM_VERBOSE"
] |
[]
|
["SORACOM_VERBOSE"]
|
go
| 1 | 0 | |
pkg/cri/config/config_windows.go
|
// +build windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"os"
"path/filepath"
"github.com/containerd/containerd"
"github.com/containerd/containerd/pkg/cri/streaming"
)
// DefaultConfig returns default configurations of cri plugin.
//
// Windows defaults: CNI binaries/configs live under %ProgramFiles%\containerd,
// the default runtime is the runhcs WCOW process shim, and the stream server
// binds to loopback on an ephemeral port.
func DefaultConfig() PluginConfig {
	return PluginConfig{
		CniConfig: CniConfig{
			NetworkPluginBinDir:       filepath.Join(os.Getenv("ProgramFiles"), "containerd", "cni", "bin"),
			NetworkPluginConfDir:      filepath.Join(os.Getenv("ProgramFiles"), "containerd", "cni", "conf"),
			NetworkPluginMaxConfNum:   1,
			NetworkPluginConfTemplate: "",
		},
		ContainerdConfig: ContainerdConfig{
			Snapshotter:        containerd.DefaultSnapshotter,
			DefaultRuntimeName: "runhcs-wcow-process",
			NoPivot:            false,
			Runtimes: map[string]Runtime{
				"runhcs-wcow-process": {
					Type: "io.containerd.runhcs.v1",
				},
			},
		},
		DisableTCPService:   true,
		StreamServerAddress: "127.0.0.1",
		// "0" lets the OS pick a free port.
		StreamServerPort:  "0",
		StreamIdleTimeout: streaming.DefaultConfig.StreamIdleTimeout.String(), // 4 hour
		EnableTLSStreaming: false,
		X509KeyPairStreaming: X509KeyPairStreaming{
			TLSKeyFile:  "",
			TLSCertFile: "",
		},
		SandboxImage:            "k8s.gcr.io/pause:3.5",
		StatsCollectPeriod:      10,
		MaxContainerLogLineSize: 16 * 1024,
		Registry: Registry{
			Mirrors: map[string]Mirror{
				"docker.io": {
					Endpoints: []string{"https://registry-1.docker.io"},
				},
			},
		},
		MaxConcurrentDownloads:    3,
		IgnoreImageDefinedVolumes: false,
		// TODO(windows): Add platform specific config, so that most common defaults can be shared.
	}
}
|
[
"\"ProgramFiles\"",
"\"ProgramFiles\""
] |
[] |
[
"ProgramFiles"
] |
[]
|
["ProgramFiles"]
|
go
| 1 | 0 | |
tools/istio-iptables/pkg/cmd/run.go
|
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bufio"
"fmt"
"net"
"os"
"strings"
"time"
"istio.io/istio/tools/istio-iptables/pkg/builder"
"istio.io/istio/tools/istio-iptables/pkg/config"
"istio.io/istio/tools/istio-iptables/pkg/constants"
dep "istio.io/istio/tools/istio-iptables/pkg/dependencies"
iptableslog "istio.io/istio/tools/istio-iptables/pkg/log"
"istio.io/pkg/log"
)
// Ops selects whether rules are being created or torn down.
type Ops int

const (
	// AppendOps performs append operations of rules
	AppendOps Ops = iota
	// DeleteOps performs delete operations of rules
	DeleteOps

	// In TPROXY mode, mark the packet from envoy outbound to app by podIP,
	// this is to prevent it being intercepted to envoy inbound listener.
	outboundMark = "1338"
)

// opsToString maps an Ops value to the corresponding iptables flag
// ("-A" for append, "-D" for delete).
var opsToString = map[Ops]string{
	AppendOps: "-A",
	DeleteOps: "-D",
}
// IptablesConfigurator builds and executes the iptables rule set derived
// from the given config.
type IptablesConfigurator struct {
	iptables *builder.IptablesBuilder
	// TODO(abhide): Fix dep.Dependencies with better interface
	ext dep.Dependencies
	cfg *config.Config
}

// NewIptablesConfigurator returns a configurator wired to the given config
// and command-execution dependencies.
func NewIptablesConfigurator(cfg *config.Config, ext dep.Dependencies) *IptablesConfigurator {
	return &IptablesConfigurator{
		iptables: builder.NewIptablesBuilder(cfg),
		ext:      ext,
		cfg:      cfg,
	}
}

// NetworkRange is the parsed form of a comma-separated CIDR list.
type NetworkRange struct {
	IsWildcard    bool         // true when the list was the literal "*"
	IPNets        []*net.IPNet // parsed CIDR ranges
	HasLoopBackIP bool         // true when any entry's address is a loopback address
}
// filterEmpty returns a new slice holding the non-empty entries of strs,
// preserving their order.
func filterEmpty(strs []string) []string {
	out := make([]string, 0, len(strs))
	for _, str := range strs {
		if str != "" {
			out = append(out, str)
		}
	}
	return out
}
// split breaks a comma-separated list into its non-empty items. An empty
// input yields nil; otherwise the result is a (possibly empty) non-nil slice.
func split(s string) []string {
	if s == "" {
		return nil
	}
	parts := strings.Split(s, ",")
	out := make([]string, 0, len(parts))
	for _, p := range parts {
		if p != "" {
			out = append(out, p)
		}
	}
	return out
}
// separateV4V6 parses a comma-separated CIDR list into an IPv4 range set and
// an IPv6 range set (returned in that order). The literal "*" yields
// wildcard ranges for both families. Unparsable entries are reported on
// stderr and skipped — deliberately, for bug compatibility with the
// original istio-iptables script.
func (cfg *IptablesConfigurator) separateV4V6(cidrList string) (NetworkRange, NetworkRange, error) {
	if cidrList == "*" {
		return NetworkRange{IsWildcard: true}, NetworkRange{IsWildcard: true}, nil
	}
	ipv6Ranges := NetworkRange{}
	ipv4Ranges := NetworkRange{}
	for _, ipRange := range split(cidrList) {
		ip, ipNet, err := net.ParseCIDR(ipRange)
		if err != nil {
			// Only a failure to write the warning itself aborts parsing.
			_, err = fmt.Fprintf(os.Stderr, "Ignoring error for bug compatibility with istio-iptables: %s\n", err.Error())
			if err != nil {
				return ipv4Ranges, ipv6Ranges, err
			}
			continue
		}
		if ip.To4() != nil {
			ipv4Ranges.IPNets = append(ipv4Ranges.IPNets, ipNet)
			if ip.IsLoopback() {
				ipv4Ranges.HasLoopBackIP = true
			}
		} else {
			ipv6Ranges.IPNets = append(ipv6Ranges.IPNets, ipNet)
			if ip.IsLoopback() {
				ipv6Ranges.HasLoopBackIP = true
			}
		}
	}
	return ipv4Ranges, ipv6Ranges, nil
}
// logConfig dumps the iptables-related environment variables and the parsed
// config, for debugging purposes.
func (cfg *IptablesConfigurator) logConfig() {
	names := []string{
		"ENVOY_PORT",
		"INBOUND_CAPTURE_PORT",
		"ISTIO_INBOUND_INTERCEPTION_MODE",
		"ISTIO_INBOUND_TPROXY_ROUTE_TABLE",
		"ISTIO_INBOUND_PORTS",
		"ISTIO_OUTBOUND_PORTS",
		"ISTIO_LOCAL_EXCLUDE_PORTS",
		"ISTIO_EXCLUDE_INTERFACES",
		"ISTIO_SERVICE_CIDR",
		"ISTIO_SERVICE_EXCLUDE_CIDR",
		"ISTIO_META_DNS_CAPTURE",
	}
	var b strings.Builder
	for i, name := range names {
		if i > 0 {
			b.WriteByte('\n')
		}
		b.WriteString(fmt.Sprintf("%s=%s", name, os.Getenv(name)))
	}
	log.Infof("Istio iptables environment:\n%s", b.String())
	cfg.cfg.Print()
}
// handleInboundPortsInclude emits the chains and rules that intercept
// inbound traffic and hand it to Envoy, honouring the TPROXY vs REDIRECT
// interception mode and the inbound include/exclude port lists. It is a
// no-op when InboundPortsInclude is empty. Rule emission order is
// significant and must not be rearranged.
func (cfg *IptablesConfigurator) handleInboundPortsInclude() {
	// Handling of inbound ports. Traffic will be redirected to Envoy, which will process and forward
	// to the local service. If not set, no inbound port will be intercepted by istio iptablesOrFail.
	var table string
	if cfg.cfg.InboundPortsInclude != "" {
		if cfg.cfg.InboundInterceptionMode == constants.TPROXY {
			// When using TPROXY, create a new chain for routing all inbound traffic to
			// Envoy. Any packet entering this chain gets marked with the ${INBOUND_TPROXY_MARK} mark,
			// so that they get routed to the loopback interface in order to get redirected to Envoy.
			// In the ISTIOINBOUND chain, '-j ISTIODIVERT' reroutes to the loopback
			// interface.
			// Mark all inbound packets.
			cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIODIVERT, constants.MANGLE, "-j", constants.MARK, "--set-mark",
				cfg.cfg.InboundTProxyMark)
			cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIODIVERT, constants.MANGLE, "-j", constants.ACCEPT)
			// Route all packets marked in chain ISTIODIVERT using routing table ${INBOUND_TPROXY_ROUTE_TABLE}.
			// TODO: (abhide): Move this out of this method
			cfg.ext.RunOrFail(
				constants.IP, "-f", "inet", "rule", "add", "fwmark", cfg.cfg.InboundTProxyMark, "lookup",
				cfg.cfg.InboundTProxyRouteTable)
			// In routing table ${INBOUND_TPROXY_ROUTE_TABLE}, create a single default rule to route all traffic to
			// the loopback interface.
			// TODO: (abhide): Move this out of this method
			err := cfg.ext.Run(constants.IP, "-f", "inet", "route", "add", "local", "default", "dev", "lo", "table",
				cfg.cfg.InboundTProxyRouteTable)
			if err != nil {
				// Diagnostics only: dump the routing tables when the add failed.
				// TODO: (abhide): Move this out of this method
				cfg.ext.RunOrFail(constants.IP, "route", "show", "table", "all")
			}
			// Create a new chain for redirecting inbound traffic to the common Envoy
			// port.
			// In the ISTIOINBOUND chain, '-j RETURN' bypasses Envoy and
			// '-j ISTIOTPROXY' redirects to Envoy.
			cfg.iptables.AppendVersionedRule("127.0.0.1/32", "::1/128", iptableslog.UndefinedCommand,
				constants.ISTIOTPROXY, constants.MANGLE, "!", "-d", constants.IPVersionSpecific,
				"-p", constants.TCP, "-j", constants.TPROXY,
				"--tproxy-mark", cfg.cfg.InboundTProxyMark+"/0xffffffff", "--on-port", cfg.cfg.InboundCapturePort)
			table = constants.MANGLE
		} else {
			table = constants.NAT
		}
		cfg.iptables.AppendRule(iptableslog.JumpInbound, constants.PREROUTING, table, "-p", constants.TCP,
			"-j", constants.ISTIOINBOUND)

		if cfg.cfg.InboundPortsInclude == "*" {
			// Makes sure SSH is not redirected
			cfg.iptables.AppendRule(iptableslog.ExcludeInboundPort, constants.ISTIOINBOUND, table, "-p", constants.TCP,
				"--dport", "22", "-j", constants.RETURN)
			// Apply any user-specified port exclusions.
			if cfg.cfg.InboundPortsExclude != "" {
				for _, port := range split(cfg.cfg.InboundPortsExclude) {
					cfg.iptables.AppendRule(iptableslog.ExcludeInboundPort, constants.ISTIOINBOUND, table, "-p", constants.TCP,
						"--dport", port, "-j", constants.RETURN)
				}
			}
			// Redirect remaining inbound traffic to Envoy.
			if cfg.cfg.InboundInterceptionMode == constants.TPROXY {
				// If an inbound packet belongs to an established socket, route it to the
				// loopback interface.
				cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOINBOUND, constants.MANGLE, "-p", constants.TCP,
					"-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", constants.ISTIODIVERT)
				// Otherwise, it's a new connection. Redirect it using TPROXY.
				cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOINBOUND, constants.MANGLE, "-p", constants.TCP,
					"-j", constants.ISTIOTPROXY)
			} else {
				cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOINBOUND, constants.NAT, "-p", constants.TCP,
					"-j", constants.ISTIOINREDIRECT)
			}
		} else {
			// User has specified a non-empty list of ports to be redirected to Envoy.
			for _, port := range split(cfg.cfg.InboundPortsInclude) {
				if cfg.cfg.InboundInterceptionMode == constants.TPROXY {
					cfg.iptables.AppendRule(iptableslog.IncludeInboundPort, constants.ISTIOINBOUND, constants.MANGLE, "-p", constants.TCP,
						"--dport", port, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", constants.ISTIODIVERT)
					cfg.iptables.AppendRule(iptableslog.IncludeInboundPort,
						constants.ISTIOINBOUND, constants.MANGLE, "-p", constants.TCP, "--dport", port, "-j", constants.ISTIOTPROXY)
				} else {
					cfg.iptables.AppendRule(iptableslog.IncludeInboundPort,
						constants.ISTIOINBOUND, constants.NAT, "-p", constants.TCP, "--dport", port, "-j", constants.ISTIOINREDIRECT)
				}
			}
		}
	}
}
// handleOutboundIncludeRules emits the outbound capture rules for one
// address family, using the caller-supplied append/insert callbacks (the V4
// or V6 variants of the builder). A wildcard range redirects all remaining
// outbound traffic; a non-empty CIDR list redirects only those ranges and
// returns everything else uncaptured. An empty, non-wildcard range emits
// nothing.
func (cfg *IptablesConfigurator) handleOutboundIncludeRules(
	rangeInclude NetworkRange,
	appendRule func(command iptableslog.Command, chain string, table string, params ...string) *builder.IptablesBuilder,
	insert func(command iptableslog.Command, chain string, table string, position int, params ...string) *builder.IptablesBuilder) {
	// Apply outbound IP inclusions.
	if rangeInclude.IsWildcard {
		// Wildcard specified. Redirect all remaining outbound traffic to Envoy.
		appendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT, "-j", constants.ISTIOREDIRECT)
		for _, internalInterface := range split(cfg.cfg.KubeVirtInterfaces) {
			insert(iptableslog.KubevirtCommand,
				constants.PREROUTING, constants.NAT, 1, "-i", internalInterface, "-j", constants.ISTIOREDIRECT)
		}
	} else if len(rangeInclude.IPNets) > 0 {
		// User has specified a non-empty list of cidrs to be redirected to Envoy.
		for _, cidr := range rangeInclude.IPNets {
			for _, internalInterface := range split(cfg.cfg.KubeVirtInterfaces) {
				insert(iptableslog.KubevirtCommand, constants.PREROUTING, constants.NAT, 1, "-i", internalInterface,
					"-d", cidr.String(), "-j", constants.ISTIOREDIRECT)
			}
			appendRule(iptableslog.UndefinedCommand,
				constants.ISTIOOUTPUT, constants.NAT, "-d", cidr.String(), "-j", constants.ISTIOREDIRECT)
		}
		// All other traffic is not redirected.
		appendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT, "-j", constants.RETURN)
	}
}
// shortCircuitKubeInternalInterface inserts a RETURN rule at the top of the
// nat PREROUTING chain for every KubeVirt interface, so traffic arriving on
// them bypasses redirection entirely.
func (cfg *IptablesConfigurator) shortCircuitKubeInternalInterface() {
	for _, intf := range split(cfg.cfg.KubeVirtInterfaces) {
		cfg.iptables.InsertRule(iptableslog.KubevirtCommand, constants.PREROUTING, constants.NAT, 1,
			"-i", intf, "-j", constants.RETURN)
	}
}
// shortCircuitExcludeInterfaces appends RETURN rules so that traffic
// entering or leaving any interface listed in ExcludeInterfaces bypasses
// capture in the nat table, and additionally in the mangle table when
// running in TPROXY interception mode. The two passes are kept separate so
// the nat rules are emitted before the mangle rules.
func (cfg *IptablesConfigurator) shortCircuitExcludeInterfaces() {
	for _, excludeInterface := range split(cfg.cfg.ExcludeInterfaces) {
		cfg.iptables.AppendRule(
			iptableslog.ExcludeInterfaceCommand, constants.PREROUTING, constants.NAT, "-i", excludeInterface, "-j", constants.RETURN)
		cfg.iptables.AppendRule(iptableslog.ExcludeInterfaceCommand, constants.OUTPUT, constants.NAT, "-o", excludeInterface, "-j", constants.RETURN)
	}

	if cfg.cfg.InboundInterceptionMode == constants.TPROXY {
		for _, excludeInterface := range split(cfg.cfg.ExcludeInterfaces) {
			cfg.iptables.AppendRule(
				iptableslog.ExcludeInterfaceCommand, constants.PREROUTING, constants.MANGLE, "-i", excludeInterface, "-j", constants.RETURN)
			cfg.iptables.AppendRule(iptableslog.ExcludeInterfaceCommand, constants.OUTPUT, constants.MANGLE, "-o", excludeInterface, "-j", constants.RETURN)
		}
	}
}
// SplitV4V6 partitions the given address strings into IPv4 and IPv6 lists.
func SplitV4V6(ips []string) (ipv4 []string, ipv6 []string) {
	for _, addr := range ips {
		// NOTE(review): net.ParseIP returns nil for unparsable input and
		// nil.To4() is also nil, so invalid strings fall into the ipv6
		// bucket — behaviour preserved as-is.
		if net.ParseIP(addr).To4() != nil {
			ipv4 = append(ipv4, addr)
			continue
		}
		ipv6 = append(ipv6, addr)
	}
	return ipv4, ipv6
}
// run translates the parsed config into the full iptables program: it
// builds the Istio chains, emits inbound/outbound capture rules, DNS
// redirection, and the TPROXY mark/connmark machinery, then executes the
// accumulated commands. Rule order is the contract here — do not reorder.
// On exit it best-effort persists the rule set via iptables-save.
func (cfg *IptablesConfigurator) run() {
	defer func() {
		// Best effort since we don't know if the commands exist
		_ = cfg.ext.Run(constants.IPTABLESSAVE)
		if cfg.cfg.EnableInboundIPv6 {
			_ = cfg.ext.Run(constants.IP6TABLESSAVE)
		}
	}()

	// Since OUTBOUND_IP_RANGES_EXCLUDE could carry ipv4 and ipv6 ranges
	// need to split them in different arrays one for ipv4 and one for ipv6
	// in order to not to fail
	ipv4RangesExclude, ipv6RangesExclude, err := cfg.separateV4V6(cfg.cfg.OutboundIPRangesExclude)
	if err != nil {
		panic(err)
	}
	if ipv4RangesExclude.IsWildcard {
		panic("Invalid value for OUTBOUND_IP_RANGES_EXCLUDE")
	}
	// FixMe: Do we need similar check for ipv6RangesExclude as well ??

	ipv4RangesInclude, ipv6RangesInclude, err := cfg.separateV4V6(cfg.cfg.OutboundIPRangesInclude)
	if err != nil {
		panic(err)
	}

	redirectDNS := cfg.cfg.RedirectDNS

	cfg.logConfig()

	if cfg.cfg.EnableInboundIPv6 {
		// TODO: (abhide): Move this out of this method
		cfg.ext.RunOrFail(constants.IP, "-6", "addr", "add", "::6/128", "dev", "lo")
	}

	cfg.shortCircuitExcludeInterfaces()

	// Do not capture internal interface.
	cfg.shortCircuitKubeInternalInterface()

	// Create a new chain for to hit tunnel port directly. Envoy will be listening on port acting as VPN tunnel.
	cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOINBOUND, constants.NAT, "-p", constants.TCP, "--dport",
		cfg.cfg.InboundTunnelPort, "-j", constants.RETURN)

	// Create a new chain for redirecting outbound traffic to the common Envoy port.
	// In both chains, '-j RETURN' bypasses Envoy and '-j ISTIOREDIRECT'
	// redirects to Envoy.
	cfg.iptables.AppendRule(iptableslog.UndefinedCommand,
		constants.ISTIOREDIRECT, constants.NAT, "-p", constants.TCP, "-j", constants.REDIRECT, "--to-ports", cfg.cfg.ProxyPort)

	// Use this chain also for redirecting inbound traffic to the common Envoy port
	// when not using TPROXY.
	cfg.iptables.AppendRule(iptableslog.InboundCapture, constants.ISTIOINREDIRECT, constants.NAT, "-p", constants.TCP, "-j", constants.REDIRECT,
		"--to-ports", cfg.cfg.InboundCapturePort)

	cfg.handleInboundPortsInclude()

	// TODO: change the default behavior to not intercept any output - user may use http_proxy or another
	// iptablesOrFail wrapper (like ufw). Current default is similar with 0.1
	// Jump to the ISTIOOUTPUT chain from OUTPUT chain for all tcp traffic, and UDP dns (if enabled)
	cfg.iptables.AppendRule(iptableslog.JumpOutbound, constants.OUTPUT, constants.NAT, "-p", constants.TCP, "-j", constants.ISTIOOUTPUT)

	// Apply port based exclusions. Must be applied before connections back to self are redirected.
	if cfg.cfg.OutboundPortsExclude != "" {
		for _, port := range split(cfg.cfg.OutboundPortsExclude) {
			cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT, "-p", constants.TCP,
				"--dport", port, "-j", constants.RETURN)
		}
	}

	// 127.0.0.6/::7 is bind connect from inbound passthrough cluster
	cfg.iptables.AppendVersionedRule("127.0.0.6/32", "::6/128", iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
		"-o", "lo", "-s", constants.IPVersionSpecific, "-j", constants.RETURN)

	for _, uid := range split(cfg.cfg.ProxyUID) {
		// Redirect app calls back to itself via Envoy when using the service VIP
		// e.g. appN => Envoy (client) => Envoy (server) => appN.
		// nolint: lll
		if redirectDNS {
			// When DNS is enabled, we skip this for port 53. This ensures we do not have:
			// app => istio-agent => Envoy inbound => dns server
			// Instead, we just have:
			// app => istio-agent => dns server
			cfg.iptables.AppendVersionedRule("127.0.0.1/32", "::1/128", iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
				"-o", "lo", "!", "-d", constants.IPVersionSpecific,
				"-p", "tcp", "!", "--dport", "53",
				"-m", "owner", "--uid-owner", uid, "-j", constants.ISTIOINREDIRECT)
		} else {
			cfg.iptables.AppendVersionedRule("127.0.0.1/32", "::1/128", iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
				"-o", "lo", "!", "-d", constants.IPVersionSpecific,
				"-m", "owner", "--uid-owner", uid, "-j", constants.ISTIOINREDIRECT)
		}

		// Do not redirect app calls to back itself via Envoy when using the endpoint address
		// e.g. appN => appN by lo
		// If loopback explicitly set via OutboundIPRangesInclude, then don't return.
		if !ipv4RangesInclude.HasLoopBackIP && !ipv6RangesInclude.HasLoopBackIP {
			if redirectDNS {
				// Users may have a DNS server that is on localhost. In these cases, applications may
				// send TCP traffic to the DNS server that we actually *do* want to intercept. To
				// handle this case, we exclude port 53 from this rule. Note: We cannot just move the
				// port 53 redirection rule further up the list, as we will want to avoid capturing
				// DNS requests from the proxy UID/GID
				cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT, "-o", "lo", "-p", "tcp",
					"!", "--dport", "53",
					"-m", "owner", "!", "--uid-owner", uid, "-j", constants.RETURN)
			} else {
				cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
					"-o", "lo", "-m", "owner", "!", "--uid-owner", uid, "-j", constants.RETURN)
			}
		}

		// Avoid infinite loops. Don't redirect Envoy traffic directly back to
		// Envoy for non-loopback traffic.
		cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
			"-m", "owner", "--uid-owner", uid, "-j", constants.RETURN)
	}

	for _, gid := range split(cfg.cfg.ProxyGID) {
		// Redirect app calls back to itself via Envoy when using the service VIP
		// e.g. appN => Envoy (client) => Envoy (server) => appN.
		cfg.iptables.AppendVersionedRule("127.0.0.1/32", "::1/128", iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
			"-o", "lo", "!", "-d", constants.IPVersionSpecific,
			"-m", "owner", "--gid-owner", gid, "-j", constants.ISTIOINREDIRECT)

		// Do not redirect app calls to back itself via Envoy when using the endpoint address
		// e.g. appN => appN by lo
		// If loopback explicitly set via OutboundIPRangesInclude, then don't return.
		if !ipv4RangesInclude.HasLoopBackIP && !ipv6RangesInclude.HasLoopBackIP {
			if redirectDNS {
				// Users may have a DNS server that is on localhost. In these cases, applications may
				// send TCP traffic to the DNS server that we actually *do* want to intercept. To
				// handle this case, we exclude port 53 from this rule. Note: We cannot just move the
				// port 53 redirection rule further up the list, as we will want to avoid capturing
				// DNS requests from the proxy UID/GID
				cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
					"-o", "lo", "-p", "tcp",
					"!", "--dport", "53",
					"-m", "owner", "!", "--gid-owner", gid, "-j", constants.RETURN)
			} else {
				cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
					"-o", "lo", "-m", "owner", "!", "--gid-owner", gid, "-j", constants.RETURN)
			}
		}

		// Avoid infinite loops. Don't redirect Envoy traffic directly back to
		// Envoy for non-loopback traffic.
		cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT, "-m", "owner", "--gid-owner", gid, "-j", constants.RETURN)
	}

	if redirectDNS {
		if cfg.cfg.CaptureAllDNS {
			// Redirect all TCP dns traffic on port 53 to the agent on port 15053
			// This will be useful for the CNI case where pod DNS server address cannot be decided.
			cfg.iptables.AppendRule(iptableslog.UndefinedCommand,
				constants.ISTIOOUTPUT, constants.NAT,
				"-p", constants.TCP,
				"--dport", "53",
				"-j", constants.REDIRECT,
				"--to-ports", constants.IstioAgentDNSListenerPort)
		} else {
			for _, s := range cfg.cfg.DNSServersV4 {
				// redirect all TCP dns traffic on port 53 to the agent on port 15053 for all servers
				// in etc/resolv.conf
				// We avoid redirecting all IP ranges to avoid infinite loops when there are local DNS proxies
				// such as: app -> istio dns server -> dnsmasq -> upstream
				// This ensures that we do not get requests from dnsmasq sent back to the agent dns server in a loop.
				// Note: If a user somehow configured etc/resolv.conf to point to dnsmasq and server X, and dnsmasq also
				// pointed to server X, this would not work. However, the assumption is that is not a common case.
				cfg.iptables.AppendRuleV4(iptableslog.UndefinedCommand,
					constants.ISTIOOUTPUT, constants.NAT,
					"-p", constants.TCP,
					"--dport", "53",
					"-d", s+"/32",
					"-j", constants.REDIRECT,
					"--to-ports", constants.IstioAgentDNSListenerPort)
			}
			for _, s := range cfg.cfg.DNSServersV6 {
				cfg.iptables.AppendRuleV6(iptableslog.UndefinedCommand,
					constants.ISTIOOUTPUT, constants.NAT,
					"-p", constants.TCP,
					"--dport", "53",
					"-d", s+"/128",
					"-j", constants.REDIRECT,
					"--to-ports", constants.IstioAgentDNSListenerPort)
			}
		}
	}

	// Skip redirection for Envoy-aware applications and
	// container-to-container traffic both of which explicitly use
	// localhost.
	cfg.iptables.AppendVersionedRule("127.0.0.1/32", "::1/128", iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT,
		"-d", constants.IPVersionSpecific, "-j", constants.RETURN)
	// Apply outbound IPv4 exclusions. Must be applied before inclusions.
	for _, cidr := range ipv4RangesExclude.IPNets {
		cfg.iptables.AppendRuleV4(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT, "-d", cidr.String(), "-j", constants.RETURN)
	}
	for _, cidr := range ipv6RangesExclude.IPNets {
		cfg.iptables.AppendRuleV6(iptableslog.UndefinedCommand, constants.ISTIOOUTPUT, constants.NAT, "-d", cidr.String(), "-j", constants.RETURN)
	}

	cfg.handleOutboundPortsInclude()

	cfg.handleOutboundIncludeRules(ipv4RangesInclude, cfg.iptables.AppendRuleV4, cfg.iptables.InsertRuleV4)
	cfg.handleOutboundIncludeRules(ipv6RangesInclude, cfg.iptables.AppendRuleV6, cfg.iptables.InsertRuleV6)

	if redirectDNS {
		HandleDNSUDP(
			AppendOps, cfg.iptables, cfg.ext, "",
			cfg.cfg.ProxyUID, cfg.cfg.ProxyGID,
			cfg.cfg.DNSServersV4, cfg.cfg.DNSServersV6, cfg.cfg.CaptureAllDNS)
	}

	if cfg.cfg.InboundInterceptionMode == constants.TPROXY {
		// save packet mark set by envoy.filters.listener.original_src as connection mark
		cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.PREROUTING, constants.MANGLE,
			"-p", constants.TCP, "-m", "mark", "--mark", cfg.cfg.InboundTProxyMark, "-j", "CONNMARK", "--save-mark")
		// If the packet is already marked with 1337, then return. This is to prevent mark envoy --> app traffic again.
		cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.OUTPUT, constants.MANGLE,
			"-p", constants.TCP, "-o", "lo", "-m", "mark", "--mark", cfg.cfg.InboundTProxyMark, "-j", constants.RETURN)
		for _, uid := range split(cfg.cfg.ProxyUID) {
			// mark outgoing packets from envoy to workload by pod ip
			// app call VIP --> envoy outbound -(mark 1338)-> envoy inbound --> app
			cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.OUTPUT, constants.MANGLE,
				"!", "-d", "127.0.0.1/32", "-p", constants.TCP, "-o", "lo", "-m", "owner", "--uid-owner", uid, "-j", constants.MARK, "--set-mark", outboundMark)
		}
		for _, gid := range split(cfg.cfg.ProxyGID) {
			// mark outgoing packets from envoy to workload by pod ip
			// app call VIP --> envoy outbound -(mark 1338)-> envoy inbound --> app
			cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.OUTPUT, constants.MANGLE,
				"!", "-d", "127.0.0.1/32", "-p", constants.TCP, "-o", "lo", "-m", "owner", "--gid-owner", gid, "-j", constants.MARK, "--set-mark", outboundMark)
		}
		// mark outgoing packets from workload, match it to policy routing entry setup for TPROXY mode
		cfg.iptables.AppendRule(iptableslog.UndefinedCommand, constants.OUTPUT, constants.MANGLE,
			"-p", constants.TCP, "-m", "connmark", "--mark", cfg.cfg.InboundTProxyMark, "-j", "CONNMARK", "--restore-mark")
		// prevent infinite redirect
		cfg.iptables.InsertRule(iptableslog.UndefinedCommand, constants.ISTIOINBOUND, constants.MANGLE, 1,
			"-p", constants.TCP, "-m", "mark", "--mark", cfg.cfg.InboundTProxyMark, "-j", constants.RETURN)
		// prevent intercept traffic from envoy/pilot-agent ==> app by 127.0.0.6 --> podip
		cfg.iptables.InsertRule(iptableslog.UndefinedCommand, constants.ISTIOINBOUND, constants.MANGLE, 2,
			"-p", constants.TCP, "-s", "127.0.0.6/32", "-i", "lo", "-j", constants.RETURN)
		// prevent intercept traffic from app ==> app by pod ip
		cfg.iptables.InsertRule(iptableslog.UndefinedCommand, constants.ISTIOINBOUND, constants.MANGLE, 3,
			"-p", constants.TCP, "-i", "lo", "-m", "mark", "!", "--mark", outboundMark, "-j", constants.RETURN)
	}
	cfg.executeCommands()
}
// UDPRuleApplier applies or deletes a single DNS-related iptables rule.
// In append mode the rule is recorded on the builder; in delete mode the
// corresponding "-D" command is executed immediately via ext.
type UDPRuleApplier struct {
	iptables *builder.IptablesBuilder
	ext      dep.Dependencies
	ops      Ops    // AppendOps or DeleteOps
	table    string // iptables table the rule targets
	chain    string // chain within the table
	cmd      string // command invoked for the immediate delete path
}
// RunV4 applies the rule described by args on the IPv4 side: append mode
// records it on the builder, delete mode executes the "-D" command quietly,
// ignoring failures.
func (f UDPRuleApplier) RunV4(args ...string) {
	if f.ops == AppendOps {
		f.iptables.AppendRuleV4(iptableslog.UndefinedCommand, f.chain, f.table, args...)
		return
	}
	if f.ops == DeleteOps {
		deleteArgs := append([]string{"-t", f.table, opsToString[f.ops], f.chain}, args...)
		f.ext.RunQuietlyAndIgnore(f.cmd, deleteArgs...)
	}
}
// RunV6 applies the rule described by args on the IPv6 side: append mode
// records it on the builder, delete mode executes the "-D" command quietly,
// ignoring failures.
func (f UDPRuleApplier) RunV6(args ...string) {
	if f.ops == AppendOps {
		f.iptables.AppendRuleV6(iptableslog.UndefinedCommand, f.chain, f.table, args...)
		return
	}
	if f.ops == DeleteOps {
		deleteArgs := append([]string{"-t", f.table, opsToString[f.ops], f.chain}, args...)
		f.ext.RunQuietlyAndIgnore(f.cmd, deleteArgs...)
	}
}
// Run applies the rule to both the IPv4 and IPv6 sides.
func (f UDPRuleApplier) Run(args ...string) {
	f.RunV4(args...)
	f.RunV6(args...)
}

// WithChain returns a copy of the applier targeting the given chain; the
// receiver is a value, so the original applier is left unchanged.
func (f UDPRuleApplier) WithChain(chain string) UDPRuleApplier {
	f.chain = chain
	return f
}

// WithTable returns a copy of the applier targeting the given table.
func (f UDPRuleApplier) WithTable(table string) UDPRuleApplier {
	f.table = table
	return f
}
// HandleDNSUDP is a helper function to tackle with DNS UDP specific operations.
// This helps the creation logic of DNS UDP rules in sync with the deletion.
//
// ops selects append vs delete; cmd is the binary invoked on the delete
// path; proxyUID/proxyGID are comma-separated ID lists whose own port-53
// traffic must not be captured; captureAllDNS redirects all UDP port-53
// traffic rather than only the listed resolver addresses.
func HandleDNSUDP(
	ops Ops, iptables *builder.IptablesBuilder, ext dep.Dependencies,
	cmd, proxyUID, proxyGID string, dnsServersV4 []string, dnsServersV6 []string, captureAllDNS bool) {
	f := UDPRuleApplier{
		iptables: iptables,
		ext:      ext,
		ops:      ops,
		table:    constants.NAT,
		chain:    constants.OUTPUT,
		cmd:      cmd,
	}
	// Make sure that upstream DNS requests from agent/envoy dont get captured.
	// TODO: add ip6 as well
	for _, uid := range split(proxyUID) {
		f.Run("-p", "udp", "--dport", "53", "-m", "owner", "--uid-owner", uid, "-j", constants.RETURN)
	}
	for _, gid := range split(proxyGID) {
		f.Run("-p", "udp", "--dport", "53", "-m", "owner", "--gid-owner", gid, "-j", constants.RETURN)
	}

	if captureAllDNS {
		// Redirect all UDP dns traffic on port 53 to the agent on port 15053
		// This will be useful for the CNI case where pod DNS server address cannot be decided.
		f.Run("-p", "udp", "--dport", "53", "-j", constants.REDIRECT, "--to-port", constants.IstioAgentDNSListenerPort)
	} else {
		// redirect all UDP dns traffic on port 53 to the agent on port 15053 for all servers
		// in etc/resolv.conf
		// We avoid redirecting all IP ranges to avoid infinite loops when there are local DNS proxies
		// such as: app -> istio dns server -> dnsmasq -> upstream
		// This ensures that we do not get requests from dnsmasq sent back to the agent dns server in a loop.
		// Note: If a user somehow configured etc/resolv.conf to point to dnsmasq and server X, and dnsmasq also
		// pointed to server X, this would not work. However, the assumption is that is not a common case.
		for _, s := range dnsServersV4 {
			f.RunV4("-p", "udp", "--dport", "53", "-d", s+"/32",
				"-j", constants.REDIRECT, "--to-port", constants.IstioAgentDNSListenerPort)
		}
		for _, s := range dnsServersV6 {
			f.RunV6("-p", "udp", "--dport", "53", "-d", s+"/128",
				"-j", constants.REDIRECT, "--to-port", constants.IstioAgentDNSListenerPort)
		}
	}
	// Split UDP DNS traffic to separate conntrack zones
	addConntrackZoneDNSUDP(f.WithTable(constants.RAW), proxyUID, proxyGID, dnsServersV4, dnsServersV6, captureAllDNS)
}
// addConntrackZoneDNSUDP is a helper function to add iptables rules to split DNS traffic
// in two separate conntrack zones to avoid issues with UDP conntrack race conditions.
// Traffic that goes from istio to DNS servers and vice versa are zone 1 and traffic from
// DNS client to istio and vice versa goes to zone 2.
// The caller passes an applier already retargeted at the raw table, where the
// CT target is valid.
func addConntrackZoneDNSUDP(
	f UDPRuleApplier, proxyUID, proxyGID string, dnsServersV4 []string, dnsServersV6 []string, captureAllDNS bool) {
	// TODO: add ip6 as well
	for _, uid := range split(proxyUID) {
		// Packets with dst port 53 from istio to zone 1. These are Istio calls to upstream resolvers
		f.Run("-p", "udp", "--dport", "53", "-m", "owner", "--uid-owner", uid, "-j", constants.CT, "--zone", "1")
		// Packets with src port 15053 from istio to zone 2. These are Istio response packets to application clients
		f.Run("-p", "udp", "--sport", "15053", "-m", "owner", "--uid-owner", uid, "-j", constants.CT, "--zone", "2")
	}
	for _, gid := range split(proxyGID) {
		// Packets with dst port 53 from istio to zone 1. These are Istio calls to upstream resolvers
		f.Run("-p", "udp", "--dport", "53", "-m", "owner", "--gid-owner", gid, "-j", constants.CT, "--zone", "1")
		// Packets with src port 15053 from istio to zone 2. These are Istio response packets to application clients
		f.Run("-p", "udp", "--sport", "15053", "-m", "owner", "--gid-owner", gid, "-j", constants.CT, "--zone", "2")
	}
	if captureAllDNS {
		// Not specifying destination address is useful for the CNI case where pod DNS server address cannot be decided.
		// Mark all UDP dns traffic with dst port 53 as zone 2. These are application client packets towards DNS resolvers.
		f.Run("-p", "udp", "--dport", "53",
			"-j", constants.CT, "--zone", "2")
		// Mark all UDP dns traffic with src port 53 as zone 1. These are response packets from the DNS resolvers.
		f.WithChain(constants.PREROUTING).Run("-p", "udp", "--sport", "53",
			"-j", constants.CT, "--zone", "1")
	} else {
		// Go through all DNS servers in etc/resolv.conf and mark the packets based on these destination addresses.
		for _, s := range dnsServersV4 {
			// Mark all UDP dns traffic with dst port 53 as zone 2. These are application client packets towards DNS resolvers.
			f.RunV4("-p", "udp", "--dport", "53", "-d", s+"/32",
				"-j", constants.CT, "--zone", "2")
			// Mark all UDP dns traffic with src port 53 as zone 1. These are response packets from the DNS resolvers.
			f.WithChain(constants.PREROUTING).RunV4("-p", "udp", "--sport", "53", "-d", s+"/32",
				"-j", constants.CT, "--zone", "1")
		}
		for _, s := range dnsServersV6 {
			// Mark all UDP dns traffic with dst port 53 as zone 2. These are application client packets towards DNS resolvers.
			f.RunV6("-p", "udp", "--dport", "53", "-d", s+"/128",
				"-j", constants.CT, "--zone", "2")
			// Mark all UDP dns traffic with src port 53 as zone 1. These are response packets from the DNS resolvers.
			f.WithChain(constants.PREROUTING).RunV6("-p", "udp", "--sport", "53", "-d", s+"/128",
				"-j", constants.CT, "--zone", "1")
		}
	}
}
// handleOutboundPortsInclude appends a NAT rule for every explicitly included
// outbound TCP port, redirecting matching traffic into the Istio redirect chain.
func (cfg *IptablesConfigurator) handleOutboundPortsInclude() {
	if cfg.cfg.OutboundPortsInclude == "" {
		return
	}
	for _, port := range split(cfg.cfg.OutboundPortsInclude) {
		cfg.iptables.AppendRule(iptableslog.UndefinedCommand,
			constants.ISTIOOUTPUT, constants.NAT, "-p", constants.TCP, "--dport", port, "-j", constants.ISTIOREDIRECT)
	}
}
// createRulesFile writes contents to f through a buffered writer and always
// closes f before returning. Write and flush failures are both reported with
// context; previously the Flush error was returned raw, inconsistent with the
// wrapped WriteString error.
func (cfg *IptablesConfigurator) createRulesFile(f *os.File, contents string) error {
	defer f.Close()
	log.Infof("Writing following contents to rules file: %v\n%v", f.Name(), strings.TrimSpace(contents))
	writer := bufio.NewWriter(f)
	if _, err := writer.WriteString(contents); err != nil {
		return fmt.Errorf("unable to write iptables-restore file: %v", err)
	}
	// Flush may surface write errors deferred by the buffered writer.
	if err := writer.Flush(); err != nil {
		return fmt.Errorf("unable to flush iptables-restore file: %v", err)
	}
	return nil
}
// executeIptablesCommands runs each command in order; element 0 is the binary
// and the remaining elements are its arguments. Failure handling is delegated
// to the executor's RunOrFail.
func (cfg *IptablesConfigurator) executeIptablesCommands(commands [][]string) {
	for _, cmd := range commands {
		if len(cmd) <= 1 {
			cfg.ext.RunOrFail(cmd[0])
			continue
		}
		cfg.ext.RunOrFail(cmd[0], cmd[1:]...)
	}
}
// executeIptablesRestoreCommand serializes the accumulated rules (IPv4 or
// IPv6) to a file and feeds it to iptables-restore / ip6tables-restore.
// If cfg.cfg.OutputPath is set the rules file is written there and kept;
// otherwise a temporary file is used and removed afterwards.
func (cfg *IptablesConfigurator) executeIptablesRestoreCommand(isIpv4 bool) error {
	var data, filename, cmd string
	if isIpv4 {
		data = cfg.iptables.BuildV4Restore()
		filename = fmt.Sprintf("iptables-rules-%d.txt", time.Now().UnixNano())
		cmd = constants.IPTABLESRESTORE
	} else {
		data = cfg.iptables.BuildV6Restore()
		filename = fmt.Sprintf("ip6tables-rules-%d.txt", time.Now().UnixNano())
		cmd = constants.IP6TABLESRESTORE
	}
	var rulesFile *os.File
	var err error
	if cfg.cfg.OutputPath != "" {
		// Print the iptables rules into the given output file.
		rulesFile, err = os.OpenFile(cfg.cfg.OutputPath, os.O_CREATE|os.O_WRONLY, 0o644)
		if err != nil {
			return fmt.Errorf("unable to open iptables rules output file %v: %v", cfg.cfg.OutputPath, err)
		}
	} else {
		// Otherwise create a temporary file to write iptables rules to, which will be cleaned up at the end.
		rulesFile, err = os.CreateTemp("", filename)
		if err != nil {
			return fmt.Errorf("unable to create iptables-restore file: %v", err)
		}
		defer os.Remove(rulesFile.Name())
	}
	// createRulesFile closes rulesFile.
	if err := cfg.createRulesFile(rulesFile, data); err != nil {
		return err
	}
	// --noflush to prevent flushing/deleting previous contents from table
	cfg.ext.RunOrFail(cmd, "--noflush", rulesFile.Name())
	return nil
}
// executeCommands flushes all accumulated rules to the kernel, either through
// iptables-restore/ip6tables-restore (RestoreFormat) or by running individual
// iptables/ip6tables invocations. A restore failure terminates the process.
func (cfg *IptablesConfigurator) executeCommands() {
	if !cfg.cfg.RestoreFormat {
		// Execute individual iptables / ip6tables commands.
		cfg.executeIptablesCommands(cfg.iptables.BuildV4())
		cfg.executeIptablesCommands(cfg.iptables.BuildV6())
		return
	}
	// Restore format: run iptables-restore for v4, then ip6tables-restore.
	for _, ipv4 := range []bool{true, false} {
		if err := cfg.executeIptablesRestoreCommand(ipv4); err != nil {
			log.Errorf("Failed to execute iptables-restore command: %v", err)
			os.Exit(1)
		}
	}
}
|
[
"\"ENVOY_PORT\"",
"\"INBOUND_CAPTURE_PORT\"",
"\"ISTIO_INBOUND_INTERCEPTION_MODE\"",
"\"ISTIO_INBOUND_TPROXY_ROUTE_TABLE\"",
"\"ISTIO_INBOUND_PORTS\"",
"\"ISTIO_OUTBOUND_PORTS\"",
"\"ISTIO_LOCAL_EXCLUDE_PORTS\"",
"\"ISTIO_EXCLUDE_INTERFACES\"",
"\"ISTIO_SERVICE_CIDR\"",
"\"ISTIO_SERVICE_EXCLUDE_CIDR\"",
"\"ISTIO_META_DNS_CAPTURE\""
] |
[] |
[
"ISTIO_OUTBOUND_PORTS",
"ISTIO_EXCLUDE_INTERFACES",
"ISTIO_SERVICE_CIDR",
"ENVOY_PORT",
"ISTIO_META_DNS_CAPTURE",
"ISTIO_SERVICE_EXCLUDE_CIDR",
"INBOUND_CAPTURE_PORT",
"ISTIO_LOCAL_EXCLUDE_PORTS",
"ISTIO_INBOUND_INTERCEPTION_MODE",
"ISTIO_INBOUND_PORTS",
"ISTIO_INBOUND_TPROXY_ROUTE_TABLE"
] |
[]
|
["ISTIO_OUTBOUND_PORTS", "ISTIO_EXCLUDE_INTERFACES", "ISTIO_SERVICE_CIDR", "ENVOY_PORT", "ISTIO_META_DNS_CAPTURE", "ISTIO_SERVICE_EXCLUDE_CIDR", "INBOUND_CAPTURE_PORT", "ISTIO_LOCAL_EXCLUDE_PORTS", "ISTIO_INBOUND_INTERCEPTION_MODE", "ISTIO_INBOUND_PORTS", "ISTIO_INBOUND_TPROXY_ROUTE_TABLE"]
|
go
| 11 | 0 | |
pypy/module/rctime/interp_time.py
|
from pypy.rpython.tool import rffi_platform as platform
from pypy.rpython.lltypesystem import rffi
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import W_Root, ObjSpace
from pypy.rpython.lltypesystem import lltype
from pypy.rlib.rarithmetic import ovfcheck_float_to_int
from pypy.rlib import rposix
from pypy.translator.tool.cbuild import ExternalCompilationInfo
import math
import os
import sys
import time as pytime
# Platform discriminators used throughout this module.
_POSIX = os.name == "posix"
_WIN = os.name == "nt"

_includes = ["time.h"]
if _POSIX:
    _includes.append('sys/time.h')

class CConfig:
    # Declarative description of the C constants/types this module needs;
    # resolved into concrete values by platform.configure() below.
    _compilation_info_ = ExternalCompilationInfo(
        includes = _includes
    )
    CLOCKS_PER_SEC = platform.ConstantInteger("CLOCKS_PER_SEC")
    clock_t = platform.SimpleType("clock_t", rffi.ULONG)
    time_t = platform.SimpleType("time_t", rffi.LONG)
    size_t = platform.SimpleType("size_t", rffi.LONG)
    has_gettimeofday = platform.Has('gettimeofday')
if _POSIX:
    calling_conv = 'c'
    # POSIX struct tm includes the glibc/BSD extensions tm_gmtoff and tm_zone.
    CConfig.timeval = platform.Struct("struct timeval",
        [("tv_sec", rffi.INT),
         ("tv_usec", rffi.INT)])
    CConfig.tm = platform.Struct("struct tm", [("tm_sec", rffi.INT),
        ("tm_min", rffi.INT), ("tm_hour", rffi.INT), ("tm_mday", rffi.INT),
        ("tm_mon", rffi.INT), ("tm_year", rffi.INT), ("tm_wday", rffi.INT),
        ("tm_yday", rffi.INT), ("tm_isdst", rffi.INT), ("tm_gmtoff", rffi.LONG),
        ("tm_zone", rffi.CCHARP)])
elif _WIN:
    calling_conv = 'win'
    # Windows struct tm lacks tm_gmtoff/tm_zone; describe only portable fields.
    CConfig.tm = platform.Struct("struct tm", [("tm_sec", rffi.INT),
        ("tm_min", rffi.INT), ("tm_hour", rffi.INT), ("tm_mday", rffi.INT),
        ("tm_mon", rffi.INT), ("tm_year", rffi.INT), ("tm_wday", rffi.INT),
        ("tm_yday", rffi.INT), ("tm_isdst", rffi.INT)])

class cConfig:
    # Namespace populated below with the resolved constants and types.
    pass

for k, v in platform.configure(CConfig).items():
    setattr(cConfig, k, v)
cConfig.tm.__name__ = "_tm"
cConfig.tm.__name__ = "_tm"
def external(name, args, result):
    # Declare a C library function with this module's compilation info.
    # threadsafe=False: these calls do not release the GIL.
    return rffi.llexternal(name, args, result,
                           compilation_info=CConfig._compilation_info_,
                           calling_conv=calling_conv,
                           threadsafe=False)
if _POSIX:
    cConfig.timeval.__name__ = "_timeval"
    timeval = cConfig.timeval

CLOCKS_PER_SEC = cConfig.CLOCKS_PER_SEC
clock_t = cConfig.clock_t
time_t = cConfig.time_t
size_t = cConfig.size_t
tm = cConfig.tm
# Module-global scratch "struct tm" filled in by _gettmarg(); the pointer it
# returns is only valid until the next _gettmarg() call.
glob_buf = lltype.malloc(tm, flavor='raw', zero=True)

if cConfig.has_gettimeofday:
    c_gettimeofday = external('gettimeofday', [rffi.VOIDP, rffi.VOIDP], rffi.INT)

# Low-level bindings to the C time functions used below.
TIME_TP = rffi.CArrayPtr(time_t)
TM_P = lltype.Ptr(tm)
c_clock = external('clock', [TIME_TP], clock_t)
c_time = external('time', [TIME_TP], time_t)
c_ctime = external('ctime', [TIME_TP], rffi.CCHARP)
c_gmtime = external('gmtime', [TIME_TP], TM_P)
c_mktime = external('mktime', [TM_P], time_t)
c_asctime = external('asctime', [TM_P], rffi.CCHARP)
c_localtime = external('localtime', [TIME_TP], TM_P)
if _POSIX:
    c_tzset = external('tzset', [], lltype.Void)
c_strftime = external('strftime', [rffi.CCHARP, size_t, rffi.CCHARP, TM_P],
                      size_t)
def _init_accept2dyear():
return (1, 0)[bool(os.getenv("PYTHONY2K"))]
def _init_timezone():
    """Compute (timezone, daylight, tzname, altzone) for the time module.

    On POSIX this samples localtime() at a January and a July timestamp to
    derive the standard/DST UTC offsets and zone names; on other platforms
    neutral defaults are returned.
    """
    timezone = daylight = altzone = 0
    tzname = ["", ""]

    # pypy cant' use in_dll to access global exported variables
    # so we can't compute these attributes
    # if _WIN:
    #     cdll.msvcrt._tzset()
    #
    #     timezone = c_long.in_dll(cdll.msvcrt, "_timezone").value
    #     if hasattr(cdll.msvcrt, "altzone"):
    #         altzone = c_long.in_dll(cdll.msvcrt, "altzone").value
    #     else:
    #         altzone = timezone - 3600
    #     daylight = c_long.in_dll(cdll.msvcrt, "_daylight").value
    #     tzname = _tzname_t.in_dll(cdll.msvcrt, "_tzname")
    #     tzname = (tzname.tzname_0, tzname.tzname_1)

    if _POSIX:
        YEAR = (365 * 24 + 6) * 3600
        # Round the current time down to a multiple of YEAR: a timestamp
        # that falls in January.
        t = (((c_time(lltype.nullptr(TIME_TP.TO))) / YEAR) * YEAR)
        # we cannot have reference to stack variable, put it on the heap
        t_ref = lltype.malloc(TIME_TP.TO, 1, flavor='raw')
        t_ref[0] = t
        p = c_localtime(t_ref)
        janzone = -p.c_tm_gmtoff
        tm_zone = rffi.charp2str(p.c_tm_zone)
        janname = [" ", tm_zone][bool(tm_zone)]
        # Half a year later: a timestamp that falls in July.
        tt = t + YEAR / 2
        t_ref[0] = tt
        p = c_localtime(t_ref)
        lltype.free(t_ref, flavor='raw')
        tm_zone = rffi.charp2str(p.c_tm_zone)
        julyzone = -p.c_tm_gmtoff
        julyname = [" ", tm_zone][bool(tm_zone)]

        if janzone < julyzone:
            # DST is reversed in the southern hemisphere
            timezone = julyzone
            altzone = janzone
            daylight = int(janzone != julyzone)
            tzname = [julyname, janname]
        else:
            timezone = janzone
            altzone = julyzone
            daylight = int(janzone != julyzone)
            tzname = [janname, julyname]

    return timezone, daylight, tzname, altzone
def _get_error_msg():
    """Return the OS error message for the current C-level errno."""
    return os.strerror(rposix.get_errno())
def sleep(secs):
    """sleep(seconds): delay execution; delegates to the host time.sleep()."""
    pytime.sleep(secs)
sleep.unwrap_spec = [float]
def _get_module_object(space, obj_name):
    # Fetch an attribute from the app-level 'time' module.
    w_module = space.getbuiltinmodule('time')
    w_obj = space.getattr(w_module, space.wrap(obj_name))
    return w_obj
def _set_module_object(space, obj_name, w_obj_value):
    # Store an attribute on the app-level 'time' module.
    w_module = space.getbuiltinmodule('time')
    space.setattr(w_module, space.wrap(obj_name), w_obj_value)
def _get_inttime(space, w_seconds):
    """Convert an optional app-level seconds value to a C time_t int,
    defaulting to the current time when None."""
    # w_seconds can be a wrapped None (it will be automatically wrapped
    # in the callers, so we never get a real None here).
    if space.is_w(w_seconds, space.w_None):
        seconds = pytime.time()
    else:
        seconds = space.float_w(w_seconds)
    try:
        # Reject floats that do not fit a machine int (time_t).
        return ovfcheck_float_to_int(seconds)
    except OverflowError:
        raise OperationError(space.w_ValueError,
                             space.wrap("time argument too large"))
def _tm_to_tuple(space, t):
    """Convert a C struct tm pointer into an app-level struct_time.

    Applies the usual normalizations: year +1900, month 1-based,
    weekday rotated so Monday == 0, yday 1-based.
    """
    time_tuple = []

    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_year') + 1900))
    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_mon') + 1))  # want january == 1
    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_mday')))
    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_hour')))
    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_min')))
    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_sec')))
    time_tuple.append(space.wrap((rffi.getintfield(t, 'c_tm_wday') + 6) % 7))  # want monday == 0
    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_yday') + 1))  # want january, 1 == 1
    time_tuple.append(space.wrap(rffi.getintfield(t, 'c_tm_isdst')))

    w_struct_time = _get_module_object(space, 'struct_time')
    w_time_tuple = space.newtuple(time_tuple)
    return space.call_function(w_struct_time, w_time_tuple)
def _gettmarg(space, w_tup, allowNone=True):
    """Convert an app-level 9-tuple (or None) into a C struct tm pointer.

    Returns a pointer into the module-global scratch buffer glob_buf (or the
    C library's static localtime buffer for the None case), so the result is
    only valid until the next call. Performs the inverse normalizations of
    _tm_to_tuple (year -1900, month 0-based, weekday rotated) and applies the
    accept2dyear two-digit-year heuristic.
    """
    if allowNone and space.is_w(w_tup, space.w_None):
        # default to the current local time
        tt = int(pytime.time())
        t_ref = lltype.malloc(TIME_TP.TO, 1, flavor='raw')
        t_ref[0] = tt
        pbuf = c_localtime(t_ref)
        lltype.free(t_ref, flavor='raw')
        if not pbuf:
            raise OperationError(space.w_ValueError,
                                 space.wrap(_get_error_msg()))
        return pbuf

    tup_w = space.unpackiterable(w_tup)
    if len(tup_w) != 9:
        raise OperationError(space.w_TypeError,
                             space.wrap("argument must be sequence of "
                                        "length 9, not %d" % len(tup_w)))

    y = space.int_w(tup_w[0])
    rffi.setintfield(glob_buf, 'c_tm_mon', space.int_w(tup_w[1]))
    rffi.setintfield(glob_buf, 'c_tm_mday', space.int_w(tup_w[2]))
    rffi.setintfield(glob_buf, 'c_tm_hour', space.int_w(tup_w[3]))
    rffi.setintfield(glob_buf, 'c_tm_min', space.int_w(tup_w[4]))
    rffi.setintfield(glob_buf, 'c_tm_sec', space.int_w(tup_w[5]))
    rffi.setintfield(glob_buf, 'c_tm_wday', space.int_w(tup_w[6]))
    rffi.setintfield(glob_buf, 'c_tm_yday', space.int_w(tup_w[7]))
    rffi.setintfield(glob_buf, 'c_tm_isdst', space.int_w(tup_w[8]))
    if _POSIX:
        # actually never happens, but makes annotator happy
        glob_buf.c_tm_zone = lltype.nullptr(rffi.CCHARP.TO)
        rffi.setintfield(glob_buf, 'c_tm_gmtoff', 0)

    # Two-digit years: map 69-99 -> 19xx, 0-68 -> 20xx when accept2dyear is on.
    w_accept2dyear = _get_module_object(space, "accept2dyear")
    accept2dyear = space.int_w(w_accept2dyear)

    if y < 1900:
        if not accept2dyear:
            raise OperationError(space.w_ValueError,
                                 space.wrap("year >= 1900 required"))

        if 69 <= y <= 99:
            y += 1900
        elif 0 <= y <= 68:
            y += 2000
        else:
            raise OperationError(space.w_ValueError,
                                 space.wrap("year out of range"))

    if rffi.getintfield(glob_buf, 'c_tm_wday') < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("day of week out of range"))

    rffi.setintfield(glob_buf, 'c_tm_year', y - 1900)
    rffi.setintfield(glob_buf, 'c_tm_mon',
                     rffi.getintfield(glob_buf, 'c_tm_mon') - 1)
    rffi.setintfield(glob_buf, 'c_tm_wday',
                     (rffi.getintfield(glob_buf, 'c_tm_wday') + 1) % 7)
    rffi.setintfield(glob_buf, 'c_tm_yday',
                     rffi.getintfield(glob_buf, 'c_tm_yday') - 1)

    return glob_buf
def time(space):
    """time() -> floating point number

    Return the current time in seconds since the Epoch.
    Fractions of a second may be present if the system clock provides them."""
    secs = pytime.time()
    return space.wrap(secs)
if _WIN:
    # Scratch state for a performance-counter based clock on Windows.
    # NOTE(review): not referenced by the clock() implementation below in
    # this chunk — appears to be kept for a future/alternative implementation.
    class PCCache:
        pass
    pccache = PCCache()
    pccache.divisor = 0.0
    pccache.ctrStart = 0
def clock(space):
    """clock() -> floating point number

    Return the CPU time or real time since the start of the process or since
    the first call to clock().  This has as much precision as the system
    records."""
    # Delegates to the host interpreter's time.clock().
    return space.wrap(pytime.clock())
def ctime(space, w_seconds=None):
    """ctime([seconds]) -> string

    Convert a time in seconds since the Epoch to a string in local time.
    This is equivalent to asctime(localtime(seconds)). When the time tuple is
    not present, current time as returned by localtime() is used."""
    seconds = _get_inttime(space, w_seconds)

    # ctime() takes a time_t*; heap-allocate the value so we can pass a pointer.
    t_ref = lltype.malloc(TIME_TP.TO, 1, flavor='raw')
    t_ref[0] = seconds
    p = c_ctime(t_ref)
    lltype.free(t_ref, flavor='raw')
    if not p:
        raise OperationError(space.w_ValueError,
                             space.wrap("unconvertible time"))

    return space.wrap(rffi.charp2str(p)[:-1])  # get rid of new line
ctime.unwrap_spec = [ObjSpace, W_Root]
# by now w_tup is an optional argument (and not *args)
# because of the ext. compiler bugs in handling such arguments (*args, **kwds)
def asctime(space, w_tup=None):
    """asctime([tuple]) -> string

    Convert a time tuple to a string, e.g. 'Sat Jun 06 16:26:11 1998'.
    When the time tuple is not present, current time as returned by localtime()
    is used."""
    buf_value = _gettmarg(space, w_tup)
    p = c_asctime(buf_value)
    if not p:
        raise OperationError(space.w_ValueError,
                             space.wrap("unconvertible time"))

    return space.wrap(rffi.charp2str(p)[:-1])  # get rid of new line
asctime.unwrap_spec = [ObjSpace, W_Root]
def gmtime(space, w_seconds=None):
    """gmtime([seconds]) -> (tm_year, tm_mon, tm_day, tm_hour, tm_min,
                             tm_sec, tm_wday, tm_yday, tm_isdst)

    Convert seconds since the Epoch to a time tuple expressing UTC (a.k.a.
    GMT).  When 'seconds' is not passed in, convert the current time instead.
    """
    # rpython does not support that a variable has two incompatible builtins
    # as value so we have to duplicate the code. NOT GOOD! see localtime() too
    seconds = _get_inttime(space, w_seconds)
    t_ref = lltype.malloc(TIME_TP.TO, 1, flavor='raw')
    t_ref[0] = seconds
    p = c_gmtime(t_ref)
    lltype.free(t_ref, flavor='raw')

    if not p:
        raise OperationError(space.w_ValueError, space.wrap(_get_error_msg()))
    return _tm_to_tuple(space, p)
gmtime.unwrap_spec = [ObjSpace, W_Root]
def localtime(space, w_seconds=None):
    """localtime([seconds]) -> (tm_year, tm_mon, tm_day, tm_hour, tm_min,
                                tm_sec, tm_wday, tm_yday, tm_isdst)

    Convert seconds since the Epoch to a time tuple expressing local time.
    When 'seconds' is not passed in, convert the current time instead."""
    # Duplicated from gmtime() apart from the C call; see comment there.
    seconds = _get_inttime(space, w_seconds)
    t_ref = lltype.malloc(TIME_TP.TO, 1, flavor='raw')
    t_ref[0] = seconds
    p = c_localtime(t_ref)
    lltype.free(t_ref, flavor='raw')

    if not p:
        raise OperationError(space.w_ValueError, space.wrap(_get_error_msg()))
    return _tm_to_tuple(space, p)
localtime.unwrap_spec = [ObjSpace, W_Root]
def mktime(space, w_tup):
    """mktime(tuple) -> floating point number

    Convert a time tuple in local time to seconds since the Epoch."""
    buf = _gettmarg(space, w_tup, allowNone=False)
    # mktime() returns (time_t)-1 on failure.
    tt = c_mktime(buf)
    if tt == -1:
        raise OperationError(space.w_OverflowError,
                             space.wrap("mktime argument out of range"))

    return space.wrap(float(tt))
mktime.unwrap_spec = [ObjSpace, W_Root]
if _POSIX:
    def tzset(space):
        """tzset()

        Initialize, or reinitialize, the local timezone to the value stored in
        os.environ['TZ']. The TZ environment variable should be specified in
        standard Unix timezone format as documented in the tzset man page
        (eg. 'US/Eastern', 'Europe/Amsterdam'). Unknown timezones will silently
        fall back to UTC. If the TZ environment variable is not set, the local
        timezone is set to the systems best guess of wallclock time.
        Changing the TZ environment variable without calling tzset *may* change
        the local timezone used by methods such as localtime, but this behaviour
        should not be relied on"""
        c_tzset()

        # reset timezone, altzone, daylight and tzname on the app-level module
        timezone, daylight, tzname, altzone = _init_timezone()
        _set_module_object(space, "timezone", space.wrap(timezone))
        _set_module_object(space, 'daylight', space.wrap(daylight))
        tzname_w = [space.wrap(tzname[0]), space.wrap(tzname[1])]
        _set_module_object(space, 'tzname', space.newtuple(tzname_w))
        _set_module_object(space, 'altzone', space.wrap(altzone))
    tzset.unwrap_spec = [ObjSpace]
def strftime(space, format, w_tup=None):
    """strftime(format[, tuple]) -> string

    Convert a time tuple to a string according to a format specification.
    See the library reference manual for formatting codes. When the time tuple
    is not present, current time as returned by localtime() is used."""
    buf_value = _gettmarg(space, w_tup)

    # Checks added to make sure strftime() does not crash Python by
    # indexing blindly into some array for a textual representation
    # by some bad index (fixes bug #897625).
    # No check for year since handled in gettmarg().
    if rffi.getintfield(buf_value, 'c_tm_mon') < 0 or rffi.getintfield(buf_value, 'c_tm_mon') > 11:
        raise OperationError(space.w_ValueError,
                             space.wrap("month out of range"))
    if rffi.getintfield(buf_value, 'c_tm_mday') < 1 or rffi.getintfield(buf_value, 'c_tm_mday') > 31:
        raise OperationError(space.w_ValueError,
                             space.wrap("day of month out of range"))
    if rffi.getintfield(buf_value, 'c_tm_hour') < 0 or rffi.getintfield(buf_value, 'c_tm_hour') > 23:
        raise OperationError(space.w_ValueError,
                             space.wrap("hour out of range"))
    if rffi.getintfield(buf_value, 'c_tm_min') < 0 or rffi.getintfield(buf_value, 'c_tm_min') > 59:
        raise OperationError(space.w_ValueError,
                             space.wrap("minute out of range"))
    if rffi.getintfield(buf_value, 'c_tm_sec') < 0 or rffi.getintfield(buf_value, 'c_tm_sec') > 61:
        raise OperationError(space.w_ValueError,
                             space.wrap("seconds out of range"))
    if rffi.getintfield(buf_value, 'c_tm_yday') < 0 or rffi.getintfield(buf_value, 'c_tm_yday') > 365:
        raise OperationError(space.w_ValueError,
                             space.wrap("day of year out of range"))
    if rffi.getintfield(buf_value, 'c_tm_isdst') < -1 or rffi.getintfield(buf_value, 'c_tm_isdst') > 1:
        raise OperationError(space.w_ValueError,
                             space.wrap("daylight savings flag out of range"))

    # Start with a 1 KiB buffer and double until strftime() succeeds.
    i = 1024
    while True:
        outbuf = lltype.malloc(rffi.CCHARP.TO, i + 1, flavor='raw')
        buflen = c_strftime(outbuf, i, format, buf_value)

        if buflen > 0 or i >= 256 * len(format):
            # if the buffer is 256 times as long as the format,
            # it's probably not failing for lack of room!
            # More likely, the format yields an empty result,
            # e.g. an empty format, or %Z when the timezone
            # is unknown.
            if buflen < 0:
                buflen = 0  # should not occur
            outbuf[buflen] = '\x00'
            result = rffi.charp2str(outbuf)
            lltype.free(outbuf, flavor='raw')
            return space.wrap(result)

        # BUG FIX: the raw buffer was previously leaked on every retry;
        # release it before doubling the size and allocating a new one.
        lltype.free(outbuf, flavor='raw')
        i += i
strftime.unwrap_spec = [ObjSpace, str, W_Root]
|
[] |
[] |
[
"PYTHONY2K",
"TZ"
] |
[]
|
["PYTHONY2K", "TZ"]
|
python
| 2 | 0 | |
PyTorch/yolov3/utils/torch_utils.py
|
# YOLOv3 PyTorch utils
import datetime
import logging
import math
import os
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPS computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.

    Non-master ranks (local_rank not in {-1, 0}) block on a barrier before the
    body runs; rank 0 hits a matching barrier after the body, releasing them.
    local_rank == -1 means "not distributed": no barriers at all.
    """
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()
def init_torch_seeds(seed=0):
    """Seed torch and pick the cuDNN speed/reproducibility trade-off.

    seed == 0 selects deterministic cuDNN (slower, reproducible); any other
    seed enables benchmark mode (faster, less reproducible).
    See https://pytorch.org/docs/stable/notes/randomness.html
    """
    torch.manual_seed(seed)
    reproducible = (seed == 0)
    cudnn.benchmark = not reproducible
    cudnn.deterministic = reproducible
def date_modified(path=__file__):
    """Return the file's last-modification date as 'YYYY-M-D' (no zero padding)."""
    stamp = Path(path).stat().st_mtime
    d = datetime.datetime.fromtimestamp(stamp)
    return '%d-%d-%d' % (d.year, d.month, d.day)
def git_describe(path=Path(__file__).parent):  # path must be a directory
    # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
    # Returns '' when the directory is not inside a git repository (or git fails).
    s = f'git -C {path} describe --tags --long --always'
    try:
        return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
    except subprocess.CalledProcessError as e:
        return ''  # not a git repository
def select_device(device='', batch_size=None):
    """Resolve a device string ('cpu', 'dml', '0', '0,1,2,3' or '') to a
    torch.device, logging a summary of the selection."""
    # device = 'cpu' or '0' or '0,1,2,3'
    if (device.lower() == 'dml'):
        # DirectML backend: return immediately, no CUDA bookkeeping applies.
        return torch.device('dml')
    s = f'YOLOv3 🚀 {git_describe() or date_modified()} torch {torch.__version__} '  # string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        # os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability

    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        devices = device.split(',') if device else range(torch.cuda.device_count())  # i.e. 0,1,6,7
        n = len(devices)  # device count
        if n > 1 and batch_size:  # check batch_size is divisible by device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)
        for i, d in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'

    logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe
    return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
    # pytorch-accurate time: wait for pending CUDA kernels before reading the
    # wall clock, so GPU work is included in timing measurements.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()
def profile(x, ops, n=100, device=None):
    """Profile forward/backward speed of one or more ops on input x.

    Prints a table (params, GFLOPS, forward/backward ms, shapes); returns None.
    Example:
        x = torch.randn(16, 3, 640, 640)
        profile(x, [lambda x: x * torch.sigmoid(x), nn.SiLU()], n=100)
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # device
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except Exception:  # BUG FIX: was a bare except (swallowed KeyboardInterrupt/SystemExit)
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except Exception:  # no backward method (was a bare except)
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters
        print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
    """Return True if model is exactly a DataParallel or DistributedDataParallel wrapper."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return any(type(model) is t for t in parallel_types)
def de_parallel(model):
    """Unwrap a DP/DDP-wrapped model, returning the underlying single-GPU module."""
    if is_parallel(model):
        return model.module
    return model
def intersect_dicts(da, db, exclude=()):
    """Intersect da with db by key and tensor shape, keeping da's values.

    Keys containing any substring in exclude, keys missing from db, and keys
    whose shapes disagree are dropped.
    """
    out = {}
    for k, v in da.items():
        if k not in db:
            continue
        if any(pat in k for pat in exclude):
            continue
        if v.shape != db[k].shape:
            continue
        out[k] = v
    return out
def initialize_weights(model):
    """Apply the project's default module settings: leave Conv2d at PyTorch
    defaults, tune BatchNorm2d eps/momentum, and make activations in-place.

    Matches on exact type (not isinstance), as in the original implementation.
    """
    for m in model.modules():
        cls = type(m)
        if cls is nn.Conv2d:
            # kaiming init intentionally disabled; default init is used
            pass
        elif cls is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif cls in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6):
            m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
    """Return the indices of layers in model.module_list that are instances of mclass."""
    hits = []
    for idx, layer in enumerate(model.module_list):
        if isinstance(layer, mclass):
            hits.append(idx)
    return hits
def sparsity(model):
    """Return the global fraction of zero-valued parameters in the model."""
    total, zeros = 0., 0.
    for p in model.parameters():
        total += p.numel()
        zeros += (p == 0).sum()
    return zeros / total
def prune(model, amount=0.3):
    # Prune model to requested global sparsity.
    # L1-unstructured-prunes every Conv2d weight in place, then removes the
    # pruning re-parameterization so the zeros are baked into the weights.
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
            prune.remove(m, 'weight')  # make permanent
    print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
    """Fuse a Conv2d followed by BatchNorm2d into one equivalent Conv2d.

    See https://tehnokv.com/posts/fusing-batchnorm-and-conv/ for the math.
    The returned conv has requires_grad disabled and always carries a bias.
    """
    fused = nn.Conv2d(conv.in_channels,
                      conv.out_channels,
                      kernel_size=conv.kernel_size,
                      stride=conv.stride,
                      padding=conv.padding,
                      groups=conv.groups,
                      bias=True).requires_grad_(False).to(conv.weight.device)

    # Fold BN scale into the conv weights: W' = diag(gamma / sqrt(var + eps)) @ W
    bn_scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    conv_w = conv.weight.clone().view(conv.out_channels, -1)
    fused.weight.copy_(torch.mm(bn_scale, conv_w).view(fused.weight.shape))

    # Fold BN shift into the bias: b' = scale @ b_conv + (beta - gamma * mean / sqrt(var + eps))
    conv_b = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    bn_shift = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(torch.mm(bn_scale, conv_b.reshape(-1, 1)).reshape(-1) + bn_shift)

    return fused
def model_info(model, verbose=False, img_size=640):
    # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
    # Logs a one-line summary (layers / params / gradients / optional GFLOPS);
    # with verbose=True also prints per-parameter statistics.
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    try:  # FLOPS estimate via thop; silently skipped if unavailable or it fails
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
    except (ImportError, Exception):
        fs = ''

    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
    # Loads a pretrained model reshaped to n-class output.
    # NOTE: pretrained=True downloads torchvision weights on first use.
    model = torchvision.models.__dict__[name](pretrained=True)

    # ResNet model properties
    # input_size = [3, 224, 224]
    # input_space = 'RGB'
    # input_range = [0, 1]
    # mean = [0.485, 0.456, 0.406]
    # std = [0.229, 0.224, 0.225]

    # Reshape output to n classes: replace the final fully-connected layer's
    # weight/bias with freshly zero-initialized, trainable parameters.
    filters = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
    model.fc.out_features = n
    return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32):
    """Scale a batched image tensor (bs, 3, y, x) by `ratio`.

    Unless `same_shape` is set, the result is padded out to the nearest
    multiple of `gs`; otherwise it is padded back to the original size.
    Padding uses 0.447 (imagenet mean).
    """
    if ratio == 1.0:
        return img
    height, width = img.shape[2:]
    new_size = (int(height * ratio), int(width * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)
    if not same_shape:
        # Round the target canvas up to the nearest gs-multiple of the scaled size.
        height = math.ceil(height * ratio / gs) * gs
        width = math.ceil(width * ratio / gs) * gs
    pad_right = width - new_size[1]
    pad_bottom = height - new_size[0]
    return F.pad(img, [0, pad_right, 0, pad_bottom], value=0.447)  # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
    """Copy instance attributes from `b` onto `a`.

    When `include` is non-empty, only those names are copied; names listed in
    `exclude` or starting with '_' are always skipped.
    """
    for key, value in b.__dict__.items():
        wanted = not include or key in include
        skipped = key.startswith('_') or key in exclude
        if wanted and not skipped:
            setattr(a, key, value)
class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        """Create the EMA shadow copy of `model`.

        Args:
            model: model to track (a parallel wrapper's .module is unwrapped).
            decay: asymptotic EMA decay; the effective decay ramps up from 0 via
                ``decay * (1 - exp(-updates / 2000))`` to help early epochs.
            updates: number of EMA updates already performed (for resuming).
        """
        # Create EMA
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        # The shadow copy is never trained directly, so freeze its parameters.
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        """Fold the current weights of `model` into the EMA shadow copy in place."""
        # Update EMA parameters
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)

            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            # ema = d * ema + (1 - d) * model, applied in place; non-floating-point
            # state entries are left untouched.
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        """Copy selected plain attributes from `model` onto the EMA copy via copy_attr."""
        # Update EMA attributes
        copy_attr(self.ema, model, include, exclude)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
term.go
|
//go:build !no_term
// +build !no_term
package zli
import (
"bytes"
"fmt"
"os"
"syscall"
"golang.org/x/term"
)
// IsTerminal reports if this file descriptor is an interactive terminal.
//
// TODO: this is a bit tricky now, as we can replace zli.Stdout with something
// else; checking os.Stdout may not be correct in those cases.
var IsTerminal = func(fd uintptr) bool { return term.IsTerminal(int(fd)) }
// TerminalSize gets the dimensions of the given terminal.
var TerminalSize = func(fd uintptr) (width, height int, err error) { return term.GetSize(int(fd)) }
// WantColor indicates if the program should output any colors. This is
// automatically set from from the output terminal and NO_COLOR environment
// variable.
//
// You can override this if the user sets "--color=force" or the like.
//
// TODO: maybe expand this a bit with WantMonochrome or some such, so you can
// still output bold/underline/reverse text for people who don't want colors.
var WantColor = func() bool {
_, ok := os.LookupEnv("NO_COLOR")
return os.Getenv("TERM") != "dumb" && term.IsTerminal(int(os.Stdout.Fd())) && !ok
}()
// AskPassword interactively asks the user for a password and confirmation.
//
// Just a convenient wrapper for term.ReadPassword() to call it how you want to
// use it much of the time to ask for a new password.
func AskPassword(minlen int) (string, error) {
	for {
		fmt.Fprintf(Stdout, "Enter password for new user (will not echo): ")
		pwd1, err := term.ReadPassword(int(syscall.Stdin))
		if err != nil {
			return "", err
		}
		if len(pwd1) < minlen {
			// Too short: tell the user and start over.
			fmt.Fprintf(Stdout, "\nNeed at least %d characters\n", minlen)
			continue
		}

		fmt.Fprintf(Stdout, "\nConfirm: ")
		pwd2, err := term.ReadPassword(int(syscall.Stdin))
		if err != nil {
			return "", err
		}
		fmt.Fprintln(Stdout, "")

		if !bytes.Equal(pwd1, pwd2) {
			// Mismatch: restart the whole prompt loop.
			fmt.Fprintln(Stdout, "Passwords did not match; try again.")
			continue
		}
		return string(pwd1), nil
	}
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
setup.py
|
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
import pip
import os
import sys
import contextlib
@contextlib.contextmanager
def cwd(path):
    """Context manager: run the body with the working directory set to `path`,
    restoring the previous working directory afterwards (even on error)."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
# inspired by https://blog.shazam.com/python-microlibs-5be9461ad979
packages = ['vaex-core', 'vaex-viz', 'vaex-hdf5', 'vaex-server', 'vaex-astro', 'vaex-jupyter', 'vaex-ml', 'vaex-meta', 'vaex-graphql', 'vaex-contrib']
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
class DevelopCmd(develop):
    """Custom `develop` command: pip-installs every sub-package in editable mode,
    then symlinks each sub-package's namespace directory into vaex-core so the
    development install resolves `vaex.<name>` correctly."""

    def run(self):
        relative = os.path.abspath(os.path.join('packages', 'vaex-core', 'vaex'))
        for package in packages:
            with cwd(os.path.join('packages', package)):
                # Use the running interpreter, not whatever `python` happens to be
                # on PATH, so the editable install lands in the active environment.
                err = os.system(f'{sys.executable} -m pip install -e .')
                if err:
                    raise RuntimeError(f'Oops, failed to install {package}')
                # we need to make symbolic links from vaex-core/vaex/<name> to vaex-<name>/vaex/<name>
                # otherwise development install do not work
                if package not in ['vaex-core']:
                    name = package.split('-')[1]

                    source = os.path.abspath(os.path.join('packages', package, 'vaex', name))
                    rel_source = os.path.relpath(source, relative)
                    with cwd(relative):
                        print('symlinking', source, name, rel_source)
                        if os.path.exists(name) and os.readlink(name) == rel_source:
                            print('symlink ok')
                        else:
                            if os.path.exists(name):
                                print('old symlink', os.readlink(name))
                                os.remove(name)
                            os.symlink(rel_source, name)
class InstallCmd(install):
    """ Add custom steps for the install command """

    def run(self):
        # First pass: install each sub-package without dependencies so that
        # inter-package ordering does not matter.
        for package in packages:
            with cwd(os.path.join('packages', package)):
                # Use sys.executable so pip installs into the active environment,
                # and fail loudly instead of silently ignoring a non-zero exit.
                err = os.system(f'{sys.executable} -m pip install --no-deps .')
                if err:
                    raise RuntimeError(f'Oops, failed to install {package}')
        # Second pass: upgrade each sub-package with its dependencies resolved.
        for package in packages:
            with cwd(os.path.join('packages', package)):
                err = os.system(f'{sys.executable} -m pip install --upgrade .')
                if err:
                    raise RuntimeError(f'Oops, failed to install {package}')
# Meta-package: installing or developing this package fans out to the real
# sub-packages listed in `packages` via the custom command classes above.
setup(
    name='vaex-meta',
    version="0.1.0",
    description="Convenience setup.py for when installing from the git repo",
    classifiers=[
        'Private :: Do Not Upload to pypi server',
    ],
    install_requires=[
        'pip',
    ],
    extras_require={
        'dev': [
            'pytest',
            'gcsfs',
            's3fs',
            'graphviz',
            'myst_parser',
            # For generating the documentation
            'sphinx',
            'nbsphinx',
            'sphinx_gallery',
            'sphinx_sitemap',
            'sphinx_book_theme',
        ]
    },
    cmdclass={
        'install': InstallCmd,  # used by `pip install .` / `setup.py install`
        'develop': DevelopCmd,  # used by `setup.py develop`
    },
)
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
backend-modules/cardano-graphql/src/it/java/com/bloxbean/cardano/client/backend/gql/it/GqlBaseTest.java
|
package com.bloxbean.cardano.client.backend.gql.it;
import com.bloxbean.cardano.client.backend.gql.Constants;
import com.bloxbean.cardano.client.backend.gql.GqlBackendService;
import java.util.HashMap;
import java.util.Map;
public class GqlBaseTest {
    // Auth key resolved from the CARDANO_GRAPHQL_AUTH_KEY system property,
    // falling back to the environment variable of the same name.
    protected String authKey;
    protected GqlBackendService backendService;

    public GqlBaseTest() {
        authKey = System.getProperty("CARDANO_GRAPHQL_AUTH_KEY");
        if(authKey == null || authKey.isEmpty()) {
            authKey = System.getenv("CARDANO_GRAPHQL_AUTH_KEY");
        }

        Map<String, String> headers = new HashMap<>();
        // NOTE(review): the resolved authKey above is never placed in these
        // headers; fixed placeholder values are sent instead. Confirm whether
        // "AuthKey" should carry the resolved authKey.
        headers.put("AuthKey", "Some Auth key");
        headers.put("CustomHeader", "Some header");
        backendService = new GqlBackendService(Constants.GQL_URL, headers);
    }
}
|
[
"\"CARDANO_GRAPHQL_AUTH_KEY\""
] |
[] |
[
"CARDANO_GRAPHQL_AUTH_KEY"
] |
[]
|
["CARDANO_GRAPHQL_AUTH_KEY"]
|
java
| 1 | 0 | |
tests/calico_cni_k8s_test.go
|
// Copyright (c) 2015-2020 Tigera, Inc. All rights reserved.
package main_test
import (
"context"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"os"
"os/exec"
"strings"
"syscall"
"time"
"regexp"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
cnitestutils "github.com/containernetworking/plugins/pkg/testutils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/projectcalico/cni-plugin/internal/pkg/testutils"
"github.com/projectcalico/cni-plugin/internal/pkg/utils"
"github.com/projectcalico/cni-plugin/pkg/types"
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
k8sconversion "github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/ipam"
"github.com/projectcalico/libcalico-go/lib/logutils"
"github.com/projectcalico/libcalico-go/lib/names"
cnet "github.com/projectcalico/libcalico-go/lib/net"
"github.com/projectcalico/libcalico-go/lib/numorstring"
"github.com/projectcalico/libcalico-go/lib/options"
)
// ensureNamespace creates the named Kubernetes namespace, treating
// "already exists" as success. Any other error fails the test.
func ensureNamespace(clientset *kubernetes.Clientset, name string) {
	_, err := clientset.CoreV1().Namespaces().Create(&v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	})
	if errors.IsAlreadyExists(err) {
		// Namespace was created earlier; nothing more to do.
		return
	}
	Expect(err).NotTo(HaveOccurred())
}
// ensurePodCreated creates pod in the given namespace and blocks until the
// API server can read it back, returning the created object.
func ensurePodCreated(clientset *kubernetes.Clientset, namespace string, pod *v1.Pod) *v1.Pod {
	pod, err := clientset.CoreV1().Pods(namespace).Create(pod)
	Expect(err).NotTo(HaveOccurred())

	// Immediately try to get the pod, and retry until we do. This prevents race
	// conditions where the API Server has accepted the create, but isn't ready
	// to find the pod on a get. These races can cause the tests to be flaky.
	Eventually(func() error {
		_, err := clientset.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
		return err
	}, 2*time.Second, 100*time.Millisecond).ShouldNot(HaveOccurred())

	return pod
}
// ensurePodDeleted deletes podName from ns (if it exists) and blocks until
// the API server no longer returns it, failing the test on timeout.
func ensurePodDeleted(clientset *kubernetes.Clientset, ns string, podName string) {
	// Check if pod exists first.
	_, err := clientset.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
	if kerrors.IsNotFound(err) {
		// Pod has been deleted already. Do nothing.
		return
	}
	Expect(err).NotTo(HaveOccurred())

	// Delete pod immediately (grace period 0).
	err = clientset.CoreV1().Pods(ns).Delete(podName, metav1.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

	// Wait for pod to disappear.
	Eventually(func() error {
		_, err := clientset.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
		if kerrors.IsNotFound(err) {
			return nil
		}
		if err != nil {
			return err
		}
		return fmt.Errorf("Pod %s.%s still exists", ns, podName)
	}, "5s", "200ms").Should(BeNil())
}
// ensureNodeDeleted deletes the named Node (if it exists) and blocks until
// the API server no longer returns it, failing the test on timeout.
// Mirrors ensurePodDeleted for cluster-scoped Node objects.
func ensureNodeDeleted(clientset *kubernetes.Clientset, nodeName string) {
	// Check if node exists first.
	_, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if kerrors.IsNotFound(err) {
		// Node has been deleted already. Do nothing.
		return
	}
	Expect(err).NotTo(HaveOccurred())

	// Delete node immediately (grace period 0).
	err = clientset.CoreV1().Nodes().Delete(nodeName, metav1.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

	// Wait for node to disappear.
	Eventually(func() error {
		_, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if kerrors.IsNotFound(err) {
			return nil
		}
		if err != nil {
			return err
		}
		return fmt.Errorf("Node %s still exists", nodeName)
	}, "5s", "200ms").Should(BeNil())
}
var _ = Describe("Kubernetes CNI tests", func() {
// Create a random seed
rand.Seed(time.Now().UTC().UnixNano())
log.SetFormatter(&logutils.Formatter{})
log.AddHook(&logutils.ContextHook{})
log.SetOutput(GinkgoWriter)
log.SetLevel(log.InfoLevel)
hostname, _ := names.Hostname()
ctx := context.Background()
calicoClient, err := client.NewFromEnv()
if err != nil {
panic(err)
}
config, err := clientcmd.DefaultClientConfig.ClientConfig()
if err != nil {
panic(err)
}
k8sClient, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
BeforeEach(func() {
testutils.WipeDatastore()
// Create the node for these tests. The IPAM code requires a corresponding Calico node to exist.
name, err := names.Hostname()
Expect(err).NotTo(HaveOccurred())
err = testutils.AddNode(calicoClient, k8sClient, name)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
// Delete the node.
name, err := names.Hostname()
Expect(err).NotTo(HaveOccurred())
err = testutils.DeleteNode(calicoClient, k8sClient, name)
Expect(err).NotTo(HaveOccurred())
})
logConf := types.NetConf{
LogLevel: "info",
}
utils.ConfigureLogging(logConf)
cniVersion := os.Getenv("CNI_SPEC_VERSION")
Context("using host-local IPAM", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("successfully networks the namespace", func() {
config, err := clientcmd.DefaultClientConfig.ClientConfig()
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
ensureNamespace(clientset, testutils.K8S_TEST_NS)
// Create a K8s pod w/o any special params
name := fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
containerID, result, contVeth, contAddresses, contRoutes, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
Expect(len(result.IPs)).Should(Equal(1))
ip := result.IPs[0].Address.IP.String()
result.IPs[0].Address.IP = result.IPs[0].Address.IP.To4() // Make sure the IP is respresented as 4 bytes
Expect(result.IPs[0].Address.Mask.String()).Should(Equal("ffffffff"))
// datastore things:
// TODO Make sure the profile doesn't exist
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for mac/containerID.
// Put them back manually for later comparison.
endpoints.Items[0].Spec.ContainerID = containerID
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(api.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{result.IPs[0].Address.String()},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Routes and interface on host - there's is nothing to assert on the routes since felix adds those.
// fmt.Println(Cmd("ip link show")) // Useful for debugging
hostVeth, err := netlink.LinkByName(interfaceName)
Expect(err).ToNot(HaveOccurred())
Expect(hostVeth.Attrs().Flags.String()).Should(ContainSubstring("up"))
Expect(hostVeth.Attrs().MTU).Should(Equal(1500))
// Assert hostVeth sysctl values are set to what we expect for IPv4.
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/proxy_arp", interfaceName), "1")
Expect(err).ShouldNot(HaveOccurred())
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/neigh/%s/proxy_delay", interfaceName), "0")
Expect(err).ShouldNot(HaveOccurred())
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/forwarding", interfaceName), "1")
Expect(err).ShouldNot(HaveOccurred())
// Assert if the host side route is programmed correctly.
hostRoutes, err := netlink.RouteList(hostVeth, syscall.AF_INET)
Expect(err).ShouldNot(HaveOccurred())
Expect(hostRoutes[0]).Should(Equal(netlink.Route{
LinkIndex: hostVeth.Attrs().Index,
Scope: netlink.SCOPE_LINK,
Dst: &result.IPs[0].Address,
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
}))
// Routes and interface in netns
Expect(contVeth.Attrs().Flags.String()).Should(ContainSubstring("up"))
// Assume the first IP is the IPv4 address
Expect(contAddresses[0].IP.String()).Should(Equal(ip))
Expect(contRoutes).Should(SatisfyAll(
ContainElement(netlink.Route{
LinkIndex: contVeth.Attrs().Index,
Gw: net.IPv4(169, 254, 1, 1).To4(),
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
}),
ContainElement(netlink.Route{
LinkIndex: contVeth.Attrs().Index,
Scope: netlink.SCOPE_LINK,
Dst: &net.IPNet{IP: net.IPv4(169, 254, 1, 1).To4(), Mask: net.CIDRMask(32, 32)},
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
})))
// Delete container
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// Make sure there are no endpoints anymore
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Make sure the interface has been removed from the namespace
targetNs, _ := ns.GetNS(contNs.Path())
err = targetNs.Do(func(_ ns.NetNS) error {
_, err = netlink.LinkByName("eth0")
return err
})
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
// Make sure the interface has been removed from the host
_, err = netlink.LinkByName("cali" + containerID)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
})
Context("when a named port is set", func() {
It("it is added to the workload endpoint", func() {
config, err := clientcmd.DefaultClientConfig.ClientConfig()
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
name := fmt.Sprintf("run%d", rand.Uint32())
// Create a K8s pod w/o any special params
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", name),
Image: "ignore",
Ports: []v1.ContainerPort{{
Name: "anamedport",
ContainerPort: 555,
}},
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
containerID, result, contVeth, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
Expect(len(result.IPs)).Should(Equal(1))
result.IPs[0].Address.IP = result.IPs[0].Address.IP.To4() // Make sure the IP is respresented as 4 bytes
Expect(result.IPs[0].Address.Mask.String()).Should(Equal("ffffffff"))
// datastore things:
// TODO Make sure the profile doesn't exist
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
Expect(err).NotTo(HaveOccurred())
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for mac/containerID.
// Put them back manually for later comparison.
endpoints.Items[0].Spec.ContainerID = containerID
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(api.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{result.IPs[0].Address.String()},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
Ports: []api.EndpointPort{{
Name: "anamedport",
Protocol: numorstring.ProtocolFromString("TCP"),
Port: 555,
}},
}))
// Delete container
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// Make sure there are no endpoints anymore
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Make sure the interface has been removed from the namespace
targetNs, _ := ns.GetNS(contNs.Path())
err = targetNs.Do(func(_ ns.NetNS) error {
_, err = netlink.LinkByName("eth0")
return err
})
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
// Make sure the interface has been removed from the host
_, err = netlink.LinkByName("cali" + containerID)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
})
})
Context("when the same hostVeth exists", func() {
It("successfully networks the namespace", func() {
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err := kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
name := fmt.Sprintf("run%d", rand.Uint32())
// Create a K8s pod w/o any special params
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
if err := testutils.CreateHostVeth("", name, testutils.K8S_TEST_NS, hostname); err != nil {
panic(err)
}
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("when calico-config contains a custom mtu", func() {
mtuNetconfTemplate := `
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"mtu": %d,
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`
It("creates pods with the new mtu", func() {
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err := kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
// Create a K8s pod/container with non-default MTU
name1 := fmt.Sprintf("mtutest%d", rand.Uint32())
mtuNetconf1 := fmt.Sprintf(mtuNetconfTemplate, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"), 3000)
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name1},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name1,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name1)
_, _, contVeth1, _, _, contNs1, err := testutils.CreateContainer(mtuNetconf1, name1, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
Expect(contVeth1.Attrs().MTU).Should(Equal(3000))
// Create another K8s pod/container with a different non-default MTU
name2 := fmt.Sprintf("mtutest2%d", rand.Uint32())
mtuNetconf2 := fmt.Sprintf(mtuNetconfTemplate, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"), 4000)
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name2},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name2,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name2)
_, _, contVeth2, _, _, contNs2, err := testutils.CreateContainer(mtuNetconf2, name2, testutils.K8S_TEST_NS, "")
Expect(err).ShouldNot(HaveOccurred())
Expect(contVeth2.Attrs().MTU).Should(Equal(4000))
// Cleanup
_, err = testutils.DeleteContainer(mtuNetconf1, contNs1.Path(), name1, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(mtuNetconf2, contNs2.Path(), name2, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
hostLocalIPAMConfigs := []struct {
description, cniVersion, config, unexpectedRoute string
expectedV4Routes, expectedV6Routes []string
numIPv4IPs, numIPv6IPs int
}{
{
description: "old-style inline subnet",
cniVersion: cniVersion,
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("default via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
unexpectedRoute: regexp.QuoteMeta("10."),
numIPv4IPs: 1,
numIPv6IPs: 0,
},
{
// This scenario tests IPv4+IPv6 without specifying any routes.
description: "new-style with IPv4 and IPv6 ranges, no routes",
cniVersion: "0.3.0",
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "usePodCidr"
}
],
[
{
"subnet": "dead:beef::/96"
}
]
]
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("default via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
expectedV6Routes: []string{
"dead:beef::[0-9a-f]* dev eth0 proto kernel metric 256 pref medium",
"fe80::/64 dev eth0 proto kernel metric 256 pref medium",
"default via fe80::ecee:eeff:feee:eeee dev eth0 metric 1024",
},
unexpectedRoute: regexp.QuoteMeta("10."),
numIPv4IPs: 1,
numIPv6IPs: 1,
},
{
// In this scenario, we use a lot more of the host-local IPAM plugin. Namely:
// - we use multiple ranges, one of which is IPv6, the other uses the podCIDR
// - we add custom routes, which override our default 0/0 and ::/0 routes.
// This configuration is only supported for CNI version >= 0.3.0 since we assign multiple
// addresses per family.
description: "new-style with IPv4 and IPv6 ranges and routes",
cniVersion: "0.3.0",
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "usePodCidr"
}
],
[
{
"subnet": "10.100.0.0/24"
}
],
[
{
"subnet": "dead:beef::/96"
}
]
],
"routes": [
{"dst": "10.123.0.0/16", "gw": "10.123.0.1"},
{"dst": "10.124.0.0/16"},
{"dst": "dead:beef::/96"}
]
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("10.123.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("10.124.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
expectedV6Routes: []string{
"dead:beef::. dev eth0 proto kernel metric 256 pref medium",
"dead:beef::/96 via fe80::ecee:eeff:feee:eeee dev eth0 metric 1024",
"fe80::/64 dev eth0 proto kernel metric 256 pref medium",
},
unexpectedRoute: "default",
numIPv4IPs: 2,
numIPv6IPs: 1,
},
{
// In this scenario, we use a lot more of the host-local IPAM plugin. Namely:
// - we use multiple ranges, one of which is IPv6, the other uses the podCIDR
// - we add custom routes, but configure the plugin to also include our default routes.
description: "new-style with IPv4 and IPv6 ranges and routes and Calico default routes",
cniVersion: "0.3.0",
config: `
{
"cniVersion": "%s",
"name": "net6",
"nodename_file_optional": true,
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"include_default_routes": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "usePodCidr"
}
],
[
{
"subnet": "10.100.0.0/24"
}
],
[
{
"subnet": "dead:beef::/96"
}
]
],
"routes": [
{"dst": "10.123.0.0/16", "gw": "10.123.0.1"},
{"dst": "10.124.0.0/16"},
{"dst": "dead:beef::/96"}
]
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`,
expectedV4Routes: []string{
regexp.QuoteMeta("default via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("10.123.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("10.124.0.0/16 via 169.254.1.1 dev eth0"),
regexp.QuoteMeta("169.254.1.1 dev eth0 scope link"),
},
expectedV6Routes: []string{
"dead:beef::. dev eth0 proto kernel metric 256 pref medium",
"dead:beef::/96 via fe80::ecee:eeff:feee:eeee dev eth0 metric 1024",
"fe80::/64 dev eth0 proto kernel metric 256 pref medium",
},
numIPv4IPs: 2,
numIPv6IPs: 1,
},
}
// Exercise each host-local IPAM configuration defined in the table above.
// For every config: create a pod requesting a specific IP, release that IP,
// then request the very same IP for a second pod. This proves that CNI DEL
// really returns the address to the host-local pool, and also verifies the
// routes, addresses and sysctls plumbed into the container's netns.
for _, c := range hostLocalIPAMConfigs {
c := c // Make sure we get a fresh variable on each loop.
Context("Using host-local IPAM ("+c.description+"): request an IP then release it, and then request it again", func() {
It("should successfully assign IP both times and successfully release it in the middle", func() {
// Render the netconf template with datastore details for this run.
netconfHostLocalIPAM := fmt.Sprintf(c.config, c.cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err := kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
ensureNamespace(clientset, testutils.K8S_TEST_NS)
ensureNodeDeleted(clientset, hostname)
// Create a K8s Node object with PodCIDR and name equal to hostname.
// Configs that use a "usePodCidr" subnet resolve it to this CIDR.
_, err = clientset.CoreV1().Nodes().Create(&v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: hostname},
Spec: v1.NodeSpec{
PodCIDR: "10.0.0.0/24",
},
})
Expect(err).NotTo(HaveOccurred())
defer ensureNodeDeleted(clientset, hostname)
By("Creating a pod with a specific IP address")
name := fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
// Request a fixed address from the podCIDR range so we can check
// below that it is released and can be handed out a second time.
requestedIP := "10.0.0.42"
expectedIP := net.IPv4(10, 0, 0, 42).To4()
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconfHostLocalIPAM, name, testutils.K8S_TEST_NS, requestedIP)
Expect(err).NotTo(HaveOccurred())
podIP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
Expect(podIP).Should(Equal(expectedIP))
By("Deleting the pod we created earlier")
_, err = testutils.DeleteContainer(netconfHostLocalIPAM, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
By("Creating a second pod with the same IP address as the first pod")
name2 := fmt.Sprintf("run2%d", rand.Uint32())
ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name2},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", name2),
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name2)
// Re-requesting the same IP must succeed now that it was released.
_, _, _, contAddresses, _, contNs, err = testutils.CreateContainer(netconfHostLocalIPAM, name2, testutils.K8S_TEST_NS, requestedIP)
Expect(err).NotTo(HaveOccurred())
pod2IP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
Expect(pod2IP).Should(Equal(expectedIP))
// From inside the container's network namespace, verify the routes,
// addresses and sysctl settings that the plugin configured.
err = contNs.Do(func(_ ns.NetNS) error {
defer GinkgoRecover()
out, err := exec.Command("ip", "route", "show").Output()
Expect(err).NotTo(HaveOccurred())
for _, r := range c.expectedV4Routes {
Expect(string(out)).To(MatchRegexp(r))
}
if c.unexpectedRoute != "" {
Expect(string(out)).NotTo(ContainSubstring(c.unexpectedRoute))
}
out, err = exec.Command("ip", "-6", "route", "show").Output()
Expect(err).NotTo(HaveOccurred())
for _, r := range c.expectedV6Routes {
Expect(string(out)).To(MatchRegexp(r))
}
if c.numIPv6IPs > 0 {
// When IPv6 addresses are assigned, duplicate address
// detection should have been disabled on eth0.
err := testutils.CheckSysctlValue("/proc/sys/net/ipv6/conf/eth0/accept_dad", "0")
Expect(err).NotTo(HaveOccurred())
}
// Count global-scope IPv4/IPv6 addresses on the interface and
// compare against the expectations for this config.
out, err = exec.Command("ip", "addr", "show").Output()
Expect(err).NotTo(HaveOccurred())
inet := regexp.MustCompile(` {4}inet .*scope global`)
Expect(inet.FindAll(out, -1)).To(HaveLen(c.numIPv4IPs))
inetv6 := regexp.MustCompile(` {4}inet6 .*scope global`)
Expect(inetv6.FindAll(out, -1)).To(HaveLen(c.numIPv6IPs))
Expect(out).NotTo(ContainSubstring("scope global tentative"),
"Some IPv6 addresses marked as tentative; disabling DAD must have failed.")
return nil
})
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainer(netconfHostLocalIPAM, contNs.Path(), name2, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
}
})
// Tests for the "cni.projectcalico.org/ipv4pools" annotation applied to a
// Namespace (not the pod): pods in the namespace should draw addresses from
// the annotated pool(s), fail when the pool does not exist or is exhausted,
// and fall through to a second listed pool when the first is full.
Context("using calico-ipam with a Namespace annotation only", func() {
var nc types.NetConf
var netconf string
var pool1CIDR, pool2CIDR *net.IPNet
var pool1 = "50.60.0.0/24"
var pool2 = "50.60.1.0/24"
var clientset *kubernetes.Clientset
var name string
var testNS string
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create IP Pools.
testutils.MustCreateNewIPPool(calicoClient, pool1, false, false, true)
_, pool1CIDR, err = net.ParseCIDR(pool1)
Expect(err).NotTo(HaveOccurred())
testutils.MustCreateNewIPPool(calicoClient, pool2, false, false, true)
_, pool2CIDR, err = net.ParseCIDR(pool2)
Expect(err).NotTo(HaveOccurred())
// Set up clients.
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err = kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testNS, name)
// Delete the IP Pools.
testutils.MustDeleteIPPool(calicoClient, pool1)
testutils.MustDeleteIPPool(calicoClient, pool2)
})
It("successfully assigns an IP address from an IP Pool specified on a Namespace", func() {
// Create the Namespace.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.60.0.0/24\"]",
},
},
})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).NotTo(HaveOccurred())
// The assigned address must come from the namespace's pool.
podIP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
log.Infof("Container got IP address: %s", podIP)
Expect(pool1CIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should fail to assign from an IP pool that doesn't exist", func() {
// Create the Namespace, annotated with a pool that was never created.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"100.0.0.0/16\"]",
},
},
})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
// Expect an error when invoking the CNI plugin.
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).To(HaveOccurred())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should fail to assign an IP when the provided IP Pool is full", func() {
// Create the Namespace.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.60.0.0/24\"]",
},
},
})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
// Allocate all the addresses in the IP pool (a /24 holds 256) so the
// subsequent CNI ADD has nothing left to hand out.
handle := "test-handle"
v4s, _, err := calicoClient.IPAM().AutoAssign(
context.Background(),
ipam.AutoAssignArgs{
Num4: 256,
HandleID: &handle,
IPv4Pools: []cnet.IPNet{cnet.IPNet{IPNet: *pool1CIDR}},
},
)
Expect(err).NotTo(HaveOccurred())
Expect(len(v4s)).To(Equal(256))
// Expect an error when invoking the CNI plugin.
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).To(HaveOccurred())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
// Release all the IPs assigned above.
err = calicoClient.IPAM().ReleaseByHandle(context.Background(), handle)
Expect(err).ShouldNot(HaveOccurred())
})
It("should assign an IP from the second pool when the first IP Pool is full", func() {
// Create the Namespace, annotated with both pools.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.60.0.0/24\", \"50.60.1.0/24\"]",
},
},
})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
// Allocate all the addresses in the first IP pool.
handle := "test-handle"
v4s, _, err := calicoClient.IPAM().AutoAssign(
context.Background(),
ipam.AutoAssignArgs{
Num4: 256,
HandleID: &handle,
IPv4Pools: []cnet.IPNet{cnet.IPNet{IPNet: *pool1CIDR}},
},
)
Expect(err).NotTo(HaveOccurred())
Expect(len(v4s)).To(Equal(256))
// Invoke the CNI plugin.
_, r, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).NotTo(HaveOccurred())
// Expect the assigned IP address in the second IP pool.
Expect(len(r.IPs)).To(Equal(1))
Expect(pool2CIDR.Contains(r.IPs[0].Address.IP)).To(BeTrue(), "IP assigned from wrong pool")
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
// Release all the IPs assigned above.
err = calicoClient.IPAM().ReleaseByHandle(context.Background(), handle)
Expect(err).ShouldNot(HaveOccurred())
})
})
// Verifies precedence when the "cni.projectcalico.org/ipv4pools" annotation
// is present on both the Namespace and the Pod: the pod-level annotation
// must win.
Context("using calico-ipam with Namespace annotation and pod annotation", func() {
var nc types.NetConf
var netconf string
var ipPoolCIDR *net.IPNet
var pool1 = "50.70.0.0/16"
var clientset *kubernetes.Clientset
var name string
var testNS string
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create a new IP Pool. Note: only the pod-annotated pool is created;
// the namespace-annotated pool (50.55.0.0/16) deliberately is not.
testutils.MustCreateNewIPPool(calicoClient, pool1, false, false, true)
_, ipPoolCIDR, err = net.ParseCIDR(pool1)
Expect(err).NotTo(HaveOccurred())
// Create clients.
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err = kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testNS, name)
// Delete the IP Pool.
testutils.MustDeleteIPPool(calicoClient, pool1)
})
It("should prefer pod annotations to namespace annotations if both are present", func() {
// Create the Namespace with its own pool annotation.
testNS = fmt.Sprintf("run%d", rand.Uint32())
_, err = clientset.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.55.0.0/16\"]",
},
},
})
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod passing in an IP pool.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testNS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"50.70.0.0/16\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
// Run the CNI plugin.
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testNS, "")
Expect(err).NotTo(HaveOccurred())
// The address must come from the pod-annotated pool.
podIP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
log.Infof("Container got IP address: %s", podIP)
Expect(ipPoolCIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testNS)
Expect(err).ShouldNot(HaveOccurred())
})
})
// Tests pod-level "cni.projectcalico.org/ipv4pools" annotations: pools may
// be referenced either by CIDR or by the IPPool resource name, and the
// assigned address must come from the referenced pool.
Context("using calico-ipam specifying IP pools via pod annotation", func() {
var nc types.NetConf
var netconf string
var pool1 = "172.16.0.0/16"
var pool2 = "172.17.0.0/16"
var pool1CIDR, pool2CIDR *net.IPNet
var pool2Name string
var clientset *kubernetes.Clientset
var name string
BeforeEach(func() {
// Build the network config for this set of tests.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Create two IP pools; keep pool2's generated resource name so the
// second test can reference the pool by name rather than CIDR.
testutils.MustCreateNewIPPool(calicoClient, pool1, false, false, true)
_, pool1CIDR, err = net.ParseCIDR(pool1)
Expect(err).NotTo(HaveOccurred())
pool2Name = testutils.MustCreateNewIPPool(calicoClient, pool2, false, false, true)
_, pool2CIDR, err = net.ParseCIDR(pool2)
Expect(err).NotTo(HaveOccurred())
// Create a kubernetes clientset.
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err = kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
// Ensure a namespace exists.
ensureNamespace(clientset, testutils.K8S_TEST_NS)
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
// Delete the IP Pools.
testutils.MustDeleteIPPool(calicoClient, pool1)
testutils.MustDeleteIPPool(calicoClient, pool2)
})
It("successfully assigns an IP address from the annotated IP Pool (by cidr)", func() {
// Create a K8s pod passing in an IP pool.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": "[\"172.16.0.0/16\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
// The address must come from the CIDR-annotated pool.
podIP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
log.Infof("Container got IP address: %s", podIP)
Expect(pool1CIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("successfully assigns an IP address from the annotated IP Pool (by name)", func() {
// Create a K8s pod passing in an IP pool by resource name.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipv4pools": fmt.Sprintf("[\"%s\"]", pool2Name),
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
// The address must come from the name-annotated pool.
podIP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
log.Infof("Container got IP address: %s", podIP)
Expect(pool2CIDR.Contains(podIP)).To(BeTrue())
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
// Tests the "cni.projectcalico.org/floatingIPs" pod annotation, which
// records a DNAT mapping (IPNAT) on the WorkloadEndpoint. The mapping is
// only honoured when the floating_ips feature flag is enabled in the
// netconf's feature_control section.
Context("using floatingIPs annotation to assign a DNAT", func() {
var netconf types.NetConf
var clientset *kubernetes.Clientset
var name string
BeforeEach(func() {
netconf = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-network-name",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
FeatureControl: types.FeatureControl{FloatingIPs: true},
}
netconf.IPAM.Type = "calico-ipam"
// Create an IP pool for the pod IP as well as a floating IP range.
for _, ipPool := range []string{"172.16.0.0/16", "1.1.1.0/24"} {
testutils.MustCreateNewIPPool(calicoClient, ipPool, false, false, true)
_, _, err := net.ParseCIDR(ipPool)
Expect(err).NotTo(HaveOccurred())
}
// Build kubernetes clients.
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err = kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod passing in a floating IP.
ensureNamespace(clientset, testutils.K8S_TEST_NS)
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/floatingIPs": "[\"1.1.1.1\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
// Delete IPPools.
for _, ipPool := range []string{"172.16.0.0/16", "1.1.1.0/24"} {
testutils.MustDeleteIPPool(calicoClient, ipPool)
}
})
It("successfully assigns a DNAT IP address from the annotated floatingIP", func() {
// Resolve the config struct.
confBytes, err := json.Marshal(netconf)
Expect(err).NotTo(HaveOccurred())
// Invoke the CNI plugin
_, _, _, contAddresses, _, contNs, err := testutils.CreateContainer(string(confBytes), name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
// Assert that the endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
// Assert that the endpoint contains the appropriate DNAT:
// the pod's real IP mapped to the annotated floating IP.
podIP := contAddresses[0].IP
Expect(endpoints.Items[0].Spec.IPNATs).Should(HaveLen(1))
Expect(endpoints.Items[0].Spec.IPNATs).Should(Equal([]api.IPNAT{api.IPNAT{InternalIP: podIP.String(), ExternalIP: "1.1.1.1"}}))
// Delete the container.
_, err = testutils.DeleteContainer(string(confBytes), contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("fails when the floating_ip feature is not enabled", func() {
// Resolve the config struct, disabling the feature.
netconf.FeatureControl.FloatingIPs = false
confBytes, err := json.Marshal(netconf)
Expect(err).NotTo(HaveOccurred())
// Invoke the CNI plugin, expect it to fail.
_, _, _, _, _, contNs, err := testutils.CreateContainer(string(confBytes), name, testutils.K8S_TEST_NS, "")
Expect(err).To(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// No WEP should be created with an etcd datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Delete the container.
_, err = testutils.DeleteContainer(string(confBytes), contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
// Tests the "cni.projectcalico.org/ipAddrsNoIpam" pod annotation, which
// assigns the given address directly to the pod, bypassing IPAM entirely.
// The ip_addrs_no_ipam feature must be enabled via feature_control, and
// exactly one address may be requested.
Context("using ipAddrsNoIpam annotation to assign IP address to a pod, bypassing IPAM", func() {
var clientset *kubernetes.Clientset
var netconf string
var nc types.NetConf
var name string
BeforeEach(func() {
// Set up clients.
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err = kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
// Create a network config with the ip_addrs_no_ipam feature enabled.
nc = types.NetConf{
CNIVersion: cniVersion,
Name: "calico-uts",
Type: "calico",
EtcdEndpoints: fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
DatastoreType: os.Getenv("DATASTORE_TYPE"),
Kubernetes: types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
Policy: types.Policy{PolicyType: "k8s"},
NodenameFileOptional: true,
LogLevel: "info",
FeatureControl: types.FeatureControl{IPAddrsNoIpam: true},
}
nc.IPAM.Type = "calico-ipam"
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
})
AfterEach(func() {
// Delete pod
ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
})
It("should successfully assign the annotated IP address", func() {
assignIP := net.IPv4(10, 0, 0, 1).To4()
// Now create a K8s pod passing in an IP address.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrsNoIpam": "[\"10.0.0.1\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
containerID, _, contVeth, contAddresses, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
// The container must get exactly the annotated address.
podIP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
log.Infof("Container got IP address: %s", podIP)
Expect(podIP).Should(Equal(assignIP))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// The endpoint is created
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for mac/containerID.
// Put them back manually for later comparison.
endpoints.Items[0].Spec.ContainerID = containerID
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
// The WEP spec must record the annotated /32 as the pod's network.
Expect(endpoints.Items[0].Spec).Should(Equal(api.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{assignIP.String() + "/32"},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should fail if ipAddrsNoIpam is not enabled", func() {
// Disable the feature
nc.FeatureControl.IPAddrsNoIpam = false
ncb, err := json.Marshal(nc)
Expect(err).NotTo(HaveOccurred())
netconf = string(ncb)
// Now create a K8s pod passing in an IP address. We reuse the
// clientset built in BeforeEach; previously this test rebuilt an
// identical client into a shadowing local variable.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrsNoIpam": "[\"10.0.0.1\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
// The CNI ADD must fail because the feature flag is off.
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).To(HaveOccurred())
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
It("should return an error if multiple addresses are requested using ipAddrsNoIpam", func() {
// Now create a K8s pod passing in more than one IPv4 address.
name = fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrsNoIpam": "[\"10.0.0.1\", \"10.0.0.2\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
log.Infof("Created POD object: %v", pod)
// Only a single address is supported; the CNI ADD must fail.
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
Expect(err).To(HaveOccurred())
if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
// No WEP should be created with an etcd datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
}
// Delete the container.
_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
// Tests the "cni.projectcalico.org/ipAddrs" pod annotation, which requests
// a specific address but still goes through calico-ipam (so the address
// must belong to an existing IP pool and gets recorded in the datastore).
Context("using ipAddrs annotation to assign IP address to a pod from IPAM", func() {
var clientset *kubernetes.Clientset
BeforeEach(func() {
// Set up clients.
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err = kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
})
It("should successfully assign the annotated IP address", func() {
netconfCalicoIPAM := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net4",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"nodename_file_optional": true,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "true",
"assign_ipv6": "true"
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
assignIP := net.IPv4(20, 0, 0, 111).To4()
// Create a new ipPool covering the annotated address.
ipPool := "20.0.0.0/24"
testutils.MustCreateNewIPPool(calicoClient, ipPool, false, false, true)
_, _, err := net.ParseCIDR(ipPool)
Expect(err).NotTo(HaveOccurred())
// Now create a K8s pod passing in an IP address.
name := fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/ipAddrs": "[\"20.0.0.111\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
log.Infof("Created POD object: %v", pod)
containerID, _, contVeth, contAddresses, _, netNS, err := testutils.CreateContainer(netconfCalicoIPAM, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
// The container must get exactly the annotated address.
podIP := contAddresses[0].IP
log.Infof("All container IPs: %v", contAddresses)
log.Infof("Container got IP address: %s", podIP)
Expect(podIP).Should(Equal(assignIP))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// Make sure WorkloadEndpoint is created and has the requested IP in the datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for mac/containerID.
// Put them back manually for later comparison.
endpoints.Items[0].Spec.ContainerID = containerID
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
Expect(endpoints.Items[0].Spec).Should(Equal(api.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{assignIP.String() + "/32"},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Check the pod's IP annotations.
checkPodIPAnnotations(clientset, testutils.K8S_TEST_NS, name, "20.0.0.111/32", "20.0.0.111/32")
// Delete the container.
_, err = testutils.DeleteContainer(netconfCalicoIPAM, netNS.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
// Dual-stack test: with both an IPv4 and an IPv6 pool configured and
// assign_ipv4/assign_ipv6 enabled, a pod should receive one address of each
// family, and floating IPs of both families should be recorded as IPNATs
// on the WorkloadEndpoint.
Context("with dual stack IP allocations", func() {
var clientset *kubernetes.Clientset
var ipPool4 string = "20.0.0.0/24"
var ipPool6 string = "fd80:20::/96"
BeforeEach(func() {
// Set up clients.
config, err := clientcmd.DefaultClientConfig.ClientConfig()
Expect(err).NotTo(HaveOccurred())
clientset, err = kubernetes.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
testutils.MustCreateNewIPPool(calicoClient, ipPool4, false, false, true)
testutils.MustCreateNewIPPool(calicoClient, ipPool6, false, false, true)
})
AfterEach(func() {
testutils.MustDeleteIPPool(calicoClient, ipPool4)
testutils.MustDeleteIPPool(calicoClient, ipPool6)
})
It("should allocate IPv4 and IPv6 addresses and handle dual stack floating IPs", func() {
netconfCalicoIPAM := fmt.Sprintf(`
{
"feature_control": {
"floating_ips": true
},
"cniVersion": "%s",
"name": "net4",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"nodename_file_optional": true,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "true",
"assign_ipv6": "true"
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
// Now create a K8s pod (without any pod IP annotations) but with
// one IPv4 and one IPv6 floating IP requested.
name := fmt.Sprintf("run%d", rand.Uint32())
pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"cni.projectcalico.org/floatingIPs": "[\"1.1.1.1\", \"2001:647f::21\"]",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: "ignore",
}},
NodeName: hostname,
},
})
defer ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
log.Infof("Created POD object: %v", pod)
containerID, _, contVeth, contAddresses, _, netNS, err := testutils.CreateContainer(netconfCalicoIPAM, name, testutils.K8S_TEST_NS, "")
Expect(err).NotTo(HaveOccurred())
mac := contVeth.Attrs().HardwareAddr
log.Infof("All container IPs: %v", contAddresses)
// Expect exactly one address of each family.
Expect(contAddresses).To(HaveLen(2))
podIPv4 := contAddresses[0].IP
Expect(podIPv4.To4()).NotTo(BeNil())
podIPv6 := contAddresses[1].IP
Expect(podIPv6.To16()).NotTo(BeNil())
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: api.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: name,
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
// Make sure WorkloadEndpoint is created and has the requested IP in the datastore.
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for mac/containerID.
// Put them back manually for later comparison.
endpoints.Items[0].Spec.ContainerID = containerID
endpoints.Items[0].Spec.MAC = mac.String()
}
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
"projectcalico.org/namespace": "test",
"projectcalico.org/orchestrator": api.OrchestratorKubernetes,
"projectcalico.org/serviceaccount": "default",
}))
// The WEP must record both pod networks and both DNAT mappings.
Expect(endpoints.Items[0].Spec).Should(Equal(api.WorkloadEndpointSpec{
Pod: name,
InterfaceName: interfaceName,
IPNetworks: []string{podIPv4.String() + "/32", podIPv6.String() + "/128"},
MAC: mac.String(),
Profiles: []string{"kns.test", "ksa.test.default"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
IPNATs: []api.IPNAT{
{
InternalIP: podIPv4.String(),
ExternalIP: "1.1.1.1",
},
{
InternalIP: podIPv6.String(),
ExternalIP: "2001:647f::21",
},
},
ContainerID: containerID,
Orchestrator: api.OrchestratorKubernetes,
}))
// Check the pod's IP annotations.
checkPodIPAnnotations(clientset, testutils.K8S_TEST_NS, name, podIPv4.String()+"/32", podIPv4.String()+"/32,"+podIPv6.String()+"/128")
// Delete the container.
_, err = testutils.DeleteContainer(netconfCalicoIPAM, netNS.Path(), name, testutils.K8S_TEST_NS)
Expect(err).ShouldNot(HaveOccurred())
})
})
// This context contains test cases meant to simulate specific scenarios seen when running the plugin
// in a Kubernetes cluster.
Context("Kubernetes-specific race condition tests", func() {
	var clientset *kubernetes.Clientset
	// Two distinct container IDs used to simulate kubelet re-creating a
	// pod's sandbox container under a new ID.
	var cniContainerIDX string = "container-id-00x"
	var cniContainerIDY string = "container-id-00y"
	var ipPool string = "10.0.0.0/24"
	var name string
	var nc types.NetConf
	var netconf string
	BeforeEach(func() {
		// Set up clients.
		config, err := clientcmd.DefaultClientConfig.ClientConfig()
		Expect(err).NotTo(HaveOccurred())
		clientset, err = kubernetes.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		// Create a network config.
		nc = types.NetConf{
			CNIVersion:           cniVersion,
			Name:                 "calico-uts",
			Type:                 "calico",
			EtcdEndpoints:        fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
			DatastoreType:        os.Getenv("DATASTORE_TYPE"),
			Kubernetes:           types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
			Policy:               types.Policy{PolicyType: "k8s"},
			NodenameFileOptional: true,
			LogLevel:             "info",
		}
		nc.IPAM.Type = "calico-ipam"
		ncb, err := json.Marshal(nc)
		Expect(err).NotTo(HaveOccurred())
		netconf = string(ncb)
		// Create a new ipPool.
		testutils.MustCreateNewIPPool(calicoClient, ipPool, false, false, true)
		// Now create a K8s pod.
		name = fmt.Sprintf("pod-%d", rand.Uint32())
		ensurePodCreated(clientset, testutils.K8S_TEST_NS,
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{
						Name:  name,
						Image: "ignore",
					}},
					NodeName: hostname,
				},
			})
	})
	AfterEach(func() {
		// Delete pod
		ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
		// Delete IP pools.
		testutils.MustDeleteIPPool(calicoClient, ipPool)
	})
	// This specific test case is for an issue where k8s would send extra DELs being "aggressive". See: https://github.com/kubernetes/kubernetes/issues/44100
	// Specifically, what this test simulates is:
	// - CNI ADD for containerIDX
	// - CNI DEL for containerIDX
	// - CNI ADD for containerIDY
	// - CNI DEL for containerIDX (Spurious delete for old container ID)
	It("Use different container IDs to ADD and DEL the container", func() {
		// ADD the container with passing a container ID "X".
		_, result, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDX)
		Expect(err).ShouldNot(HaveOccurred())
		log.Printf("Unmarshaled result: %v\n", result)
		// Assert that the endpoint is created in the backend datastore with ContainerID "X".
		endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		idsX := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  cniContainerIDX,
		}
		wrkloadX, err := idsX.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		Expect(endpoints.Items[0].Name).Should(Equal(wrkloadX))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDX))
		}
		// Delete the container with container ID "X".
		exitCode, err := testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDX)
		Expect(err).ShouldNot(HaveOccurred())
		Expect(exitCode).Should(Equal(0))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			// The endpoint for ContainerID "X" should not exist in the backend datastore.
			endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
			Expect(err).ShouldNot(HaveOccurred())
			Expect(endpoints.Items).Should(HaveLen(0))
		}
		// ADD a new container with passing a container ID "Y".
		_, result, _, _, _, contNs, err = testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDY)
		Expect(err).ShouldNot(HaveOccurred())
		log.Printf("Unmarshaled result: %v\n", result)
		// Assert that the endpoint is created in the backend datastore with ContainerID "Y".
		endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		idsY := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  cniContainerIDY,
		}
		wrkloadY, err := idsY.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
		}
		// Delete the container with container ID "X" again.
		// This simulates the spurious DEL from kubelet and must be a no-op.
		exitCode, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDX)
		Expect(err).ShouldNot(HaveOccurred())
		Expect(exitCode).Should(Equal(0))
		// Assert that the endpoint with container ID "Y" is still in the datastore.
		endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
		}
		// Finally, delete the container with container ID "Y".
		exitCode, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDY)
		Expect(err).ShouldNot(HaveOccurred())
		Expect(exitCode).Should(Equal(0))
	})
	// Specifically, this test simulates the following:
	// - CNI ADD using containerIDX
	// - CNI ADD using containerIDY
	// - CNI DEL using containerIDX (should be a no-op)
	// - CNI DEL using containerIDY (should actually delete the container)
	It("should handle deletes for stale container IDs", func() {
		// ADD the container with passing a CNI_CONTAINERID of "X".
		_, result, _, _, _, _, err := testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDX)
		Expect(err).ShouldNot(HaveOccurred())
		log.Printf("Unmarshaled result: %v\n", result)
		// Assert that the endpoint is created in the backend datastore with ContainerID "X".
		endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		idsX := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  cniContainerIDX,
		}
		wrkloadX, err := idsX.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		Expect(endpoints.Items[0].Name).Should(Equal(wrkloadX))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDX))
		}
		// ADD the container with passing a CNI_CONTAINERID of "Y".
		// This second ADD takes over the endpoint for the same pod.
		_, result, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, name, testutils.K8S_TEST_NS, "", cniContainerIDY)
		Expect(err).ShouldNot(HaveOccurred())
		log.Printf("Unmarshaled result: %v\n", result)
		// Assert that the endpoint is created in the backend datastore with ContainerID "Y".
		endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		idsY := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  cniContainerIDY,
		}
		wrkloadY, err := idsY.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
		}
		// Delete the container with the CNI_CONTAINERID "X".
		// This is stale and must not touch the endpoint now owned by "Y".
		exitCode, err := testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDX)
		Expect(err).ShouldNot(HaveOccurred())
		Expect(exitCode).Should(Equal(0))
		// Assert that the endpoint in the backend datastore still has ContainerID "Y".
		endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		Expect(endpoints.Items[0].Name).Should(Equal(wrkloadY))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(cniContainerIDY))
		}
		// Delete the container with the CNI_CONTAINERID "Y".
		exitCode, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, cniContainerIDY)
		Expect(err).ShouldNot(HaveOccurred())
		Expect(exitCode).Should(Equal(0))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			// Assert that the endpoint in the backend datastore is now gone.
			endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
			Expect(err).ShouldNot(HaveOccurred())
			Expect(endpoints.Items).Should(HaveLen(0))
		}
	})
})
Context("after a pod has already been networked once", func() {
	var nc types.NetConf
	var netconf string
	var clientset *kubernetes.Clientset
	var workloadName, containerID, name string
	var endpointSpec api.WorkloadEndpointSpec
	var contNs ns.NetNS
	var result *current.Result
	// checkIPAMReservation asserts that exactly one IPAM allocation exists
	// for this workload's handle and that it matches the IP recorded on the
	// workload endpoint. Uses offset 1 so failures point at the caller.
	checkIPAMReservation := func() {
		// IPAM reservation should still be in place.
		handleID := utils.GetHandleID("calico-uts", containerID, workloadName)
		ipamIPs, err := calicoClient.IPAM().IPsByHandle(context.Background(), handleID)
		ExpectWithOffset(1, err).NotTo(HaveOccurred(), "error getting IPs")
		ExpectWithOffset(1, ipamIPs).To(HaveLen(1),
			"There should be an IPAM handle for endpoint")
		Expect(endpointSpec.IPNetworks).To(HaveLen(1))
		ExpectWithOffset(1, ipamIPs[0].String()+"/32").To(Equal(endpointSpec.IPNetworks[0]))
	}
	BeforeEach(func() {
		// Create a new ipPool.
		testutils.MustCreateNewIPPool(calicoClient, "10.0.0.0/24", false, false, true)
		// Create a network config.
		nc = types.NetConf{
			CNIVersion:           cniVersion,
			Name:                 "calico-uts",
			Type:                 "calico",
			EtcdEndpoints:        fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
			DatastoreType:        os.Getenv("DATASTORE_TYPE"),
			Kubernetes:           types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
			Policy:               types.Policy{PolicyType: "k8s"},
			NodenameFileOptional: true,
			LogLevel:             "info",
		}
		nc.IPAM.Type = "calico-ipam"
		ncb, err := json.Marshal(nc)
		Expect(err).NotTo(HaveOccurred())
		netconf = string(ncb)
		// Now create a K8s pod.
		config, err := clientcmd.DefaultClientConfig.ClientConfig()
		Expect(err).NotTo(HaveOccurred())
		clientset, err = kubernetes.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		name = fmt.Sprintf("run%d", rand.Uint32())
		pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS,
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{
						Name:  name,
						Image: "ignore",
					}},
					NodeName: hostname,
				},
			})
		log.Infof("Created POD object: %v", pod)
		// Run the CNI plugin.
		containerID, result, _, _, _, contNs, err = testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
		Expect(err).ShouldNot(HaveOccurred())
		log.Printf("Unmarshalled result from first ADD: %v\n", result)
		// The endpoint is created in etcd
		endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		ids := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  containerID,
		}
		workloadName, err = ids.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		Expect(endpoints.Items[0].Name).Should(Equal(workloadName))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		// Saved for comparison by checkIPAMReservation and the rigged-failure tests.
		endpointSpec = endpoints.Items[0].Spec
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpointSpec.ContainerID).Should(Equal(containerID))
		}
		checkIPAMReservation()
	})
	AfterEach(func() {
		// NOTE(review): assigns to an 'err' declared in an enclosing scope.
		_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, containerID)
		Expect(err).ShouldNot(HaveOccurred())
		ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
	})
	It("a second ADD for the same container should work, assigning a new IP", func() {
		// Try to create the same pod with a different container (so CNI receives the ADD for the same endpoint again)
		resultSecondAdd, _, _, _, err := testutils.RunCNIPluginWithId(netconf, name, testutils.K8S_TEST_NS, "", "new-container-id", "eth0", contNs)
		Expect(err).NotTo(HaveOccurred())
		log.Printf("Unmarshalled result from second ADD: %v\n", resultSecondAdd)
		// The IP addresses shouldn't be the same, since we'll reassign one.
		Expect(resultSecondAdd.IPs).ShouldNot(Equal(result.IPs))
		// Otherwise, they should be the same.
		resultSecondAdd.IPs = nil
		result.IPs = nil
		Expect(resultSecondAdd).Should(Equal(result))
		// IPAM reservation should still be in place.
		checkIPAMReservation()
	})
	Context("with networking rigged to fail", func() {
		// renameVeth takes a host-side veth down, renames it, and brings it
		// back up; used to sabotage and later restore the pod's interface.
		renameVeth := func(from, to string) {
			output, err := exec.Command("ip", "link", "set", from, "down").CombinedOutput()
			Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
			output, err = exec.Command("ip", "link", "set", from, "name", to).CombinedOutput()
			Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
			output, err = exec.Command("ip", "link", "set", to, "up").CombinedOutput()
			Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
		}
		var realVethName, tweakedVethName string
		BeforeEach(func() {
			// To prevent the networking attempt from succeeding, rename the old veth.
			// This leaves a route and an eth0 in place that the plugin will struggle with.
			realVethName = endpointSpec.InterfaceName
			tweakedVethName = strings.Replace(realVethName, "cali", "sali", 1)
			renameVeth(realVethName, tweakedVethName)
		})
		It("a second ADD should fail, but not clean up the original IPAM allocation", func() {
			// Try to create the same container (so CNI receives the ADD for the same endpoint again)
			// Use a different container ID but the same Pod Name/Namespace
			_, _, _, _, err := testutils.RunCNIPluginWithId(netconf, name, testutils.K8S_TEST_NS, "", "new-container-id", "eth0", contNs)
			Expect(err).Should(HaveOccurred())
			// IPAM reservation should still be in place.
			checkIPAMReservation()
		})
		AfterEach(func() {
			// So the tear-down succeeds, put the veth back.
			renameVeth(tweakedVethName, realVethName)
		})
	})
})
Context("Create a container then send another ADD for the same container but with a different interface", func() {
	netconf := fmt.Sprintf(`
	{
	  "cniVersion": "%s",
	  "name": "net10",
	  "type": "calico",
	  "etcd_endpoints": "http://%s:2379",
	  "datastore_type": "%s",
	  "nodename_file_optional": true,
	  "log_level": "info",
	  "ipam": {
	    "type": "calico-ipam"
	  },
	  "kubernetes": {
	    "k8s_api_root": "http://127.0.0.1:8080"
	  },
	  "policy": {"type": "k8s"}
	}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
	It("should successfully execute both ADDs but for second ADD will return the same result as the first time but it won't network the container", func() {
		// Create a new ipPool.
		testutils.MustCreateNewIPPool(calicoClient, "10.0.0.0/24", false, false, true)
		config, err := clientcmd.DefaultClientConfig.ClientConfig()
		Expect(err).NotTo(HaveOccurred())
		clientset, err := kubernetes.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		// Now create a K8s pod.
		name := "mypod-1"
		pod := ensurePodCreated(clientset, testutils.K8S_TEST_NS,
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{
						Name:  name,
						Image: "ignore",
					}},
					NodeName: hostname,
				},
			})
		log.Infof("Created POD object: %v", pod)
		// Create the container, which will call CNI and by default it will create the container with interface name 'eth0'.
		containerID, result, _, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
		Expect(err).ShouldNot(HaveOccurred())
		// Make sure the pod gets cleaned up, whether we fail or not.
		// expectedIfaceName is mutated below once the second ADD renames the
		// container-side interface, so the deferred delete targets the right name.
		expectedIfaceName := "eth0"
		defer func() {
			_, err := testutils.DeleteContainerWithIdAndIfaceName(netconf, contNs.Path(), name, testutils.K8S_TEST_NS, containerID, expectedIfaceName)
			Expect(err).ShouldNot(HaveOccurred())
			ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
		}()
		log.Printf("First container, unmarshalled result: %v\n", result)
		// The endpoint is created in etcd
		endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		ids := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  containerID,
		}
		wepName, err := ids.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		Expect(endpoints.Items[0].Name).Should(Equal(wepName))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(containerID))
		}
		// Try to create the same container but with a different endpoint (container interface name 'eth1'),
		// so CNI receives the ADD for the same containerID but different endpoint.
		_, _, _, _, err = testutils.RunCNIPluginWithId(netconf, name, testutils.K8S_TEST_NS, "", containerID, "eth1", contNs)
		Expect(err).ShouldNot(HaveOccurred())
		// If the above command succeeds, the CNI plugin will have renamed the container side of the
		// veth to "eth1". We need to clean it up under the correct name, or we'll leak it.
		expectedIfaceName = "eth1"
		// The endpoint is created in etcd
		endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		// Returned endpoint should still have the same fields even after calling the CNI plugin with a different interface name.
		// Calico CNI currently only supports one endpoint (interface) per pod.
		Expect(endpoints.Items[0].Name).Should(Equal(wepName))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		// Explicitly assert that endpoint name is still 'eth0' (which was the case from the first ADD)
		Expect(endpoints.Items[0].Spec.Endpoint).Should(Equal("eth0"))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			Expect(endpoints.Items[0].Spec.ContainerID).Should(Equal(containerID))
		}
		// Now we create another pod with a very similar name.
		// "mypod" is a prefix of "mypod-1"; this guards against name-matching bugs.
		name2 := "mypod"
		pod2 := ensurePodCreated(clientset, testutils.K8S_TEST_NS,
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: name2,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{
						Name:  name2,
						Image: "ignore",
					}},
					NodeName: hostname,
				},
			})
		log.Infof("Created POD object: %v", pod2)
		// Now since we can't use the same container namespace for the second container, we need to create a new one.
		contNs2, err := cnitestutils.NewNS()
		Expect(err).NotTo(HaveOccurred())
		containerID2 := "random-cid"
		defer func() {
			_, err := testutils.DeleteContainerWithId(netconf, contNs2.Path(), name2, testutils.K8S_TEST_NS, containerID2)
			Expect(err).ShouldNot(HaveOccurred())
			ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name2)
		}()
		// Bring up loopback inside the new namespace before networking the pod.
		err = contNs2.Do(func(_ ns.NetNS) error {
			lo, err := netlink.LinkByName("lo")
			if err != nil {
				return err
			}
			return netlink.LinkSetUp(lo)
		})
		Expect(err).NotTo(HaveOccurred())
		// Create the container, which will call CNI and by default it will create the container with interface name 'eth0'.
		result, _, _, _, err = testutils.RunCNIPluginWithId(netconf, name2, testutils.K8S_TEST_NS, "", containerID2, "eth0", contNs2)
		Expect(err).ShouldNot(HaveOccurred())
		log.Printf("Second container: unmarshalled result: %v\n", result)
		// Make sure BOTH of the endpoints are there in etcd
		endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(2))
		// Construct the workloadendpoint name for the second pod.
		ids2 := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name2,
			ContainerID:  containerID2,
		}
		wrkload2, err := ids2.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		// Explicitly Get the second workloadendpoint and make sure it exists and has all the right fields.
		ep, err := calicoClient.WorkloadEndpoints().Get(ctx, testutils.K8S_TEST_NS, wrkload2, options.GetOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		// Returned endpoint should still have the same fields even after calling the CNI plugin with a different interface name.
		// Calico CNI currently only supports one endpoint (interface) per pod.
		Expect(ep.Name).Should(Equal(wrkload2))
		Expect(ep.Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(ep.Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			// Assert this WEP has the new containerID for the second pod.
			Expect(ep.Spec.ContainerID).Should(Equal(containerID2))
		}
	})
})
Context("when pod has a service account", func() {
	var nc types.NetConf
	var netconf string
	var clientset *kubernetes.Clientset
	var name string
	var pool string = "172.24.0.0/24"
	BeforeEach(func() {
		// Build the network config for this set of tests.
		nc = types.NetConf{
			CNIVersion:           cniVersion,
			Name:                 "calico-uts",
			Type:                 "calico",
			EtcdEndpoints:        fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
			DatastoreType:        os.Getenv("DATASTORE_TYPE"),
			Kubernetes:           types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
			Policy:               types.Policy{PolicyType: "k8s"},
			NodenameFileOptional: true,
			LogLevel:             "info",
		}
		nc.IPAM.Type = "calico-ipam"
		ncb, err := json.Marshal(nc)
		Expect(err).NotTo(HaveOccurred())
		netconf = string(ncb)
		// Create an IPPool for the test.
		testutils.MustCreateNewIPPool(calicoClient, pool, false, false, true)
		Expect(err).NotTo(HaveOccurred())
	})
	AfterEach(func() {
		// Delete pod
		ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
		testutils.MustDeleteIPPool(calicoClient, pool)
	})
	It("should add a service account profile to the workload endpoint", func() {
		config, err := clientcmd.DefaultClientConfig.ClientConfig()
		if err != nil {
			panic(err)
		}
		clientset, err = kubernetes.NewForConfig(config)
		if err != nil {
			panic(err)
		}
		name = fmt.Sprintf("run%d", rand.Uint32())
		// Make sure the namespace exists.
		ensureNamespace(clientset, testutils.K8S_TEST_NS)
		// Create a K8s service account
		saName := "testserviceaccount"
		_, err = clientset.CoreV1().ServiceAccounts(testutils.K8S_TEST_NS).Create(&v1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{Name: saName},
		})
		if err != nil {
			panic(err)
		}
		defer func() {
			err = clientset.CoreV1().ServiceAccounts(testutils.K8S_TEST_NS).Delete(saName, metav1.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}()
		// Create a K8s pod with the service account
		ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  fmt.Sprintf("container-%s", name),
					Image: "ignore",
					Ports: []v1.ContainerPort{{
						Name:          "anamedport",
						ContainerPort: 555,
					}},
				}},
				ServiceAccountName: saName,
				NodeName:           hostname,
			},
		})
		// Network the pod via the CNI plugin.
		containerID, result, contVeth, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
		Expect(err).ShouldNot(HaveOccurred())
		mac := contVeth.Attrs().HardwareAddr
		Expect(len(result.IPs)).Should(Equal(1))
		ids := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  containerID,
		}
		wrkload, err := ids.CalculateWorkloadEndpointName(false)
		interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
		Expect(err).NotTo(HaveOccurred())
		// The endpoint is created
		endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
			// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for mac/containerID.
			// Put them back manually for later comparison.
			endpoints.Items[0].Spec.ContainerID = containerID
			endpoints.Items[0].Spec.MAC = mac.String()
		}
		Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		// The serviceaccount label must reflect the pod's service account.
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/serviceaccount": saName,
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
		}))
		// The WEP profiles must include the per-service-account profile
		// ("ksa.<namespace>.<serviceaccount>") alongside the namespace profile.
		Expect(endpoints.Items[0].Spec).Should(Equal(api.WorkloadEndpointSpec{
			Pod:           name,
			InterfaceName: interfaceName,
			IPNetworks:    []string{result.IPs[0].Address.String()},
			MAC:           mac.String(),
			Profiles:      []string{"kns.test", "ksa.test." + saName},
			Node:          hostname,
			Endpoint:      "eth0",
			Workload:      "",
			ContainerID:   containerID,
			Orchestrator:  api.OrchestratorKubernetes,
			Ports: []api.EndpointPort{{
				Name:     "anamedport",
				Protocol: numorstring.ProtocolFromString("TCP"),
				Port:     555,
			}},
		}))
		_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
		Expect(err).ShouldNot(HaveOccurred())
		if os.Getenv("DATASTORE_TYPE") != "kubernetes" {
			// Make sure there are no endpoints anymore
			endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
			Expect(err).ShouldNot(HaveOccurred())
			Expect(endpoints.Items).Should(HaveLen(0))
		}
		// Make sure the interface has been removed from the namespace
		targetNs, _ := ns.GetNS(contNs.Path())
		err = targetNs.Do(func(_ ns.NetNS) error {
			_, err = netlink.LinkByName("eth0")
			return err
		})
		Expect(err).Should(HaveOccurred())
		Expect(err.Error()).Should(Equal("Link not found"))
		// Make sure the interface has been removed from the host
		_, err = netlink.LinkByName("cali" + containerID)
		Expect(err).Should(HaveOccurred())
		Expect(err.Error()).Should(Equal("Link not found"))
	})
})
Context("when pod has a GenerateName", func() {
	var nc types.NetConf
	var netconf string
	var clientset *kubernetes.Clientset
	var name string
	var pool string = "172.24.0.0/24"
	BeforeEach(func() {
		// Build the network config for this set of tests.
		nc = types.NetConf{
			CNIVersion:           cniVersion,
			Name:                 "calico-uts",
			Type:                 "calico",
			EtcdEndpoints:        fmt.Sprintf("http://%s:2379", os.Getenv("ETCD_IP")),
			DatastoreType:        os.Getenv("DATASTORE_TYPE"),
			Kubernetes:           types.Kubernetes{K8sAPIRoot: "http://127.0.0.1:8080"},
			Policy:               types.Policy{PolicyType: "k8s"},
			NodenameFileOptional: true,
			LogLevel:             "info",
		}
		nc.IPAM.Type = "calico-ipam"
		ncb, err := json.Marshal(nc)
		Expect(err).NotTo(HaveOccurred())
		netconf = string(ncb)
		// Create an IPPool for the test.
		testutils.MustCreateNewIPPool(calicoClient, pool, false, false, true)
		Expect(err).NotTo(HaveOccurred())
	})
	AfterEach(func() {
		// Delete pod
		ensurePodDeleted(clientset, testutils.K8S_TEST_NS, name)
		testutils.MustDeleteIPPool(calicoClient, pool)
	})
	It("should add a workload endpoint with the GenerateName", func() {
		config, err := clientcmd.DefaultClientConfig.ClientConfig()
		if err != nil {
			panic(err)
		}
		clientset, err = kubernetes.NewForConfig(config)
		if err != nil {
			panic(err)
		}
		// Make sure the namespace exists.
		ensureNamespace(clientset, testutils.K8S_TEST_NS)
		// Create a K8s pod with GenerateName
		name = fmt.Sprintf("run%d", rand.Uint32())
		generateName := "test-gen-name"
		ensurePodCreated(clientset, testutils.K8S_TEST_NS, &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: name,
				GenerateName: generateName},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  fmt.Sprintf("container-%s", name),
					Image: "ignore",
					Ports: []v1.ContainerPort{{
						Name:          "anamedport",
						ContainerPort: 555,
					}},
				}},
				NodeName: hostname,
			},
		})
		// Network the pod via the CNI plugin.
		containerID, result, contVeth, _, _, contNs, err := testutils.CreateContainer(netconf, name, testutils.K8S_TEST_NS, "")
		Expect(err).ShouldNot(HaveOccurred())
		ids := names.WorkloadEndpointIdentifiers{
			Node:         hostname,
			Orchestrator: api.OrchestratorKubernetes,
			Endpoint:     "eth0",
			Pod:          name,
			ContainerID:  containerID,
		}
		wrkload, err := ids.CalculateWorkloadEndpointName(false)
		Expect(err).NotTo(HaveOccurred())
		mac := contVeth.Attrs().HardwareAddr
		interfaceName := k8sconversion.NewConverter().VethNameForWorkload(testutils.K8S_TEST_NS, name)
		// The endpoint is created
		endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
		Expect(err).ShouldNot(HaveOccurred())
		Expect(endpoints.Items).Should(HaveLen(1))
		if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
			// Unlike etcd datastore, WEP based on a kubernetes pod does not store values for mac/containerID.
			// Put them back manually for later comparison.
			endpoints.Items[0].Spec.ContainerID = containerID
			endpoints.Items[0].Spec.MAC = mac.String()
		}
		Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
		Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.K8S_TEST_NS))
		Expect(endpoints.Items[0].Labels).Should(Equal(map[string]string{
			"projectcalico.org/namespace":      "test",
			"projectcalico.org/orchestrator":   api.OrchestratorKubernetes,
			"projectcalico.org/serviceaccount": "default",
		}))
		// Make sure that the GenerateName is there.
		Expect(endpoints.Items[0].GenerateName).Should(Equal(generateName))
		// Let's just check that the Spec is good too.
		Expect(endpoints.Items[0].Spec).Should(Equal(api.WorkloadEndpointSpec{
			Pod:           name,
			InterfaceName: interfaceName,
			IPNetworks:    []string{result.IPs[0].Address.String()},
			MAC:           mac.String(),
			Profiles:      []string{"kns.test", "ksa.test.default"},
			Node:          hostname,
			Endpoint:      "eth0",
			Workload:      "",
			ContainerID:   containerID,
			Orchestrator:  api.OrchestratorKubernetes,
			Ports: []api.EndpointPort{{
				Name:     "anamedport",
				Protocol: numorstring.ProtocolFromString("TCP"),
				Port:     555,
			}},
		}))
		_, err = testutils.DeleteContainer(netconf, contNs.Path(), name, testutils.K8S_TEST_NS)
		Expect(err).ShouldNot(HaveOccurred())
	})
})
Describe("testConnection tests", func() {
It("successfully connects to the datastore", func(done Done) {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8080"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
pluginPath := fmt.Sprintf("%s/%s", os.Getenv("BIN"), os.Getenv("PLUGIN"))
c := exec.Command(pluginPath, "-t")
stdin, err := c.StdinPipe()
Expect(err).ToNot(HaveOccurred())
go func() {
defer stdin.Close()
_, _ = io.WriteString(stdin, netconf)
}()
_, err = c.CombinedOutput()
Expect(err).ToNot(HaveOccurred())
close(done)
}, 10)
It("reports it cannot connect to the datastore", func(done Done) {
// wrong port(s).
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2370",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"kubernetes": {
"k8s_api_root": "http://127.0.0.1:8081"
},
"policy": {"type": "k8s"},
"nodename_file_optional": true,
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
pluginPath := fmt.Sprintf("%s/%s", os.Getenv("BIN"), os.Getenv("PLUGIN"))
c := exec.Command(pluginPath, "-t")
stdin, err := c.StdinPipe()
Expect(err).ToNot(HaveOccurred())
go func() {
defer stdin.Close()
_, _ = io.WriteString(stdin, netconf)
}()
_, err = c.CombinedOutput()
Expect(err).To(HaveOccurred())
close(done)
}, 10)
})
})
func checkPodIPAnnotations(clientset *kubernetes.Clientset, ns, name, expectedIP, expectedIPs string) {
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
pod, err := clientset.CoreV1().Pods(testutils.K8S_TEST_NS).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(pod.Annotations["cni.projectcalico.org/podIP"]).To(Equal(expectedIP))
Expect(pod.Annotations["cni.projectcalico.org/podIPs"]).To(Equal(expectedIPs))
}
}
|
[
"\"CNI_SPEC_VERSION\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"BIN\"",
"\"PLUGIN\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"BIN\"",
"\"PLUGIN\"",
"\"DATASTORE_TYPE\""
] |
[] |
[
"CNI_SPEC_VERSION",
"BIN",
"PLUGIN",
"DATASTORE_TYPE",
"ETCD_IP"
] |
[]
|
["CNI_SPEC_VERSION", "BIN", "PLUGIN", "DATASTORE_TYPE", "ETCD_IP"]
|
go
| 5 | 0 | |
src/test/java/de/hhu/stups/plues/ui/controller/UnsatCoreTest.java
|
package de.hhu.stups.plues.ui.controller;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.testfx.api.FxToolkit.setupStage;
import de.hhu.stups.plues.Delayed;
import de.hhu.stups.plues.data.Store;
import de.hhu.stups.plues.data.entities.AbstractUnit;
import de.hhu.stups.plues.data.entities.Course;
import de.hhu.stups.plues.data.entities.Group;
import de.hhu.stups.plues.data.entities.Module;
import de.hhu.stups.plues.data.entities.Session;
import de.hhu.stups.plues.routes.Router;
import de.hhu.stups.plues.services.SolverService;
import de.hhu.stups.plues.services.UiDataService;
import de.hhu.stups.plues.ui.UiTestDataCreator;
import de.hhu.stups.plues.ui.components.CombinationOrSingleCourseSelection;
import de.hhu.stups.plues.ui.components.ControllerHeader;
import de.hhu.stups.plues.ui.components.MajorMinorCourseSelection;
import de.hhu.stups.plues.ui.components.TaskProgressIndicator;
import de.hhu.stups.plues.ui.components.unsatcore.AbstractUnitUnsatCore;
import de.hhu.stups.plues.ui.components.unsatcore.CourseUnsatCore;
import de.hhu.stups.plues.ui.components.unsatcore.GroupUnsatCore;
import de.hhu.stups.plues.ui.components.unsatcore.ModuleUnsatCore;
import de.hhu.stups.plues.ui.components.unsatcore.SessionUnsatCore;
import de.hhu.stups.plues.ui.components.unsatcore.UnsatCoreButtonBar;
import de.hhu.stups.plues.ui.layout.Inflater;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.fxml.FXMLLoader;
import javafx.fxml.JavaFXBuilderFactory;
import javafx.scene.Scene;
import javafx.scene.control.TitledPane;
import javafx.scene.input.KeyCode;
import javafx.stage.Stage;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import org.mockito.internal.stubbing.answers.ThrowsException;
import org.testfx.framework.junit.ApplicationTest;
import org.testfx.util.WaitForAsyncUtils;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
public class UnsatCoreTest extends ApplicationTest {
private final ExecutorService executorService = Executors.newSingleThreadExecutor();
private final Store store;
private final ObservableList<Module> modules;
private final ObservableList<AbstractUnit> abstractUnits;
private final ObservableList<Group> groups;
private final ObservableList<Session> sessions;
private final ObservableList<Course> courseList = UiTestDataCreator.createCourseList();
private CombinationOrSingleCourseSelection courseSelection;
private CourseUnsatCore courseUnsatCore;
private ModuleUnsatCore moduleUnsatCore;
private AbstractUnitUnsatCore abstractUnitUnsatCore;
private GroupUnsatCore groupUnsatCore;
private SessionUnsatCore sessionUnsatCore;
public UnsatCoreTest() {
store = mock(Store.class, new ThrowsException(new RuntimeException()));
modules = FXCollections.observableArrayList(mock(Module.class));
abstractUnits = FXCollections.observableArrayList(mock(AbstractUnit.class));
groups = FXCollections.observableArrayList(mock(Group.class));
sessions = FXCollections.observableArrayList(mock(Session.class));
}
/**
* When a task is running the course selection has to be disabled.
*/
@Test
public void testDisableCourseSelectionTaskRunning() {
// don't run this test in headless mode since it fails for unknown reasons, nevertheless, the
// test succeeds in a headful testing environment
Assume.assumeFalse("true".equals(System.getenv("HEADLESS")));
Assert.assertFalse(courseSelection.isDisabled());
Assert.assertFalse(courseUnsatCore.courseIsInfeasibleProperty().get());
Assert.assertFalse(courseUnsatCore.taskRunningProperty().get());
final UnsatCoreButtonBar checkFeasibilityButtonBar =
lookup("#checkFeasibilityButtonBar").query();
clickOn(checkFeasibilityButtonBar.getBtSubmitTask());
sleep(2000, TimeUnit.MILLISECONDS);
//Race Condition?
Assert.assertTrue(courseUnsatCore.taskRunningProperty().get());
Assert.assertTrue(courseSelection.isDisabled());
}
/**
* When the current task is waiting for other tasks to finish the course selection should be
* disabled and enabled when the task is cancelled especially before it enters the running state.
*/
@Test
public void testDisableCourseSelectionTaskWaiting() {
executorService.submit(UiTestDataCreator.getSimpleTask(1));
final UnsatCoreButtonBar checkFeasibilityButtonBar =
lookup("#checkFeasibilityButtonBar").query();
Assert.assertFalse(courseSelection.isDisabled());
clickOn(checkFeasibilityButtonBar.getBtSubmitTask());
Assert.assertTrue(courseSelection.isDisabled());
clickOn(checkFeasibilityButtonBar.getCancelTask());
Assert.assertFalse(courseSelection.isDisabled());
}
/**
* Test that the unsat core computation is only enabled when the selected courses are infeasible,
* i.e. a check feasibility task has to run beforehand.
*/
@Test
public void testCheckFeasibilityBeforeModuleUnsatCore() {
final UnsatCoreButtonBar checkFeasibilityButtonBar =
lookup("#checkFeasibilityButtonBar").query();
final UnsatCoreButtonBar unsatCoreButtonBar = lookup("#unsatCoreButtonBar").query();
Assert.assertFalse(checkFeasibilityButtonBar.isDisabled());
Assert.assertFalse(unsatCoreButtonBar.isVisible());
Assert.assertTrue(unsatCoreButtonBar.isDisabled());
clickOn(checkFeasibilityButtonBar.getBtSubmitTask());
sleep(2, TimeUnit.SECONDS); // task runs 2 seconds
courseUnsatCore.courseIsInfeasibleProperty().set(true);
Assert.assertTrue(checkFeasibilityButtonBar.isDisabled());
Assert.assertFalse(unsatCoreButtonBar.isDisabled());
Assert.assertTrue(unsatCoreButtonBar.isVisible());
courseUnsatCore.courseIsInfeasibleProperty().set(false);
// course is feasible
clickOn(courseSelection.getRbSingleSelection());
clickOn(checkFeasibilityButtonBar.getBtSubmitTask());
sleep(2, TimeUnit.SECONDS); // task runs 2 seconds
Assert.assertFalse(checkFeasibilityButtonBar.isDisabled());
Assert.assertTrue(unsatCoreButtonBar.isDisabled());
Assert.assertFalse(unsatCoreButtonBar.isVisible());
}
@Test
public void testCourseSelection() {
clickOn(courseSelection.getRbCombination());
clickOn(courseSelection.getMajorMinorCourseSelection().getMajorComboBox())
.type(KeyCode.DOWN)
.type(KeyCode.DOWN)
.type(KeyCode.ENTER);
clickOn(courseSelection.getMajorMinorCourseSelection().getMinorComboBox())
.type(KeyCode.DOWN)
.type(KeyCode.ENTER);
Assert.assertEquals(courseList.get(4), courseSelection.getSelectedCourses().get(0));
Assert.assertEquals(courseList.get(3), courseSelection.getSelectedCourses().get(1));
clickOn(courseSelection.getMajorMinorCourseSelection().getMajorComboBox())
.type(KeyCode.DOWN)
.type(KeyCode.ENTER);
clickOn(courseSelection.getMajorMinorCourseSelection().getMinorComboBox())
.type(KeyCode.UP)
.type(KeyCode.ENTER);
Assert.assertEquals(courseList.get(8), courseSelection.getSelectedCourses().get(0));
Assert.assertEquals(courseList.get(2), courseSelection.getSelectedCourses().get(1));
Assert.assertFalse(courseSelection.getMajorMinorCourseSelection().isDisabled());
Assert.assertTrue(courseSelection.getSingleCourseSelection().isDisabled());
clickOn(courseSelection.getRbSingleSelection());
Assert.assertTrue(courseSelection.getMajorMinorCourseSelection().isDisabled());
Assert.assertFalse(courseSelection.getSingleCourseSelection().isDisabled());
}
/**
* Test the visibility of the panes and disable states of the button bars as well as the used
* properties' states.
*/
@Test
public void testPaneVisibility() {
final TitledPane modulesPane = lookup("#modulesPane").query();
final TitledPane abstractUnitsPane = lookup("#abstractUnitsPane").query();
final TitledPane groupPane = lookup("#groupPane").query();
final TitledPane sessionPane = lookup("#sessionPane").query();
Assert.assertFalse(modulesPane.isVisible());
Assert.assertFalse(abstractUnitsPane.isVisible());
Assert.assertFalse(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
Assert.assertTrue(moduleUnsatCore.moduleProperty().isEmpty());
Assert.assertTrue(abstractUnitUnsatCore.abstractUnitsProperty().isEmpty());
Assert.assertTrue(groupUnsatCore.groupProperty().isEmpty());
Assert.assertTrue(sessionUnsatCore.sessionProperty().isEmpty());
// module unsat core step
moduleUnsatCore.setCourses(courseList);
moduleUnsatCore.setModules(modules);
Assert.assertTrue(courseUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertFalse(moduleUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(modulesPane.isVisible());
Assert.assertFalse(abstractUnitsPane.isVisible());
Assert.assertFalse(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
// abstract unit unsat core step
abstractUnitUnsatCore.modulesProperty().bind(moduleUnsatCore.moduleProperty());
abstractUnitUnsatCore.setAbstractUnits(abstractUnits);
Assert.assertTrue(courseUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(moduleUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertFalse(abstractUnitUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(modulesPane.isVisible());
Assert.assertTrue(abstractUnitsPane.isVisible());
Assert.assertFalse(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
// group unsat core step
groupUnsatCore.abstractUnitsProperty().bind(abstractUnitUnsatCore.abstractUnitsProperty());
groupUnsatCore.setGroups(groups);
Assert.assertTrue(courseUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(moduleUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(abstractUnitUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertFalse(groupUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(modulesPane.isVisible());
Assert.assertTrue(abstractUnitsPane.isVisible());
Assert.assertTrue(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
// in the last step, i.e. session unsat core computation, all panes are visible and all
// unsat core button bars are disabled
sessionUnsatCore.setSessions(sessions);
Assert.assertTrue(courseUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(moduleUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(abstractUnitUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(groupUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(modulesPane.isVisible());
Assert.assertTrue(abstractUnitsPane.isVisible());
Assert.assertTrue(groupPane.isVisible());
Assert.assertTrue(sessionPane.isVisible());
// all panes invisible if the course selection has changed
clickOn(courseSelection.getRbCombination());
clickOn(courseSelection.getMajorMinorCourseSelection().getMajorComboBox())
.type(KeyCode.DOWN)
.type(KeyCode.DOWN)
.type(KeyCode.ENTER);
Assert.assertFalse(modulesPane.isVisible());
Assert.assertFalse(abstractUnitsPane.isVisible());
Assert.assertFalse(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
// and all properties are empty
Assert.assertTrue(moduleUnsatCore.moduleProperty().isEmpty());
Assert.assertTrue(abstractUnitUnsatCore.abstractUnitsProperty().isEmpty());
Assert.assertTrue(groupUnsatCore.groupProperty().isEmpty());
Assert.assertTrue(sessionUnsatCore.sessionProperty().isEmpty());
}
/**
* Test the visibility of the panes and disable states of the button bars when the unsat core
* search is interrupted by changing the course selection.
*/
@Test
public void testPaneVisibilityInterrupted() {
final TitledPane modulesPane = lookup("#modulesPane").query();
final TitledPane abstractUnitsPane = lookup("#abstractUnitsPane").query();
final TitledPane groupPane = lookup("#groupPane").query();
final TitledPane sessionPane = lookup("#sessionPane").query();
moduleUnsatCore.setCourses(courseList);
moduleUnsatCore.setModules(modules);
Assert.assertTrue(courseUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertFalse(moduleUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(modulesPane.isVisible());
Assert.assertFalse(abstractUnitsPane.isVisible());
Assert.assertFalse(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
abstractUnitUnsatCore.modulesProperty().bind(moduleUnsatCore.moduleProperty());
abstractUnitUnsatCore.setAbstractUnits(abstractUnits);
Assert.assertTrue(courseUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(moduleUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertFalse(abstractUnitUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(modulesPane.isVisible());
Assert.assertTrue(abstractUnitsPane.isVisible());
Assert.assertFalse(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
clickOn(courseSelection.getMajorMinorCourseSelection().getMajorComboBox())
.type(KeyCode.DOWN)
.type(KeyCode.ENTER);
Assert.assertTrue(courseUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(moduleUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(abstractUnitUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertTrue(groupUnsatCore.getUnsatCoreButtonBar().isDisabled());
Assert.assertFalse(modulesPane.isVisible());
Assert.assertFalse(abstractUnitsPane.isVisible());
Assert.assertFalse(groupPane.isVisible());
Assert.assertFalse(sessionPane.isVisible());
Assert.assertTrue(moduleUnsatCore.moduleProperty().isEmpty());
Assert.assertTrue(abstractUnitUnsatCore.abstractUnitsProperty().isEmpty());
Assert.assertTrue(groupUnsatCore.groupProperty().isEmpty());
Assert.assertTrue(sessionUnsatCore.sessionProperty().isEmpty());
}
@After
public void cleanup() throws Exception {
WaitForAsyncUtils.waitForFxEvents();
setupStage(Stage::close);
}
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
@Override
public void start(final Stage stage) throws Exception {
final FXMLLoader subLoader = new FXMLLoader();
subLoader.setBuilderFactory(type -> {
if (type.equals(TaskProgressIndicator.class)) {
return () -> new TaskProgressIndicator(new Inflater(new FXMLLoader()));
}
return new JavaFXBuilderFactory().getBuilder(type);
});
final FXMLLoader loader = new FXMLLoader();
loader.setBuilderFactory(type -> {
if (type.equals(MajorMinorCourseSelection.class)) {
return () -> new MajorMinorCourseSelection(new Inflater(new FXMLLoader()));
} else if (type.equals(CombinationOrSingleCourseSelection.class)) {
return () -> courseSelection;
} else if (type.equals(TaskProgressIndicator.class)) {
return () -> new TaskProgressIndicator(new Inflater(new FXMLLoader()));
} else if (type.equals(UnsatCoreButtonBar.class)) {
return () -> new UnsatCoreButtonBar(new Inflater(subLoader));
}
return new JavaFXBuilderFactory().getBuilder(type);
});
final Inflater inflater = new Inflater(loader);
courseSelection = new CombinationOrSingleCourseSelection(inflater);
courseSelection.setCourses(courseList);
final SolverService solverService = UiTestDataCreator.getMockedSolverService();
final Delayed<SolverService> delayedSolverService = new Delayed<>();
delayedSolverService.set(solverService);
final Delayed<Store> delayedStore = new Delayed<>();
doReturn(courseList).when(store).getCourses();
delayedStore.set(store);
final UiDataService uiDataService = new UiDataService(delayedSolverService, delayedStore,
executorService);
courseUnsatCore = new CourseUnsatCore(inflater, delayedStore, delayedSolverService,
executorService, uiDataService);
moduleUnsatCore = new ModuleUnsatCore(inflater, new Router());
abstractUnitUnsatCore = new AbstractUnitUnsatCore(inflater, new Router());
groupUnsatCore = new GroupUnsatCore(inflater, new Router());
sessionUnsatCore = new SessionUnsatCore(inflater, new Router(), uiDataService);
final FXMLLoader unsatCoreLoader = new FXMLLoader();
unsatCoreLoader.setBuilderFactory(type -> {
if (type.equals(CourseUnsatCore.class)) {
return () -> courseUnsatCore;
} else if (type.equals(ModuleUnsatCore.class)) {
return () -> moduleUnsatCore;
} else if (type.equals(AbstractUnitUnsatCore.class)) {
return () -> abstractUnitUnsatCore;
} else if (type.equals(GroupUnsatCore.class)) {
return () -> groupUnsatCore;
} else if (type.equals(SessionUnsatCore.class)) {
return () -> sessionUnsatCore;
} else if (type.equals(ControllerHeader.class)) {
return () -> new ControllerHeader(new Inflater(new FXMLLoader()));
}
return new JavaFXBuilderFactory().getBuilder(type);
});
final Inflater unsatCoreInflater = new Inflater(unsatCoreLoader);
final UnsatCore unsatCore = new UnsatCore(unsatCoreInflater, delayedSolverService, delayedStore,
executorService);
final Scene scene = new Scene(unsatCore, 600, 800);
stage.setScene(scene);
stage.show();
}
}
|
[
"\"HEADLESS\""
] |
[] |
[
"HEADLESS"
] |
[]
|
["HEADLESS"]
|
java
| 1 | 0 | |
class_book/wsgi.py
|
"""
WSGI config for class_book project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'class_book.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
providers/dns/edgedns/edgedns_test.go
|
package edgedns
import (
"os"
"testing"
"time"
configdns "github.com/akamai/AkamaiOPEN-edgegrid-golang/configdns-v2"
"github.com/akamai/AkamaiOPEN-edgegrid-golang/edgegrid"
"github.com/fibbs/lego/v4/challenge/dns01"
"github.com/fibbs/lego/v4/platform/tester"
"github.com/stretchr/testify/require"
)
const (
envDomain = envNamespace + "TEST_DOMAIN"
envTestHost = envNamespace + "TEST_HOST"
envTestClientToken = envNamespace + "TEST_CLIENT_TOKEN"
envTestClientSecret = envNamespace + "TEST_CLIENT_SECRET"
envTestAccessToken = envNamespace + "TEST_ACCESS_TOKEN"
)
var envTest = tester.NewEnvTest(
EnvHost,
EnvClientToken,
EnvClientSecret,
EnvAccessToken,
EnvEdgeRc,
EnvEdgeRcSection,
envTestHost,
envTestClientToken,
envTestClientSecret,
envTestAccessToken).
WithDomain(envDomain).
WithLiveTestRequirements(EnvHost, EnvClientToken, EnvClientSecret, EnvAccessToken, envDomain)
func TestNewDNSProvider_FromEnv(t *testing.T) {
testCases := []struct {
desc string
envVars map[string]string
expectedConfig *edgegrid.Config
expectedErr string
}{
{
desc: "success",
envVars: map[string]string{
EnvHost: "akaa-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx.luna.akamaiapis.net",
EnvClientToken: "akab-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
EnvClientSecret: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
EnvAccessToken: "akac-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
},
expectedConfig: &edgegrid.Config{
Host: "akaa-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx.luna.akamaiapis.net",
ClientToken: "akab-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
ClientSecret: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
AccessToken: "akac-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
MaxBody: maxBody,
},
},
{
desc: "with section",
envVars: map[string]string{
EnvEdgeRcSection: "test",
envTestHost: "akaa-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx.luna.akamaiapis.net",
envTestClientToken: "akab-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
envTestClientSecret: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
envTestAccessToken: "akac-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
},
expectedConfig: &edgegrid.Config{
Host: "akaa-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx.luna.akamaiapis.net",
ClientToken: "akab-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
ClientSecret: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
AccessToken: "akac-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx",
MaxBody: maxBody,
},
},
{
desc: "missing credentials",
expectedErr: "edgedns: Unable to create instance using environment or .edgerc file",
},
{
desc: "missing host",
envVars: map[string]string{
EnvHost: "",
EnvClientToken: "B",
EnvClientSecret: "C",
EnvAccessToken: "D",
},
expectedErr: "edgedns: Unable to create instance using environment or .edgerc file",
},
{
desc: "missing client token",
envVars: map[string]string{
EnvHost: "A",
EnvClientToken: "",
EnvClientSecret: "C",
EnvAccessToken: "D",
},
expectedErr: "edgedns: Fatal missing required environment variables: [AKAMAI_CLIENT_TOKEN]",
},
{
desc: "missing client secret",
envVars: map[string]string{
EnvHost: "A",
EnvClientToken: "B",
EnvClientSecret: "",
EnvAccessToken: "D",
},
expectedErr: "edgedns: Fatal missing required environment variables: [AKAMAI_CLIENT_SECRET]",
},
{
desc: "missing access token",
envVars: map[string]string{
EnvHost: "A",
EnvClientToken: "B",
EnvClientSecret: "C",
EnvAccessToken: "",
},
expectedErr: "edgedns: Fatal missing required environment variables: [AKAMAI_ACCESS_TOKEN]",
},
}
for _, test := range testCases {
t.Run(test.desc, func(t *testing.T) {
defer envTest.RestoreEnv()
envTest.ClearEnv()
if test.envVars == nil {
test.envVars = map[string]string{}
}
test.envVars[EnvEdgeRc] = "/dev/null"
envTest.Apply(test.envVars)
p, err := NewDNSProvider()
if test.expectedErr != "" {
require.EqualError(t, err, test.expectedErr)
return
}
require.NoError(t, err)
require.NotNil(t, p)
require.NotNil(t, p.config)
if test.expectedConfig != nil {
require.Equal(t, *test.expectedConfig, configdns.Config)
}
})
}
}
func TestDNSProvider_findZone(t *testing.T) {
testCases := []struct {
desc string
domain string
expected string
}{
{
desc: "Extract root record name",
domain: "bar.com",
expected: "bar.com",
},
{
desc: "Extract sub record name",
domain: "foo.bar.com",
expected: "bar.com",
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
zone, err := findZone(test.domain)
require.NoError(t, err)
require.Equal(t, test.expected, zone)
})
}
}
func TestNewDefaultConfig(t *testing.T) {
defer envTest.RestoreEnv()
testCases := []struct {
desc string
envVars map[string]string
expected *Config
}{
{
desc: "default configuration",
expected: &Config{
TTL: dns01.DefaultTTL,
PropagationTimeout: 3 * time.Minute,
PollingInterval: 15 * time.Second,
Config: edgegrid.Config{
MaxBody: maxBody,
},
},
},
{
desc: "custom values",
envVars: map[string]string{
EnvTTL: "99",
EnvPropagationTimeout: "60",
EnvPollingInterval: "60",
},
expected: &Config{
TTL: 99,
PropagationTimeout: 60 * time.Second,
PollingInterval: 60 * time.Second,
Config: edgegrid.Config{
MaxBody: maxBody,
},
},
},
}
for _, test := range testCases {
t.Run(test.desc, func(t *testing.T) {
envTest.ClearEnv()
for key, value := range test.envVars {
os.Setenv(key, value)
}
config := NewDefaultConfig()
require.Equal(t, test.expected, config)
})
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
tf_agents/experimental/examples/cql_sac/kumar20/cql_sac_train_eval.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs training and eval on CQL-SAC on D4RL using the Actor-Learner API.
All default hyperparameters in train_eval come from the CQL paper:
https://arxiv.org/abs/2006.04779
"""
import os
from absl import app
from absl import flags
from absl import logging
import gin
import numpy as np
import tensorflow as tf
from tf_agents.agents.cql import cql_sac_agent
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.environments import tf_py_environment
from tf_agents.experimental.examples.cql_sac.kumar20.d4rl_utils import load_d4rl
from tf_agents.experimental.examples.cql_sac.kumar20.data_utils import create_tf_record_dataset
from tf_agents.metrics import py_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import strategy_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import trajectory
FLAGS = flags.FLAGS
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('env_name', 'antmaze-medium-play-v0',
'Name of the environment.')
flags.DEFINE_string('dataset_path', None, 'TFRecord dataset path.')
flags.DEFINE_integer('learner_iterations_per_call', 500,
'Iterations per learner run call.')
flags.DEFINE_integer('policy_save_interval', 10000, 'Policy save interval.')
flags.DEFINE_integer('eval_interval', 10000, 'Evaluation interval.')
flags.DEFINE_integer('summary_interval', 1000, 'Summary interval.')
flags.DEFINE_integer('num_gradient_updates', 1000000,
'Total number of train iterations to perform.')
flags.DEFINE_float(
'reward_shift', 0.0, 'Value to add to reward. Useful for sparse rewards, '
'e.g. set to -0.5 for optimal performance on AntMaze environments which '
'have rewards of 0 (most often) or 1 (when the target position is reached)'
)
flags.DEFINE_multi_float(
'action_clipping', None, 'Optional (min, max) values to clip actions. '
'e.g. set to (-0.995, 0.995) when actions are close to -1 and 1 since'
'tanh_distribution.log_prob(actions) will yield -inf and inf and make '
'actor loss NaN. '
)
flags.DEFINE_bool(
'use_trajectories', False,
'Whether dataset samples are stored as trajectories. '
'If False, stored as transitions')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_param', None, 'Gin binding parameters.')
@gin.configurable
def train_eval(
    root_dir,
    dataset_path,
    env_name,
    # Training params
    tpu=False,
    use_gpu=False,
    num_gradient_updates=1000000,
    actor_fc_layers=(256, 256),
    critic_joint_fc_layers=(256, 256, 256),
    # Agent params
    batch_size=256,
    bc_steps=0,
    actor_learning_rate=3e-5,
    critic_learning_rate=3e-4,
    alpha_learning_rate=3e-4,
    reward_scale_factor=1.0,
    cql_alpha_learning_rate=3e-4,
    cql_alpha=5.0,
    cql_tau=10.0,
    num_cql_samples=10,
    reward_noise_variance=0.0,
    include_critic_entropy_term=False,
    use_lagrange_cql_alpha=True,
    log_cql_alpha_clipping=None,
    softmax_temperature=1.0,
    # Data params
    reward_shift=0.0,
    action_clipping=None,
    use_trajectories=False,
    data_shuffle_buffer_size_per_record=1,
    data_shuffle_buffer_size=100,
    data_num_shards=1,
    data_block_length=10,
    data_parallel_reads=None,
    data_parallel_calls=10,
    data_prefetch=10,
    data_cycle_length=10,
    # Others
    policy_save_interval=10000,
    eval_interval=10000,
    summary_interval=1000,
    learner_iterations_per_call=1,
    eval_episodes=10,
    debug_summaries=False,
    summarize_grads_and_vars=False,
    seed=None):
  """Trains and evaluates CQL-SAC on a static (offline) TFRecord dataset.

  Builds the D4RL evaluation environment, a tf.data pipeline over the TFRecord
  shards, a CQL-SAC agent, a Learner that trains from the dataset, and an
  Actor that periodically evaluates the greedy policy. Runs
  `num_gradient_updates` gradient steps in chunks of
  `learner_iterations_per_call`.

  Args:
    root_dir: Directory for checkpoints, saved policies, and summaries.
    dataset_path: A `.tfrecord` path, or a directory in which
      `<env_name>*.tfrecord` shards are looked up.
    env_name: D4RL environment name used for evaluation.
    tpu, use_gpu: Passed to strategy_utils.get_strategy to pick the
      distribution strategy.
    num_gradient_updates: Total number of gradient steps to perform.
    Remaining keyword groups mirror the inline "# ... params" comments:
    network sizes, CQL-SAC hyperparameters, dataset pipeline knobs, and
    logging/eval cadence. All are gin-configurable.
  """
  logging.info('Training CQL-SAC on: %s', env_name)
  # Seed both TF and NumPy; `None` leaves both nondeterministic.
  tf.random.set_seed(seed)
  np.random.seed(seed)
  # Load environment.
  env = load_d4rl(env_name)
  tf_env = tf_py_environment.TFPyEnvironment(env)
  strategy = strategy_utils.get_strategy(tpu, use_gpu)
  # A bare directory is expanded to a glob over this environment's shards.
  if not dataset_path.endswith('.tfrecord'):
    dataset_path = os.path.join(dataset_path, env_name,
                                '%s*.tfrecord' % env_name)
  logging.info('Loading dataset from %s', dataset_path)
  dataset_paths = tf.io.gfile.glob(dataset_path)
  # Create dataset. Built under the strategy scope so it can be distributed.
  with strategy.scope():
    dataset = create_tf_record_dataset(
        dataset_paths,
        batch_size,
        shuffle_buffer_size_per_record=data_shuffle_buffer_size_per_record,
        shuffle_buffer_size=data_shuffle_buffer_size,
        num_shards=data_num_shards,
        cycle_length=data_cycle_length,
        block_length=data_block_length,
        num_parallel_reads=data_parallel_reads,
        num_parallel_calls=data_parallel_calls,
        num_prefetch=data_prefetch,
        strategy=strategy,
        reward_shift=reward_shift,
        action_clipping=action_clipping,
        use_trajectories=use_trajectories)
  # Create agent. Specs come from the evaluation environment, so the dataset
  # is assumed to match them — TODO confirm for custom datasets.
  time_step_spec = tf_env.time_step_spec()
  observation_spec = time_step_spec.observation
  action_spec = tf_env.action_spec()
  with strategy.scope():
    train_step = train_utils.create_train_step()
    actor_net = actor_distribution_network.ActorDistributionNetwork(
        observation_spec,
        action_spec,
        fc_layer_params=actor_fc_layers,
        continuous_projection_net=tanh_normal_projection_network
        .TanhNormalProjectionNetwork)
    critic_net = critic_network.CriticNetwork(
        (observation_spec, action_spec),
        joint_fc_layer_params=critic_joint_fc_layers,
        kernel_initializer='glorot_uniform',
        last_kernel_initializer='glorot_uniform')
    agent = cql_sac_agent.CqlSacAgent(
        time_step_spec,
        action_spec,
        actor_network=actor_net,
        critic_network=critic_net,
        actor_optimizer=tf.keras.optimizers.Adam(
            learning_rate=actor_learning_rate),
        critic_optimizer=tf.keras.optimizers.Adam(
            learning_rate=critic_learning_rate),
        alpha_optimizer=tf.keras.optimizers.Adam(
            learning_rate=alpha_learning_rate),
        cql_alpha=cql_alpha,
        num_cql_samples=num_cql_samples,
        include_critic_entropy_term=include_critic_entropy_term,
        use_lagrange_cql_alpha=use_lagrange_cql_alpha,
        cql_alpha_learning_rate=cql_alpha_learning_rate,
        target_update_tau=5e-3,
        target_update_period=1,
        random_seed=seed,
        cql_tau=cql_tau,
        reward_noise_variance=reward_noise_variance,
        num_bc_steps=bc_steps,
        td_errors_loss_fn=tf.math.squared_difference,
        gamma=0.99,
        reward_scale_factor=reward_scale_factor,
        gradient_clipping=None,
        log_cql_alpha_clipping=log_cql_alpha_clipping,
        softmax_temperature=softmax_temperature,
        debug_summaries=debug_summaries,
        summarize_grads_and_vars=summarize_grads_and_vars,
        train_step_counter=train_step)
    agent.initialize()
  # Create learner. The PolicySavedModelTrigger exports the policy every
  # `policy_save_interval` steps, tagging it with the env-step metric.
  saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
  collect_env_step_metric = py_metrics.EnvironmentSteps()
  learning_triggers = [
      triggers.PolicySavedModelTrigger(
          saved_model_dir,
          agent,
          train_step,
          interval=policy_save_interval,
          metadata_metrics={
              triggers.ENV_STEP_METADATA_KEY: collect_env_step_metric
          }),
      triggers.StepPerSecondLogTrigger(train_step, interval=100)
  ]
  cql_learner = learner.Learner(
      root_dir,
      train_step,
      agent,
      experience_dataset_fn=lambda: dataset,
      triggers=learning_triggers,
      summary_interval=summary_interval,
      strategy=strategy)
  # Create actor for evaluation. Uses the greedy (deterministic) policy.
  eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
      agent.policy, use_tf_function=True)
  eval_actor = actor.Actor(
      env,
      eval_greedy_policy,
      train_step,
      metrics=actor.eval_metrics(eval_episodes),
      summary_dir=os.path.join(root_dir, 'eval'),
      episodes_per_run=eval_episodes)
  # Run.
  dummy_trajectory = trajectory.mid((), (), (), 0., 1.)
  num_learner_iterations = int(num_gradient_updates /
                               learner_iterations_per_call)
  for _ in range(num_learner_iterations):
    # Mimic collecting environment steps since we loaded a static dataset.
    for _ in range(learner_iterations_per_call):
      collect_env_step_metric(dummy_trajectory)
    cql_learner.run(iterations=learner_iterations_per_call)
    # NOTE(review): evaluation only fires when train_step lands exactly on a
    # multiple of eval_interval; this holds with the default flag values
    # (500 iterations/call, eval every 10000) — confirm if either changes.
    if eval_interval and train_step.numpy() % eval_interval == 0:
      eval_actor.run_and_log()
def main(_):
  """Parses gin configs and launches train_eval with the flag values."""
  logging.set_verbosity(logging.INFO)
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
  # Collect the flag-controlled overrides first, then launch in one call.
  overrides = dict(
      root_dir=FLAGS.root_dir,
      dataset_path=FLAGS.dataset_path,
      env_name=FLAGS.env_name,
      tpu=FLAGS.tpu,
      use_gpu=FLAGS.use_gpu,
      num_gradient_updates=FLAGS.num_gradient_updates,
      policy_save_interval=FLAGS.policy_save_interval,
      eval_interval=FLAGS.eval_interval,
      summary_interval=FLAGS.summary_interval,
      learner_iterations_per_call=FLAGS.learner_iterations_per_call,
      reward_shift=FLAGS.reward_shift,
      action_clipping=FLAGS.action_clipping,
      use_trajectories=FLAGS.use_trajectories)
  train_eval(**overrides)
if __name__ == '__main__':
  # root_dir and dataset_path have no usable defaults, so require them
  # before handing control to absl's app runner.
  flags.mark_flag_as_required('root_dir')
  flags.mark_flag_as_required('dataset_path')
  app.run(main)
|
[] |
[] |
[
"TEST_UNDECLARED_OUTPUTS_DIR"
] |
[]
|
["TEST_UNDECLARED_OUTPUTS_DIR"]
|
python
| 1 | 0 | |
ml_service/util/env_variables.py
|
"""Env dataclass to load and hold all environment variables
"""
from dataclasses import dataclass
import os
from typing import Optional
from dotenv import load_dotenv
@dataclass(frozen=True)
class Env:
    """Loads all environment variables into a predefined set of properties.

    A frozen dataclass whose field defaults are read from the process
    environment (optionally populated from a local .env file via
    ``load_dotenv()``) at class-definition time, i.e. once at import.
    """
    # to load .env file into environment variables for local execution
    load_dotenv()
    # --- Azure ML workspace / service-principal configuration ---
    workspace_name: Optional[str] = os.environ.get("WORKSPACE_NAME")
    resource_group: Optional[str] = os.environ.get("RESOURCE_GROUP")
    subscription_id: Optional[str] = os.environ.get("SUBSCRIPTION_ID")
    tenant_id: Optional[str] = os.environ.get("TENANT_ID")
    app_id: Optional[str] = os.environ.get("SP_APP_ID")
    app_secret: Optional[str] = os.environ.get("SP_APP_SECRET")
    # --- Compute cluster configuration ---
    vm_size: Optional[str] = os.environ.get("AML_COMPUTE_CLUSTER_CPU_SKU")
    compute_name: Optional[str] = os.environ.get("AML_COMPUTE_CLUSTER_NAME")
    vm_priority: Optional[str] = os.environ.get("AML_CLUSTER_PRIORITY",
                                                'lowpriority')
    min_nodes: int = int(os.environ.get("AML_CLUSTER_MIN_NODES", 0))
    max_nodes: int = int(os.environ.get("AML_CLUSTER_MAX_NODES", 4))
    # --- Pipeline identifiers and script locations ---
    build_id: Optional[str] = os.environ.get("BUILD_BUILDID")
    pipeline_name: Optional[str] = os.environ.get("TRAINING_PIPELINE_NAME")
    scoring_pipeline_name: Optional[str] = os.environ.get(
        "SCORING_PIPELINE_NAME")
    sources_directory_train: Optional[str] = os.environ.get(
        "SOURCES_DIR_TRAIN")
    train_script_path: Optional[str] = os.environ.get("TRAIN_SCRIPT_PATH")
    evaluate_script_path: Optional[str] = os.environ.get(
        "EVALUATE_SCRIPT_PATH")
    register_script_path: Optional[str] = os.environ.get(
        "REGISTER_SCRIPT_PATH")
    scoring_script_path: Optional[str] = os.environ.get(
        "SCORING_SCRIPT_PATH"
    )
    scoring_script_output_path: Optional[str] = os.environ.get(
        "SCORING_SCRIPT_OUTPUT_PATH")
    scoring_script_input_meta: Optional[str] = os.environ.get(
        "SCORING_SCRIPT_INPUT_META")
    scoring_script_input_raw: Optional[str] = os.environ.get(
        "SCORING_SCRIPT_INPUT_RAW")
    # --- Model / training configuration ---
    model_name: Optional[str] = os.environ.get("MODEL_NAME")
    autoencoder_name: Optional[str] = os.environ.get("AUTOENCODER_NAME")
    # NOTE(review): the four int() conversions below have no default, so a
    # missing variable raises TypeError here at import time; confirm they are
    # intended to be hard-required before adding defaults.
    no_of_epochs: int = int(os.environ.get("TRAINING_EPOCHS"))
    batch_size: int = int(os.environ.get("TRAINING_BATCH_SIZE"))
    autoencoder_no_of_epochs: int = int(os.environ.get(
        "AUTOENCODER_EPOCHS"))
    autoencoder_batch_size: int = int(os.environ.get(
        "AUTOENCODER_BATCH_SIZE"))
    experiment_name: Optional[str] = os.environ.get("EXPERIMENT_NAME")
    model_version: Optional[str] = os.environ.get('MODEL_VERSION')
    image_name: Optional[str] = os.environ.get('IMAGE_NAME')
    # --- Misc deployment / dataset settings ---
    db_cluster_id: Optional[str] = os.environ.get("DB_CLUSTER_ID")
    score_script: Optional[str] = os.environ.get("SCORE_SCRIPT")
    build_uri: Optional[str] = os.environ.get("BUILD_URI")
    dataset_name: Optional[str] = os.environ.get("DATASET_NAME")
    label_dataset_name: Optional[str] = os.environ.get("DATASET_NAME_LABEL")
    datastore_name: Optional[str] = os.environ.get("DATASTORE_NAME")
    dataset_version: Optional[str] = os.environ.get("DATASET_VERSION")
    # Textual "true"/"false" flags kept as strings — presumably compared as
    # text by callers; verify usage before converting to bool.
    run_evaluation: Optional[str] = os.environ.get("RUN_EVALUATION", "true")
    allow_run_cancel: Optional[str] = os.environ.get("ALLOW_RUN_CANCEL",
                                                     "true")
    aml_env_name: Optional[str] = os.environ.get("AML_ENV_NAME")
    rebuild_env: Optional[bool] = os.environ.get(
        "AML_REBUILD_ENVIRONMENT", "false").lower().strip() == "true"
|
[] |
[] |
[
"DATASET_VERSION",
"TRAIN_SCRIPT_PATH",
"SCORING_SCRIPT_INPUT_META",
"SCORING_PIPELINE_NAME",
"AUTOENCODER_EPOCHS",
"WORKSPACE_NAME",
"SCORING_SCRIPT_PATH\"\n ",
"TRAINING_PIPELINE_NAME",
"AML_ENV_NAME",
"SUBSCRIPTION_ID",
"TENANT_ID",
"AML_COMPUTE_CLUSTER_NAME",
"REGISTER_SCRIPT_PATH",
"BUILD_URI",
"SCORE_SCRIPT",
"EVALUATE_SCRIPT_PATH",
"AML_COMPUTE_CLUSTER_CPU_SKU",
"AML_CLUSTER_PRIORITY",
"SCORING_SCRIPT_INPUT_RAW",
"AML_CLUSTER_MAX_NODES",
"SP_APP_ID",
"MODEL_VERSION",
"DB_CLUSTER_ID",
"AML_REBUILD_ENVIRONMENT",
"RESOURCE_GROUP",
"SCORING_SCRIPT_OUTPUT_PATH",
"BUILD_BUILDID",
"ALLOW_RUN_CANCEL",
"TRAINING_EPOCHS",
"AUTOENCODER_BATCH_SIZE",
"IMAGE_NAME",
"SOURCES_DIR_TRAIN",
"DATASET_NAME",
"DATASET_NAME_LABEL",
"EXPERIMENT_NAME",
"RUN_EVALUATION",
"AML_CLUSTER_MIN_NODES",
"MODEL_NAME",
"SP_APP_SECRET",
"TRAINING_BATCH_SIZE",
"DATASTORE_NAME",
"AUTOENCODER_NAME"
] |
[]
|
["DATASET_VERSION", "TRAIN_SCRIPT_PATH", "SCORING_SCRIPT_INPUT_META", "SCORING_PIPELINE_NAME", "AUTOENCODER_EPOCHS", "WORKSPACE_NAME", "SCORING_SCRIPT_PATH\"\n ", "TRAINING_PIPELINE_NAME", "AML_ENV_NAME", "SUBSCRIPTION_ID", "TENANT_ID", "AML_COMPUTE_CLUSTER_NAME", "REGISTER_SCRIPT_PATH", "BUILD_URI", "SCORE_SCRIPT", "EVALUATE_SCRIPT_PATH", "AML_COMPUTE_CLUSTER_CPU_SKU", "AML_CLUSTER_PRIORITY", "SCORING_SCRIPT_INPUT_RAW", "AML_CLUSTER_MAX_NODES", "SP_APP_ID", "MODEL_VERSION", "DB_CLUSTER_ID", "AML_REBUILD_ENVIRONMENT", "RESOURCE_GROUP", "SCORING_SCRIPT_OUTPUT_PATH", "BUILD_BUILDID", "ALLOW_RUN_CANCEL", "TRAINING_EPOCHS", "AUTOENCODER_BATCH_SIZE", "IMAGE_NAME", "SOURCES_DIR_TRAIN", "DATASET_NAME", "DATASET_NAME_LABEL", "EXPERIMENT_NAME", "RUN_EVALUATION", "AML_CLUSTER_MIN_NODES", "MODEL_NAME", "SP_APP_SECRET", "TRAINING_BATCH_SIZE", "DATASTORE_NAME", "AUTOENCODER_NAME"]
|
python
| 42 | 0 | |
access/resource_secret_acl_test.go
|
package access
import (
"context"
"os"
"testing"
"github.com/databrickslabs/databricks-terraform/common"
"github.com/databrickslabs/databricks-terraform/internal/qa"
"github.com/stretchr/testify/assert"
)
// TestSecretsScopesAclsIntegration exercises the full secret-scope lifecycle
// against a live workspace: create a scope, write/read/list a secret, grant
// and verify an ACL, then tear everything down. It is gated on the CLOUD_ENV
// environment variable so it never runs in plain unit-test environments.
func TestSecretsScopesAclsIntegration(t *testing.T) {
	cloud := os.Getenv("CLOUD_ENV")
	if cloud == "" {
		t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set")
	}
	// Client and API wrappers are built from environment configuration.
	client := common.NewClientFromEnvironment()
	ctx := context.Background()
	scopesAPI := NewSecretScopesAPI(ctx, client)
	secretsAPI := NewSecretsAPI(ctx, client)
	secretAclsAPI := NewSecretAclsAPI(ctx, client)

	testScope := "my-test-scope"
	testKey := "my-test-key"
	testSecret := "my-test-secret"
	initialManagePrincipal := ""
	// TODO: on random group
	testPrincipal := "users"

	err := scopesAPI.Create(SecretScope{
		Name:                   testScope,
		InitialManagePrincipal: initialManagePrincipal,
	})
	assert.NoError(t, err, err)
	defer func() {
		// Deleting scope deletes everything else
		err := scopesAPI.Delete(testScope)
		assert.NoError(t, err, err)
	}()

	// Scope lifecycle: list should contain at least ours; read it back by name.
	scopes, err := scopesAPI.List()
	assert.NoError(t, err, err)
	assert.True(t, len(scopes) >= 1, "Scopes are empty list")

	scope, err := scopesAPI.Read(testScope)
	assert.NoError(t, err, err)
	assert.Equal(t, testScope, scope.Name, "Scope lookup does not yield same scope")

	// Secret lifecycle: create, list, and read back by key.
	err = secretsAPI.Create(testSecret, testScope, testKey)
	assert.NoError(t, err, err)

	secrets, err := secretsAPI.List(testScope)
	assert.NoError(t, err, err)
	assert.True(t, len(secrets) > 0, "Secrets are empty list")

	secret, err := secretsAPI.Read(testScope, testKey)
	assert.NoError(t, err, err)
	assert.Equal(t, testKey, secret.Key, "Secret lookup does not yield same key")

	// ACL lifecycle: grant MANAGE to the principal, list, and read back.
	err = secretAclsAPI.Create(testScope, testPrincipal, ACLPermissionManage)
	assert.NoError(t, err, err)

	secretAcls, err := secretAclsAPI.List(testScope)
	assert.NoError(t, err, err)
	assert.True(t, len(secretAcls) > 0, "Secrets acls are empty list")

	secretACL, err := secretAclsAPI.Read(testScope, testPrincipal)
	assert.NoError(t, err, err)
	assert.Equal(t, testPrincipal, secretACL.Principal, "Secret lookup does not yield same key")
	assert.Equal(t, ACLPermissionManage, secretACL.Permission, "Secret lookup does not yield same key")

	// Explicit cleanup of the secret and ACL (the deferred scope delete would
	// also remove them).
	err = secretsAPI.Delete(testScope, testKey)
	assert.NoError(t, err, err)

	err = secretAclsAPI.Delete(testScope, testPrincipal)
	assert.NoError(t, err, err)
}
// TestResourceSecretACLRead verifies that reading an existing secret ACL
// populates permission, principal, and scope from the composite ID
// "scope|||principal".
func TestResourceSecretACLRead(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{
				Method:   "GET",
				Resource: "/api/2.0/secrets/acls/get?principal=something&scope=global",
				Response: ACLItem{
					Permission: "CAN_MANAGE",
				},
			},
		},
		Resource: ResourceSecretACL(),
		Read:     true,
		ID:       "global|||something",
	}.Apply(t)
	assert.NoError(t, err, err)
	assert.Equal(t, "global|||something", d.Id(), "Id should not be empty")
	assert.Equal(t, "CAN_MANAGE", d.Get("permission"))
	assert.Equal(t, "something", d.Get("principal"))
	assert.Equal(t, "global", d.Get("scope"))
}
// TestResourceSecretACLRead_NotFound verifies that a 404 from the API marks
// the resource as removed from state instead of returning an error.
func TestResourceSecretACLRead_NotFound(t *testing.T) {
	qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{
				Method:   "GET",
				Resource: "/api/2.0/secrets/acls/get?principal=something&scope=global",
				Response: common.APIErrorBody{
					ErrorCode: "NOT_FOUND",
					Message:   "Item not found",
				},
				Status: 404,
			},
		},
		Resource: ResourceSecretACL(),
		Read:     true,
		Removed:  true,
		ID:       "global|||something",
	}.ApplyNoError(t)
}
// TestResourceSecretACLRead_Error verifies that a non-404 API error is
// propagated to the caller while the resource ID is preserved so the read
// can be retried.
func TestResourceSecretACLRead_Error(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{
				Method:   "GET",
				Resource: "/api/2.0/secrets/acls/get?principal=something&scope=global",
				Response: common.APIErrorBody{
					ErrorCode: "INVALID_REQUEST",
					Message:   "Internal error happened",
				},
				Status: 400,
			},
		},
		Resource: ResourceSecretACL(),
		Read:     true,
		ID:       "global|||something",
	}.Apply(t)
	qa.AssertErrorStartsWith(t, err, "Internal error happened")
	assert.Equal(t, "global|||something", d.Id(), "Id should not be empty for error reads")
}
// TestResourceSecretACLCreate verifies the create flow: a PUT of the ACL
// followed by a read-back GET, yielding the composite ID "scope|||principal".
func TestResourceSecretACLCreate(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{
				Method:   "POST",
				Resource: "/api/2.0/secrets/acls/put",
				ExpectedRequest: SecretACLRequest{
					Principal:  "something",
					Permission: "CAN_MANAGE",
					Scope:      "global",
				},
			},
			{
				Method:   "GET",
				Resource: "/api/2.0/secrets/acls/get?principal=something&scope=global",
				Response: ACLItem{
					Permission: "CAN_MANAGE",
				},
			},
		},
		Resource: ResourceSecretACL(),
		State: map[string]interface{}{
			"permission": "CAN_MANAGE",
			"principal":  "something",
			"scope":      "global",
		},
		Create: true,
	}.Apply(t)
	assert.NoError(t, err, err)
	assert.Equal(t, "global|||something", d.Id())
}
// TestResourceSecretACLCreate_Error verifies that an API error during create
// is surfaced and that no resource ID is recorded in state.
func TestResourceSecretACLCreate_Error(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{ // read log output for better stub url...
				Method:   "POST",
				Resource: "/api/2.0/secrets/acls/put",
				Response: common.APIErrorBody{
					ErrorCode: "INVALID_REQUEST",
					Message:   "Internal error happened",
				},
				Status: 400,
			},
		},
		Resource: ResourceSecretACL(),
		State: map[string]interface{}{
			"permission": "CAN_MANAGE",
			"principal":  "something",
			"scope":      "global",
		},
		Create: true,
	}.Apply(t)
	qa.AssertErrorStartsWith(t, err, "Internal error happened")
	assert.Equal(t, "", d.Id(), "Id should be empty for error creates")
}
// TestResourceSecretACLDelete verifies that deleting the resource issues a
// single delete call with scope and principal parsed from the composite ID.
func TestResourceSecretACLDelete(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{
				Method:   "POST",
				Resource: "/api/2.0/secrets/acls/delete",
				ExpectedRequest: map[string]string{
					"scope":     "global",
					"principal": "something",
				},
			},
		},
		Resource: ResourceSecretACL(),
		Delete:   true,
		ID:       "global|||something",
	}.Apply(t)
	assert.NoError(t, err, err)
	assert.Equal(t, "global|||something", d.Id())
}
// TestResourceSecretACLDelete_Error verifies that an API error during delete
// is surfaced while the resource ID stays intact.
func TestResourceSecretACLDelete_Error(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{
				Method:   "POST",
				Resource: "/api/2.0/secrets/acls/delete",
				Response: common.APIErrorBody{
					ErrorCode: "INVALID_REQUEST",
					Message:   "Internal error happened",
				},
				Status: 400,
			},
		},
		Resource: ResourceSecretACL(),
		Delete:   true,
		ID:       "global|||something",
	}.Apply(t)
	qa.AssertErrorStartsWith(t, err, "Internal error happened")
	assert.Equal(t, "global|||something", d.Id())
}
|
[
"\"CLOUD_ENV\""
] |
[] |
[
"CLOUD_ENV"
] |
[]
|
["CLOUD_ENV"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administration utility."""
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AdminPanel.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Invoked as a script: dispatch to Django's command-line handler.
    main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
go/cmd/sqlflowserver/e2e_alisa_test.go
|
// Copyright 2020 The SQLFlow Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
server "sqlflow.org/sqlflow/go/sqlflowserver"
)
// TestEnd2EndAlisa test cases that run on Alisa, Need to set the
// below environment variables to run them:
// SQLFLOW_submitter=alisa
// SQLFLOW_TEST_DATASOURCE="xxx"
// SQLFLOW_OSS_CHECKPOINT_CONFIG="xxx"
// SQLFLOW_OSS_ALISA_ENDPOINT="xxx"
// SQLFLOW_OSS_AK="xxx"
// SQLFLOW_OSS_SK="xxx"
// SQLFLOW_OSS_ALISA_BUCKET="xxx"
// SQLFLOW_OSS_MODEL_ENDPOINT="xxx"
// TestEnd2EndAlisa runs the Alisa end-to-end cases. It is double-gated:
// both SQLFLOW_TEST_DB and SQLFLOW_submitter must equal "alisa". The data
// source and MaxCompute project come from the environment variables listed
// in the comment above this function.
func TestEnd2EndAlisa(t *testing.T) {
	testDBDriver := os.Getenv("SQLFLOW_TEST_DB")
	if testDBDriver != "alisa" {
		t.Skip("Skipping non alisa tests")
	}
	if os.Getenv("SQLFLOW_submitter") != "alisa" {
		t.Skip("Skip non Alisa tests")
	}
	dbConnStr = os.Getenv("SQLFLOW_TEST_DATASOURCE")
	// Self-signed CA pair for the test gRPC server; removed when the test ends.
	tmpDir, caCrt, caKey, err := generateTempCA()
	defer os.RemoveAll(tmpDir)
	if err != nil {
		t.Fatalf("failed to generate CA pair %v", err)
	}
	caseDB = os.Getenv("SQLFLOW_TEST_DB_MAXCOMPUTE_PROJECT")
	if caseDB == "" {
		t.Fatalf("Must set env SQLFLOW_TEST_DB_MAXCOMPUTE_PROJECT")
	}
	// Shared table names used by the individual cases below.
	caseTrainTable = caseDB + ".sqlflow_test_iris_train"
	caseTestTable = caseDB + ".sqlflow_test_iris_test"
	casePredictTable = caseDB + ".sqlflow_test_iris_predict"
	// write model to current MaxCompute project
	caseInto = "sqlflow_test_kmeans_model"

	// Start the SQLFlow server in the background and wait until it accepts
	// connections before running the cases against it.
	go start(caCrt, caKey, unitTestPort, false)
	server.WaitPortReady(fmt.Sprintf("localhost:%d", unitTestPort), 0)
	// TODO(Yancey1989): reuse CaseTrainXGBoostOnPAI if support explain XGBoost model
	t.Run("CaseTrainXGBoostOnAlisa", CaseTrainXGBoostOnAlisa)
	t.Run("CaseTrainPAIKMeans", CaseTrainPAIKMeans)
}
// CaseTrainXGBoostOnAlisa trains a multi-class XGBoost model on Alisa, then
// runs PREDICT and EXPLAIN statements against the trained model. A failure in
// any statement fails the test with that statement's error.
func CaseTrainXGBoostOnAlisa(t *testing.T) {
	a := assert.New(t)
	model := "my_xgb_class_model"
	// Train from the shared iris training table set up in TestEnd2EndAlisa.
	trainSQL := fmt.Sprintf(`SELECT * FROM %s
TO TRAIN xgboost.gbtree
WITH
	objective="multi:softprob",
	train.num_boost_round = 30,
	eta = 0.4,
	num_class = 3
LABEL class
INTO %s;`, caseTrainTable, model)
	if _, _, _, err := connectAndRunSQL(trainSQL); err != nil {
		a.Fail("Run trainSQL error: %v", err)
	}
	predSQL := fmt.Sprintf(`SELECT * FROM %s
TO PREDICT %s.class
USING %s;`, caseTestTable, casePredictTable, model)
	if _, _, _, err := connectAndRunSQL(predSQL); err != nil {
		a.Fail("Run predSQL error: %v", err)
	}
	explainSQL := fmt.Sprintf(`SELECT * FROM %s
TO EXPLAIN %s
WITH label_col=class
USING TreeExplainer
INTO my_xgb_explain_result;`, caseTrainTable, model)
	if _, _, _, err := connectAndRunSQL(explainSQL); err != nil {
		// Fixed: this branch previously reported "Run predSQL error", which
		// made EXPLAIN failures look like prediction failures in test logs.
		a.Fail("Run explainSQL error: %v", err)
	}
}
|
[
"\"SQLFLOW_TEST_DB\"",
"\"SQLFLOW_submitter\"",
"\"SQLFLOW_TEST_DATASOURCE\"",
"\"SQLFLOW_TEST_DB_MAXCOMPUTE_PROJECT\""
] |
[] |
[
"SQLFLOW_TEST_DB",
"SQLFLOW_TEST_DB_MAXCOMPUTE_PROJECT",
"SQLFLOW_submitter",
"SQLFLOW_TEST_DATASOURCE"
] |
[]
|
["SQLFLOW_TEST_DB", "SQLFLOW_TEST_DB_MAXCOMPUTE_PROJECT", "SQLFLOW_submitter", "SQLFLOW_TEST_DATASOURCE"]
|
go
| 4 | 0 | |
venv/Lib/site-packages/IPython/kernel/launcher.py
|
"""Utilities for launching kernels
Authors:
* Min Ragan-Kelley
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
from subprocess import Popen, PIPE
from IPython.utils.encoding import getdefaultencoding
from IPython.utils.py3compat import cast_bytes_py2
#-----------------------------------------------------------------------------
# Launching Kernels
#-----------------------------------------------------------------------------
def swallow_argv(argv, aliases=None, flags=None):
    """Return ``argv`` with frontend-specific aliases and flags removed.

    Frontend apps use this to forward a cleaned-up argument list to a
    subprocess: any option matching one of `aliases` is dropped together with
    its value, and any option matching one of `flags` is dropped on its own.
    Everything after a literal ``--`` is passed through untouched.

    Parameters
    ----------
    argv : list(str)
        The starting argv, to be filtered
    aliases : container of aliases (dict, list, set, etc.)
        The frontend-specific aliases to be removed
    flags : container of flags (dict, list, set, etc.)
        The frontend-specific flags to be removed

    Returns
    -------
    argv : list(str)
        The argv list, excluding flags and aliases that have been stripped
    """
    aliases = set() if aliases is None else aliases
    flags = set() if flags is None else flags
    result = list(argv)  # work on a copy
    eat_next = False          # previous arg was an alias expecting a value
    prev_may_be_flag = False  # that alias also exists as a no-arg flag
    for arg in argv:
        if arg == '--':
            # Explicit end of options: leave the remainder untouched.
            break
        if eat_next:
            eat_next = False
            # The previous alias wants a value -- but when it doubles as a
            # flag and the current token looks like another option, keep it.
            if not (prev_may_be_flag and arg.startswith('-')):
                result.remove(arg)
            continue
        if not arg.startswith('-'):
            continue
        pieces = arg.lstrip('-').split('=')
        name = pieces[0]
        # startswith allows abbreviated options, mirroring argparse's
        # unique-prefix matching (e.g. `--no-br` means `--no-browser`).
        if any(alias.startswith(name) for alias in aliases):
            result.remove(arg)
            if len(pieces) == 1:
                # value was passed via space; consume the next token
                eat_next = True
            prev_may_be_flag = name in flags
        elif len(pieces) == 1 and any(flag.startswith(name) for flag in flags):
            # A pure flag: strip it, nothing to consume afterwards.
            result.remove(arg)
    return result
def make_ipkernel_cmd(code, executable=None, extra_arguments=None, **kw):
    """Build Popen command list for launching an IPython kernel.

    Parameters
    ----------
    code : str,
        A string of Python code that imports and executes a kernel entry point.

    executable : str, optional (default sys.executable)
        The Python executable to use for the kernel process.

    extra_arguments : list, optional
        A list of extra arguments to pass when executing the launch code.
        Defaults to no extra arguments.

    Returns
    -------

    A Popen command list
    """
    # A None sentinel replaces the old mutable [] default, so callers can
    # never share (and accidentally mutate) one module-level default list.
    if extra_arguments is None:
        extra_arguments = []

    # Build the kernel launch command.
    if executable is None:
        executable = sys.executable
    arguments = [executable, '-c', code, '-f', '{connection_file}']
    arguments.extend(extra_arguments)

    # Spawn a kernel.
    if sys.platform == 'win32':
        # If the kernel is running on pythonw and stdout/stderr are not been
        # re-directed, it will crash when more than 4KB of data is written to
        # stdout or stderr. This is a bug that has been with Python for a very
        # long time; see http://bugs.python.org/issue706263.
        # A cleaner solution to this problem would be to pass os.devnull to
        # Popen directly. Unfortunately, that does not work.
        if executable.endswith('pythonw.exe'):
            arguments.append('--no-stdout')
            arguments.append('--no-stderr')

    return arguments
def launch_kernel(cmd, stdin=None, stdout=None, stderr=None,
                  independent=False,
                  cwd=None, ipython_kernel=True,
                  env=None,
                  **kw
                  ):
    """ Launches a localhost kernel, binding to the specified ports.

    Parameters
    ----------
    cmd : Popen list,
        A string of Python code that imports and executes a kernel entry point.

    stdin, stdout, stderr : optional (default None)
        Standards streams, as defined in subprocess.Popen.

    independent : bool, optional (default False)
        If set, the kernel process is guaranteed to survive if this process
        dies. If not set, an effort is made to ensure that the kernel is killed
        when this process dies. Note that in this case it is still good practice
        to kill kernels manually before exiting.

    cwd : path, optional
        The working dir of the kernel process (default: cwd of this process).

    ipython_kernel : bool, optional
        Whether the kernel is an official IPython one,
        and should get a bit of special treatment.

    env : dict, optional
        Environment for the kernel process (default: a copy of os.environ).

    Returns
    -------

    Popen instance for the kernel subprocess
    """
    # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
    # are invalid. Unfortunately, there is in general no way to detect whether
    # they are valid.  The following two blocks redirect them to (temporary)
    # pipes in certain important cases.

    # If this process has been backgrounded, our stdin is invalid. Since there
    # is no compelling reason for the kernel to inherit our stdin anyway, we'll
    # place this one safe and always redirect.
    redirect_in = True
    _stdin = PIPE if stdin is None else stdin

    # If this process in running on pythonw, we know that stdin, stdout, and
    # stderr are all invalid.
    redirect_out = sys.executable.endswith('pythonw.exe')
    if redirect_out:
        _stdout = PIPE if stdout is None else stdout
        _stderr = PIPE if stderr is None else stderr
    else:
        _stdout, _stderr = stdout, stderr

    # BUGFIX: this computed env used to be ignored -- every Popen call below
    # passed env=os.environ, silently discarding the caller-supplied `env`
    # and the IPY_INTERRUPT_EVENT entry added on Windows. All Popen calls
    # now pass `env`.
    env = env if (env is not None) else os.environ.copy()

    # Spawn a kernel.
    if sys.platform == 'win32':
        # The default encoding is only needed for the byte-casting below, so
        # it is now computed inside the win32 branch (it was previously
        # computed unconditionally for every platform).
        encoding = getdefaultencoding(prefer_stream=False)
        # Popen on Python 2 on Windows cannot handle unicode args or cwd
        cmd = [ cast_bytes_py2(c, encoding) for c in cmd ]
        if cwd:
            cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii')

        from IPython.kernel.zmq.parentpoller import ParentPollerWindows
        # Create a Win32 event for interrupting the kernel.
        interrupt_event = ParentPollerWindows.create_interrupt_event()
        # Store this in an environment variable for third party kernels, but at
        # present, our own kernel expects this as a command line argument.
        env["IPY_INTERRUPT_EVENT"] = str(interrupt_event)
        if ipython_kernel:
            cmd += [ '--interrupt=%i' % interrupt_event ]

        # If the kernel is running on pythonw and stdout/stderr are not been
        # re-directed, it will crash when more than 4KB of data is written to
        # stdout or stderr. This is a bug that has been with Python for a very
        # long time; see http://bugs.python.org/issue706263.
        # A cleaner solution to this problem would be to pass os.devnull to
        # Popen directly. Unfortunately, that does not work.
        if cmd[0].endswith('pythonw.exe'):
            if stdout is None:
                cmd.append('--no-stdout')
            if stderr is None:
                cmd.append('--no-stderr')

        # Launch the kernel process.
        if independent:
            proc = Popen(cmd,
                         creationflags=512, # CREATE_NEW_PROCESS_GROUP
                         stdin=_stdin, stdout=_stdout, stderr=_stderr, env=env)
        else:
            if ipython_kernel:
                try:
                    from _winapi import DuplicateHandle, GetCurrentProcess, \
                        DUPLICATE_SAME_ACCESS
                except:
                    from _subprocess import DuplicateHandle, GetCurrentProcess, \
                        DUPLICATE_SAME_ACCESS
                pid = GetCurrentProcess()
                handle = DuplicateHandle(pid, pid, pid, 0,
                                         True, # Inheritable by new processes.
                                         DUPLICATE_SAME_ACCESS)
                cmd +=[ '--parent=%i' % handle ]

            proc = Popen(cmd,
                         stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd, env=env)

        # Attach the interrupt event to the Popen objet so it can be used later.
        proc.win32_interrupt_event = interrupt_event

    else:
        if independent:
            # setsid detaches the kernel from our process group so it
            # survives if we die.
            proc = Popen(cmd, preexec_fn=lambda: os.setsid(),
                         stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd, env=env)
        else:
            if ipython_kernel:
                cmd += ['--parent=1']
            proc = Popen(cmd,
                         stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd, env=env)

    # Clean up pipes created to work around Popen bug.
    if redirect_in:
        if stdin is None:
            proc.stdin.close()
    if redirect_out:
        if stdout is None:
            proc.stdout.close()
        if stderr is None:
            proc.stderr.close()

    return proc
# Public API of this module.
__all__ = [
    'swallow_argv',
    'make_ipkernel_cmd',
    'launch_kernel',
]
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
preprocessing/bbmap/current/align2/Shared.java
|
package align2;
import java.lang.management.ManagementFactory;
import java.util.List;
import dna.Data;
/**
 * Global configuration and shared utilities for BBMap: thread/buffer sizing,
 * alignment gap constants, MPI switches, temp-directory resolution, and
 * memory-reporting helpers. All state is static; the class is never
 * instantiated.
 */
public class Shared {

	// NOTE(review): initializer order matters here. setThreads(-1) runs first
	// and already writes READ_BUFFER_NUM_BUFFERS via setBuffers(); the explicit
	// READ_BUFFER_NUM_BUFFERS=setBuffers() initializer below then runs again.
	// Confirm before reordering these fields.
	private static int THREADS=setThreads(-1);

	/** Reads per buffer passed between worker threads. */
	public static int READ_BUFFER_LENGTH=200;
	/** Number of read buffers in flight; derived from the thread count. */
	private static int READ_BUFFER_NUM_BUFFERS=setBuffers();
	/** Upper bound on data held per read buffer. */
	public static long READ_BUFFER_MAX_DATA=400000;

	/** Temporary, for testing; will be made non-global */
	public static boolean AMINO_IN=false;

	//TODO: For some reason, it seems as though GAPBUFFER must equal exactly 1/2 of GAPLEN.  Not good; 1/4 would be far better.
	public static final int GAPBUFFER=64; //TODO: Seems to break less than 64, for some reason
	public static final int GAPBUFFER2=2*GAPBUFFER;
	public static final int GAPLEN=128; //TODO: May break when over 128
	/** Minimum gap size: two buffers plus one gap symbol run. */
	public static final int MINGAP=GAPBUFFER2+GAPLEN;
	public static final int GAPCOST=Tools.max(1, GAPLEN/64);
	/** Character used to render a gap in alignments. */
	public static final byte GAPC='-';

	public static String BBMAP_VERSION_STRING="35.85";

	public static boolean TRIM_READ_COMMENTS=false;

	public static boolean USE_JNI=false;//Data.GENEPOOL;
	public static boolean USE_MPI=false;
	public static boolean MPI_KEEP_ALL=true;
	/** Use ConcurrentReadInputStreamMPI instead of D */
	public static boolean USE_CRISMPI=true;
	public static int MPI_RANK=0;
	public static int MPI_NUM_RANKS=1;

	/** Line width used when wrapping FASTA output. */
	public static int FASTA_WRAP=70;
	/** Quality score substituted when input has no quality data. */
	public static byte FAKE_QUAL=30;

	public static String BBMAP_CLASS=null;
	public static String[] COMMAND_LINE=null;

	/** Returns the JVM arguments this process was launched with. */
	public static List<String> JVM_ARGS(){
		return ManagementFactory.getRuntimeMXBean().getInputArguments();
	}

	/**
	 * Estimates memory available for use, discounting JVM overhead.
	 * The constants are an empirical heuristic — presumably tuned for
	 * typical BBMap workloads; confirm before changing.
	 */
	public static long getAvailableMemory(){
		long usableMemory;
		{
			long memory=Runtime.getRuntime().maxMemory();
			double xmsRatio=Shared.xmsRatio();
			usableMemory=(long)Tools.max(((memory-96000000-(20*400000))*(xmsRatio>0.97 ? 0.82 : 0.75)), memory*0.45);
		}
		return usableMemory;
	}

	/** Directory in which to write temp files */
	// Normalized to always end with exactly one '/'; null when TMPDIR is unset.
	public static String TMPDIR=(System.getenv("TMPDIR")==null ? null : (System.getenv("TMPDIR")+"/").replaceAll("//", "/"));
//	static{assert(false) : "TMPDIR="+TMPDIR;}

	/** Anomaly probably resolved as of v.20.1
	 * This variable should be TRUE for normal users and FALSE for me. */
	public static boolean anomaly=!System.getProperty("user.dir").contains("/bushnell/") && !Data.WINDOWS;

	/**
	 * Returns a thread-local char scratch buffer of at least {@code len}
	 * chars. Buffers of 1M chars or more are not cached, to bound
	 * per-thread memory retention.
	 */
	public static final char[] getTLCB(int len){
		char[] buffer=TLCB.get();
		if(buffer==null || buffer.length<len){
			buffer=new char[len];
			if(len<1000000){TLCB.set(buffer);}
		}
		return buffer;
	}
	/** Backing store for getTLCB's per-thread scratch buffers. */
	private static final ThreadLocal<char[]> TLCB=new ThreadLocal<char[]>();

	/** Parses a thread-count string; null or "auto" means use all logical processors. */
	public static int setThreads(String x){
		int y=Data.LOGICAL_PROCESSORS;
		if(x!=null && !x.equalsIgnoreCase("auto")){
			y=Integer.parseInt(x);
		}
		return setThreads(y);
	}

	/** Sets the global thread count (non-positive means auto) and resizes buffers to match. */
	public static int setThreads(int x){
		if(x>0){
			THREADS=x;
		}else{
			THREADS=Tools.max(1, Data.LOGICAL_PROCESSORS);
		}
		setBuffers();
		return THREADS;
	}

	public static int threads(){
		assert(THREADS>0);
		return THREADS;
	}

	/** Caps the buffer count at {@code num} without ever raising it. */
	public static int capBuffers(int num){
		return setBuffers(Tools.min(num, READ_BUFFER_NUM_BUFFERS));
	}

	public static int setBuffers(){
		return setBuffersFromThreads(THREADS);
	}

	/** Buffer count heuristic: 1.5x the thread count, at least 4. */
	public static int setBuffersFromThreads(int threads){
		return setBuffers(Tools.max(4, (threads*3)/2));
	}

	/** Sets the buffer count, clamped to a minimum of 2. */
	public static int setBuffers(int num){
		num=Tools.max(2, num);
		return READ_BUFFER_NUM_BUFFERS=num;
	}

	public static int numBuffers(){
		return READ_BUFFER_NUM_BUFFERS;
	}

	public static boolean LOW_MEMORY=false;

	/** Ratio of -Xms to -Xmx parameters */
	public static final double xmsRatio(){
		Runtime rt=Runtime.getRuntime();
		return rt.totalMemory()*1.0/rt.maxMemory();
	}

	/** Print statistics about current memory use and availability */
	public static final void printMemory(){
		if(GC_BEFORE_PRINT_MEMORY){
			System.gc();
			System.gc();
		}
		Runtime rt=Runtime.getRuntime();
		long mmemory=rt.maxMemory()/1000000;
		long tmemory=rt.totalMemory()/1000000;
		long fmemory=rt.freeMemory()/1000000;
		long umemory=tmemory-fmemory;
		System.err.println("Memory: "+"max="+mmemory+/*"m, total="+tmemory+*/"m, "+"free="+fmemory+"m, used="+umemory+"m");
	}

	/** Do garbage collection prior to printing memory usage */
	private static final boolean GC_BEFORE_PRINT_MEMORY=false;

}
|
[
"\"TMPDIR\"",
"\"TMPDIR\""
] |
[] |
[
"TMPDIR"
] |
[]
|
["TMPDIR"]
|
java
| 1 | 0 | |
shared/version/api.go
|
package version
import (
"os"
"strconv"
)
// APIVersion contains the API base version. Only bumped for backward incompatible changes.
var APIVersion = "1.0"
// APIExtensions is the list of all API extensions in the order they were added.
//
// The following kind of changes come with a new extensions:
//
// - New configuration key
// - New valid values for a configuration key
// - New REST API endpoint
// - New argument inside an existing REST API call
// - New HTTPs authentication mechanisms or protocols
//
// This list is used mainly by the LXD server code, but it's in the shared
// package as well for reference.
var APIExtensions = []string{
"storage_zfs_remove_snapshots",
"container_host_shutdown_timeout",
"container_stop_priority",
"container_syscall_filtering",
"auth_pki",
"container_last_used_at",
"etag",
"patch",
"usb_devices",
"https_allowed_credentials",
"image_compression_algorithm",
"directory_manipulation",
"container_cpu_time",
"storage_zfs_use_refquota",
"storage_lvm_mount_options",
"network",
"profile_usedby",
"container_push",
"container_exec_recording",
"certificate_update",
"container_exec_signal_handling",
"gpu_devices",
"container_image_properties",
"migration_progress",
"id_map",
"network_firewall_filtering",
"network_routes",
"storage",
"file_delete",
"file_append",
"network_dhcp_expiry",
"storage_lvm_vg_rename",
"storage_lvm_thinpool_rename",
"network_vlan",
"image_create_aliases",
"container_stateless_copy",
"container_only_migration",
"storage_zfs_clone_copy",
"unix_device_rename",
"storage_lvm_use_thinpool",
"storage_rsync_bwlimit",
"network_vxlan_interface",
"storage_btrfs_mount_options",
"entity_description",
"image_force_refresh",
"storage_lvm_lv_resizing",
"id_map_base",
"file_symlinks",
"container_push_target",
"network_vlan_physical",
"storage_images_delete",
"container_edit_metadata",
"container_snapshot_stateful_migration",
"storage_driver_ceph",
"storage_ceph_user_name",
"resource_limits",
"storage_volatile_initial_source",
"storage_ceph_force_osd_reuse",
"storage_block_filesystem_btrfs",
"resources",
"kernel_limits",
"storage_api_volume_rename",
"macaroon_authentication",
"network_sriov",
"console",
"restrict_devlxd",
"migration_pre_copy",
"infiniband",
"maas_network",
"devlxd_events",
"proxy",
"network_dhcp_gateway",
"file_get_symlink",
"network_leases",
"unix_device_hotplug",
"storage_api_local_volume_handling",
"operation_description",
"clustering",
"event_lifecycle",
"storage_api_remote_volume_handling",
"nvidia_runtime",
"container_mount_propagation",
"container_backup",
"devlxd_images",
"container_local_cross_pool_handling",
"proxy_unix",
"proxy_udp",
"clustering_join",
"proxy_tcp_udp_multi_port_handling",
"network_state",
"proxy_unix_dac_properties",
"container_protection_delete",
"unix_priv_drop",
"pprof_http",
"proxy_haproxy_protocol",
"network_hwaddr",
"proxy_nat",
"network_nat_order",
"container_full",
"candid_authentication",
"backup_compression",
"candid_config",
"nvidia_runtime_config",
"storage_api_volume_snapshots",
"storage_unmapped",
"projects",
"candid_config_key",
"network_vxlan_ttl",
"container_incremental_copy",
"usb_optional_vendorid",
"snapshot_scheduling",
"container_copy_project",
"clustering_server_address",
"clustering_image_replication",
"container_protection_shift",
"snapshot_expiry",
"container_backup_override_pool",
"snapshot_expiry_creation",
"network_leases_location",
"resources_cpu_socket",
"resources_gpu",
"resources_numa",
"kernel_features",
"id_map_current",
"event_location",
"storage_api_remote_volume_snapshots",
"network_nat_address",
"container_nic_routes",
"rbac",
"cluster_internal_copy",
"seccomp_notify",
"lxc_features",
"container_nic_ipvlan",
"network_vlan_sriov",
"storage_cephfs",
"container_nic_ipfilter",
"resources_v2",
"container_exec_user_group_cwd",
"container_syscall_intercept",
"container_disk_shift",
"storage_shifted",
"resources_infiniband",
"daemon_storage",
"instances",
"image_types",
"resources_disk_sata",
"clustering_roles",
"images_expiry",
"resources_network_firmware",
"backup_compression_algorithm",
"ceph_data_pool_name",
"container_syscall_intercept_mount",
"compression_squashfs",
"container_raw_mount",
"container_nic_routed",
"container_syscall_intercept_mount_fuse",
"container_disk_ceph",
"virtual-machines",
"image_profiles",
"clustering_architecture",
"resources_disk_id",
"storage_lvm_stripes",
"vm_boot_priority",
"unix_hotplug_devices",
"api_filtering",
"instance_nic_network",
"clustering_sizing",
"firewall_driver",
"projects_limits",
"container_syscall_intercept_hugetlbfs",
"limits_hugepages",
"container_nic_routed_gateway",
"projects_restrictions",
"custom_volume_snapshot_expiry",
"volume_snapshot_scheduling",
"trust_ca_certificates",
"snapshot_disk_usage",
"clustering_edit_roles",
"container_nic_routed_host_address",
"container_nic_ipvlan_gateway",
"resources_usb_pci",
"resources_cpu_threads_numa",
"resources_cpu_core_die",
"api_os",
"container_nic_routed_host_table",
"container_nic_ipvlan_host_table",
"container_nic_ipvlan_mode",
"resources_system",
"images_push_relay",
"network_dns_search",
"container_nic_routed_limits",
"instance_nic_bridged_vlan",
"network_state_bond_bridge",
"usedby_consistency",
"custom_block_volumes",
"clustering_failure_domains",
"resources_gpu_mdev",
"console_vga_type",
"projects_limits_disk",
"network_type_macvlan",
"network_type_sriov",
"container_syscall_intercept_bpf_devices",
"network_type_ovn",
"projects_networks",
"projects_networks_restricted_uplinks",
"custom_volume_backup",
"backup_override_name",
"storage_rsync_compression",
"network_type_physical",
}
// APIExtensionsCount returns the number of available API extensions.
func APIExtensionsCount() int {
count := len(APIExtensions)
// This environment variable is an internal one to force the code
// to believe that we have an API extensions count greater than we
// actually have. It's used by integration tests to exercise the
// cluster upgrade process.
artificialBump := os.Getenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS")
if artificialBump != "" {
n, err := strconv.Atoi(artificialBump)
if err == nil {
count += n
}
}
return count
}
|
[
"\"LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS\""
] |
[] |
[
"LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"
] |
[]
|
["LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"]
|
go
| 1 | 0 | |
steps/source_extraction.py
|
# -*- coding: utf-8 -*-
import datetime
import data_base_manipulation as db
import analysis_files_manipulation as fm
import caiman as cm
from caiman.source_extraction import cnmf
from caiman.source_extraction.cnmf import params as params
import caiman.base.rois
import logging
import numpy as np
import os
import psutil
#step = 'source_extraction'
#%% MAIN
def run_source_extraction(row, parameters, dview, session_wise = False):
'''
This is the function for source extraction.
Its goal is to take in a .mmap file,
perform source extraction on it using cnmf-e and save the cnmf object as a .pkl file.
This function is only runnable on the cn76 server because it requires parralel processing.
Args:
row: pd.DataFrame object
The row corresponding to the analysis state to be source extracted.
Returns:
row: pd.DataFrame object
The row corresponding to the source extracted analysis state.
'''
step_index = 4
row_local = row.copy()
row_local.loc['source_extraction_parameters'] = str(parameters)
row_local = db.set_version_analysis('source_extraction',row_local,session_wise)
index = row_local.name
# Determine input path
if parameters['session_wise']:
input_mmap_file_path = eval(row_local.loc['alignment_output'])['main']
else:
input_mmap_file_path = eval(row_local.loc['motion_correction_output'])['main']
if not os.path.isfile(input_mmap_file_path):
logging.error('Input file does not exist. Cancelling.')
return row_local
# Determine output paths
file_name = db.create_file_name(step_index, index)
data_dir = 'data/interim/source_extraction/session_wise/' if parameters['session_wise'] else 'data/interim/source_extraction/trial_wise/'
output_file_path = data_dir + f'main/{file_name}.hdf5'
# Create a dictionary with parameters
output = {
'main': output_file_path,
'meta':{
'analysis' : {
'analyst' : os.environ['ANALYST'],
'date' : datetime.datetime.today().strftime("%m-%d-%Y"),
'time' : datetime.datetime.today().strftime("%H:%M:%S"),
},
'duration': {}
}
}
# Load memmory mappable input file
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
logging.warning(f'{index} .mmap file does not exist. Cancelling')
return row_local
# SOURCE EXTRACTION
# Check if the summary images are already there
corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(index, gSig_abs = parameters['gSig'][0])
if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path):
# Already computed summary images
logging.info(f'{index} Already computed summary images')
cn_filter = np.load(corr_npy_file_path)
pnr = np.load(pnr_npy_file_path)
else:
# Compute summary images
t0 = datetime.datetime.today()
logging.info(f'{index} Computing summary images')
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig = parameters['gSig'][0], swap_dim=False)
dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
output['meta']['duration']['summary_images'] = dt
logging.info(f'{index} Computed summary images. dt = {dt} min')
# Saving summary images as npy files
gSig = parameters['gSig'][0]
corr_npy_file_path = data_dir + f'/meta/corr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
pnr_npy_file_path = data_dir + f'/meta/pnr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
with open(corr_npy_file_path, 'wb') as f:
np.save(f, cn_filter)
with open(pnr_npy_file_path, 'wb') as f:
np.save(f, pnr)
# Store the paths in the meta dictionary
output['meta']['corr'] = {'main': corr_npy_file_path, 'meta': {}}
output['meta']['pnr'] = {'main': pnr_npy_file_path, 'meta': {}}
# Calculate min, mean, max value for cn_filter and pnr
corr_min, corr_mean, corr_max = cn_filter.min(), cn_filter.mean(), cn_filter.max()
output['meta']['corr']['meta'] = {'min': corr_min, 'mean': corr_mean, 'max': corr_max}
pnr_min, pnr_mean, pnr_max = pnr.min(), pnr.mean(), pnr.max()
output['meta']['pnr']['meta'] = {'min': pnr_min, 'mean': pnr_mean, 'max': pnr_max}
# If min_corr and min_pnr are specified via a linear equation, calculate
# this value
if type(parameters['min_corr']) == list:
min_corr = parameters['min_corr'][0]*corr_mean + parameters['min_corr'][1]
parameters['min_corr'] = min_corr
logging.info(f'{index} Automatically setting min_corr = {min_corr}')
if type(parameters['min_pnr']) == list:
min_pnr = parameters['min_pnr'][0]*pnr_mean + parameters['min_pnr'][1]
parameters['min_pnr'] = min_pnr
logging.info(f'{index} Automatically setting min_pnr = {min_pnr}')
# Set the parameters for caiman
opts = params.CNMFParams(params_dict = parameters)
# SOURCE EXTRACTION
logging.info(f'{index} Performing source extraction')
t0 = datetime.datetime.today()
n_processes = psutil.cpu_count()
logging.info(f'{index} n_processes: {n_processes}')
cnm = cnmf.CNMF(n_processes = n_processes, dview = dview, params = opts)
cnm.fit(images)
cnm.estimates.dims = dims
# Store the number of neurons
output['meta']['K'] = len(cnm.estimates.C)
# Calculate the center of masses
cnm.estimates.center = caiman.base.rois.com(cnm.estimates.A, images.shape[1], images.shape[2])
# Save the cnmf object as a hdf5 file
logging.info(f'{index} Saving cnmf object')
cnm.save(output_file_path)
dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
output['meta']['duration']['source_extraction'] = dt
logging.info(f'{index} Source extraction finished. dt = {dt} min')
# Write necessary variables in row and return
row_local.loc['source_extraction_parameters'] = str(parameters)
row_local.loc['source_extraction_output'] = str(output)
return row_local
|
[] |
[] |
[
"ANALYST"
] |
[]
|
["ANALYST"]
|
python
| 1 | 0 | |
fabfile.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# fabfile for ymir
#
# this file is a self-hosting fabfile, meaning it
# supports direct invocation with standard option
# parsing, including --help and -l (for listing commands).
#
# summary of commands/arguments:
#
# * fab release: update this package on pypi
# * fab version_bump: bump the package version
#
import os
from fabric import api, colors
from fabric.contrib.console import confirm
_ope = os.path.exists
_mkdir = os.mkdir
_expanduser = os.path.expanduser
_dirname = os.path.dirname
ldir = _dirname(__file__)
PKG_NAME = 'ymir'
VERSION_DELTA = .01
@api.task
def version_bump(force=False):
""" bump the version number for """ + PKG_NAME
sandbox = {}
version_file = os.path.join(PKG_NAME, 'version.py')
err = 'version file not found in expected location: ' + version_file
assert os.path.exists(version_file), err
# running "import pkg.version" should have no side-effects,
# so there's little point in ASTing the file. just exec it
execfile(version_file, sandbox)
current_version = sandbox['__version__']
new_version = current_version + VERSION_DELTA
with open(version_file, 'r') as fhandle:
version_file_contents = [
x for x in fhandle.readlines() if x.strip()]
new_file = version_file_contents[:-1] + \
["__version__={0}".format(new_version)]
new_file = '\n'.join(new_file)
if not force:
print colors.red("warning:"),
print " version will be changed to {0}\n".format(new_version)
print colors.red("new version file will look like this:\n")
print new_file
ans = confirm('proceed with version change?')
if not ans:
print 'aborting.'
raise SystemExit(1)
with open(version_file, 'w') as fhandle:
fhandle.write(new_file)
print 'version rewritten to {0}'.format(new_version)
@api.task
def test():
with api.lcd(os.path.dirname(__file__)):
api.local('py.test --cov-config .coveragerc '
'--cov=ymir --cov-report=term -v '
'--pyargs ./tests')
@api.task
def vulture():
with api.lcd(os.path.dirname(__file__)):
api.local(
'vulture ymir --exclude fabfile.py|grep -v _provision_|grep -v ymir/checks.py')
if __name__ == '__main__':
# a neat hack that makes this file a "self-hosting" fabfile,
# ie it is invoked directly but still gets all the fabric niceties
# like real option parsing, including --help and -l (for listing
# commands). note that as of fabric 1.10, the file for some reason
# needs to end in .py, despite what the documentation says. see:
# http://docs.fabfile.org/en/1.4.2/usage/fabfiles.html#fabfile-discovery
#
# the .index() manipulation below should make this work regardless of
# whether this is invoked from shell as "./foo.py" or "python foo.py"
import sys
from fabric.main import main as fmain
patched_argv = ['fab', '-f', __file__, ] + \
sys.argv[sys.argv.index(__file__) + 1:]
sys.argv = patched_argv
fmain()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
flyway-core/src/main/java/org/flywaydb/core/internal/configuration/ConfigUtils.java
|
/*
* Copyright © Red Gate Software Ltd 2010-2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flywaydb.core.internal.configuration;
import org.flywaydb.core.api.ErrorCode;
import org.flywaydb.core.api.FlywayException;
import org.flywaydb.core.api.logging.Log;
import org.flywaydb.core.api.logging.LogFactory;
import org.flywaydb.core.internal.database.DatabaseTypeRegister;
import org.flywaydb.core.internal.util.FileCopyUtils;
import org.flywaydb.core.internal.util.StringUtils;
import java.io.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.flywaydb.core.internal.sqlscript.SqlScriptMetadata.isMultilineBooleanExpression;
public class ConfigUtils {
private static final Log LOG = LogFactory.getLog(ConfigUtils.class);
public static final String CONFIG_FILE_NAME = "flyway.conf";
public static final String CONFIG_FILES = "flyway.configFiles";
public static final String CONFIG_FILE_ENCODING = "flyway.configFileEncoding";
public static final String BASELINE_DESCRIPTION = "flyway.baselineDescription";
public static final String BASELINE_ON_MIGRATE = "flyway.baselineOnMigrate";
public static final String BASELINE_VERSION = "flyway.baselineVersion";
public static final String BATCH = "flyway.batch";
public static final String CALLBACKS = "flyway.callbacks";
public static final String CLEAN_DISABLED = "flyway.cleanDisabled";
public static final String CLEAN_ON_VALIDATION_ERROR = "flyway.cleanOnValidationError";
public static final String CONNECT_RETRIES = "flyway.connectRetries";
public static final String DEFAULT_SCHEMA = "flyway.defaultSchema";
public static final String DRIVER = "flyway.driver";
public static final String DRYRUN_OUTPUT = "flyway.dryRunOutput";
public static final String ENCODING = "flyway.encoding";
public static final String DETECT_ENCODING = "flyway.detectEncoding";
public static final String ERROR_OVERRIDES = "flyway.errorOverrides";
public static final String GROUP = "flyway.group";
public static final String IGNORE_FUTURE_MIGRATIONS = "flyway.ignoreFutureMigrations";
public static final String IGNORE_MISSING_MIGRATIONS = "flyway.ignoreMissingMigrations";
public static final String IGNORE_IGNORED_MIGRATIONS = "flyway.ignoreIgnoredMigrations";
public static final String IGNORE_PENDING_MIGRATIONS = "flyway.ignorePendingMigrations";
public static final String IGNORE_MIGRATION_PATTERNS = "flyway.ignoreMigrationPatterns";
public static final String INIT_SQL = "flyway.initSql";
public static final String INSTALLED_BY = "flyway.installedBy";
public static final String LICENSE_KEY = "flyway.licenseKey";
public static final String LOCATIONS = "flyway.locations";
public static final String MIXED = "flyway.mixed";
public static final String OUT_OF_ORDER = "flyway.outOfOrder";
public static final String SKIP_EXECUTING_MIGRATIONS = "flyway.skipExecutingMigrations";
public static final String OUTPUT_QUERY_RESULTS = "flyway.outputQueryResults";
public static final String PASSWORD = "flyway.password";
public static final String PLACEHOLDER_PREFIX = "flyway.placeholderPrefix";
public static final String PLACEHOLDER_REPLACEMENT = "flyway.placeholderReplacement";
public static final String PLACEHOLDER_SUFFIX = "flyway.placeholderSuffix";
public static final String PLACEHOLDERS_PROPERTY_PREFIX = "flyway.placeholders.";
public static final String LOCK_RETRY_COUNT = "flyway.lockRetryCount";
public static final String JDBC_PROPERTIES_PREFIX = "flyway.jdbcProperties.";
public static final String REPEATABLE_SQL_MIGRATION_PREFIX = "flyway.repeatableSqlMigrationPrefix";
public static final String RESOLVERS = "flyway.resolvers";
public static final String SCHEMAS = "flyway.schemas";
public static final String SKIP_DEFAULT_CALLBACKS = "flyway.skipDefaultCallbacks";
public static final String SKIP_DEFAULT_RESOLVERS = "flyway.skipDefaultResolvers";
public static final String SQL_MIGRATION_PREFIX = "flyway.sqlMigrationPrefix";
public static final String SQL_MIGRATION_SEPARATOR = "flyway.sqlMigrationSeparator";
public static final String SQL_MIGRATION_SUFFIXES = "flyway.sqlMigrationSuffixes";
public static final String STREAM = "flyway.stream";
public static final String TABLE = "flyway.table";
public static final String TABLESPACE = "flyway.tablespace";
public static final String TARGET = "flyway.target";
public static final String CHERRY_PICK = "flyway.cherryPick";
public static final String UNDO_SQL_MIGRATION_PREFIX = "flyway.undoSqlMigrationPrefix";
public static final String URL = "flyway.url";
public static final String USER = "flyway.user";
public static final String VALIDATE_ON_MIGRATE = "flyway.validateOnMigrate";
public static final String VALIDATE_MIGRATION_NAMING = "flyway.validateMigrationNaming";
public static final String CREATE_SCHEMAS = "flyway.createSchemas";
public static final String FAIL_ON_MISSING_LOCATIONS = "flyway.failOnMissingLocations";
// Secrets-manager specific
public static final String VAULT_URL = "flyway.vault.url";
public static final String VAULT_TOKEN = "flyway.vault.token";
public static final String VAULT_SECRETS = "flyway.vault.secrets";
// Oracle-specific
public static final String ORACLE_SQLPLUS = "flyway.oracle.sqlplus";
public static final String ORACLE_SQLPLUS_WARN = "flyway.oracle.sqlplusWarn";
public static final String ORACLE_KERBEROS_CONFIG_FILE = "flyway.oracle.kerberosConfigFile";
public static final String ORACLE_KERBEROS_CACHE_FILE = "flyway.oracle.kerberosCacheFile";
// Command-line specific
public static final String JAR_DIRS = "flyway.jarDirs";
// Gradle specific
public static final String CONFIGURATIONS = "flyway.configurations";
private ConfigUtils() { }
/**
* Converts Flyway-specific environment variables to their matching properties.
*
* @return The properties corresponding to the environment variables.
*/
public static Map<String, String> environmentVariablesToPropertyMap() {
Map<String, String> result = new HashMap<>();
for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
String convertedKey = convertKey(entry.getKey());
if (convertedKey != null) {
// Known environment variable
result.put(convertKey(entry.getKey()), entry.getValue());
}
}
return result;
}
private static String convertKey(String key) {
if ("FLYWAY_BASELINE_DESCRIPTION".equals(key)) {
return BASELINE_DESCRIPTION;
}
if ("FLYWAY_BASELINE_ON_MIGRATE".equals(key)) {
return BASELINE_ON_MIGRATE;
}
if ("FLYWAY_BASELINE_VERSION".equals(key)) {
return BASELINE_VERSION;
}
if ("FLYWAY_BATCH".equals(key)) {
return BATCH;
}
if ("FLYWAY_CALLBACKS".equals(key)) {
return CALLBACKS;
}
if ("FLYWAY_CLEAN_DISABLED".equals(key)) {
return CLEAN_DISABLED;
}
if ("FLYWAY_CLEAN_ON_VALIDATION_ERROR".equals(key)) {
return CLEAN_ON_VALIDATION_ERROR;
}
if ("FLYWAY_CONFIG_FILE_ENCODING".equals(key)) {
return CONFIG_FILE_ENCODING;
}
if ("FLYWAY_CONFIG_FILES".equals(key)) {
return CONFIG_FILES;
}
if ("FLYWAY_CONNECT_RETRIES".equals(key)) {
return CONNECT_RETRIES;
}
if ("FLYWAY_DEFAULT_SCHEMA".equals(key)) {
return DEFAULT_SCHEMA;
}
if ("FLYWAY_DRIVER".equals(key)) {
return DRIVER;
}
if ("FLYWAY_DRYRUN_OUTPUT".equals(key)) {
return DRYRUN_OUTPUT;
}
if ("FLYWAY_ENCODING".equals(key)) {
return ENCODING;
}
if ("FLYWAY_DETECT_ENCODING".equals(key)) {
return DETECT_ENCODING;
}
if ("FLYWAY_ERROR_OVERRIDES".equals(key)) {
return ERROR_OVERRIDES;
}
if ("FLYWAY_GROUP".equals(key)) {
return GROUP;
}
if ("FLYWAY_IGNORE_FUTURE_MIGRATIONS".equals(key)) {
return IGNORE_FUTURE_MIGRATIONS;
}
if ("FLYWAY_IGNORE_MISSING_MIGRATIONS".equals(key)) {
return IGNORE_MISSING_MIGRATIONS;
}
if ("FLYWAY_IGNORE_IGNORED_MIGRATIONS".equals(key)) {
return IGNORE_IGNORED_MIGRATIONS;
}
if ("FLYWAY_IGNORE_PENDING_MIGRATIONS".equals(key)) {
return IGNORE_PENDING_MIGRATIONS;
}
if ("FLYWAY_IGNORE_MIGRATION_PATTERNS".equals(key)) {
return IGNORE_MIGRATION_PATTERNS;
}
if ("FLYWAY_INIT_SQL".equals(key)) {
return INIT_SQL;
}
if ("FLYWAY_INSTALLED_BY".equals(key)) {
return INSTALLED_BY;
}
if ("FLYWAY_LICENSE_KEY".equals(key)) {
return LICENSE_KEY;
}
if ("FLYWAY_LOCATIONS".equals(key)) {
return LOCATIONS;
}
if ("FLYWAY_MIXED".equals(key)) {
return MIXED;
}
if ("FLYWAY_OUT_OF_ORDER".equals(key)) {
return OUT_OF_ORDER;
}
if ("FLYWAY_SKIP_EXECUTING_MIGRATIONS".equals(key)) {
return SKIP_EXECUTING_MIGRATIONS;
}
if ("FLYWAY_OUTPUT_QUERY_RESULTS".equals(key)) {
return OUTPUT_QUERY_RESULTS;
}
if ("FLYWAY_PASSWORD".equals(key)) {
return PASSWORD;
}
if ("FLYWAY_LOCK_RETRY_COUNT".equals(key)) {
return LOCK_RETRY_COUNT;
}
if ("FLYWAY_PLACEHOLDER_PREFIX".equals(key)) {
return PLACEHOLDER_PREFIX;
}
if ("FLYWAY_PLACEHOLDER_REPLACEMENT".equals(key)) {
return PLACEHOLDER_REPLACEMENT;
}
if ("FLYWAY_PLACEHOLDER_SUFFIX".equals(key)) {
return PLACEHOLDER_SUFFIX;
}
if (key.matches("FLYWAY_PLACEHOLDERS_.+")) {
return PLACEHOLDERS_PROPERTY_PREFIX + key.substring("FLYWAY_PLACEHOLDERS_".length()).toLowerCase(Locale.ENGLISH);
}
if (key.matches("FLYWAY_JDBC_PROPERTIES_.+")) {
return JDBC_PROPERTIES_PREFIX + key.substring("FLYWAY_JDBC_PROPERTIES_".length());
}
if ("FLYWAY_REPEATABLE_SQL_MIGRATION_PREFIX".equals(key)) {
return REPEATABLE_SQL_MIGRATION_PREFIX;
}
if ("FLYWAY_RESOLVERS".equals(key)) {
return RESOLVERS;
}
if ("FLYWAY_SCHEMAS".equals(key)) {
return SCHEMAS;
}
if ("FLYWAY_SKIP_DEFAULT_CALLBACKS".equals(key)) {
return SKIP_DEFAULT_CALLBACKS;
}
if ("FLYWAY_SKIP_DEFAULT_RESOLVERS".equals(key)) {
return SKIP_DEFAULT_RESOLVERS;
}
if ("FLYWAY_SQL_MIGRATION_PREFIX".equals(key)) {
return SQL_MIGRATION_PREFIX;
}
if ("FLYWAY_SQL_MIGRATION_SEPARATOR".equals(key)) {
return SQL_MIGRATION_SEPARATOR;
}
if ("FLYWAY_SQL_MIGRATION_SUFFIXES".equals(key)) {
return SQL_MIGRATION_SUFFIXES;
}
if ("FLYWAY_STREAM".equals(key)) {
return STREAM;
}
if ("FLYWAY_TABLE".equals(key)) {
return TABLE;
}
if ("FLYWAY_TABLESPACE".equals(key)) {
return TABLESPACE;
}
if ("FLYWAY_TARGET".equals(key)) {
return TARGET;
}
if ("FLYWAY_CHERRY_PICK".equals(key)) {
return CHERRY_PICK;
}
if ("FLYWAY_UNDO_SQL_MIGRATION_PREFIX".equals(key)) {
return UNDO_SQL_MIGRATION_PREFIX;
}
if ("FLYWAY_URL".equals(key)) {
return URL;
}
if ("FLYWAY_USER".equals(key)) {
return USER;
}
if ("FLYWAY_VALIDATE_ON_MIGRATE".equals(key)) {
return VALIDATE_ON_MIGRATE;
}
if ("FLYWAY_VALIDATE_MIGRATION_NAMING".equals(key)) {
return VALIDATE_MIGRATION_NAMING;
}
if ("FLYWAY_CREATE_SCHEMAS".equals(key)) {
return CREATE_SCHEMAS;
}
if ("FLYWAY_FAIL_ON_MISSING_LOCATIONS".equals(key)) {
return FAIL_ON_MISSING_LOCATIONS;
}
// Oracle-specific
if ("FLYWAY_ORACLE_SQLPLUS".equals(key)) {
return ORACLE_SQLPLUS;
}
if ("FLYWAY_ORACLE_SQLPLUS_WARN".equals(key)) {
return ORACLE_SQLPLUS_WARN;
}
if ("FLYWAY_ORACLE_KERBEROS_CONFIG_FILE".equals(key)) {
return ORACLE_KERBEROS_CONFIG_FILE;
}
if ("FLYWAY_ORACLE_KERBEROS_CACHE_FILE".equals(key)) {
return ORACLE_KERBEROS_CACHE_FILE;
}
// Secrets-manager specific
if ("FLYWAY_VAULT_URL".equals(key)) {
return VAULT_URL;
}
if ("FLYWAY_VAULT_TOKEN".equals(key)) {
return VAULT_TOKEN;
}
if ("FLYWAY_VAULT_SECRETS".equals(key)) {
return VAULT_SECRETS;
}
// Command-line specific
if ("FLYWAY_JAR_DIRS".equals(key)) {
return JAR_DIRS;
}
// Gradle specific
if ("FLYWAY_CONFIGURATIONS".equals(key)) {
return CONFIGURATIONS;
}
return null;
}
/**
* Load configuration files from the default locations:
* $installationDir$/conf/flyway.conf
* $user.home$/flyway.conf
* $workingDirectory$/flyway.conf
*
* @param encoding The conf file encoding.
* @throws FlywayException When the configuration failed.
*/
public static Map<String, String> loadDefaultConfigurationFiles(File installationDir, String encoding) {
Map<String, String> configMap = new HashMap<>();
configMap.putAll(ConfigUtils.loadConfigurationFile(new File(installationDir.getAbsolutePath() + "/conf/" + ConfigUtils.CONFIG_FILE_NAME), encoding, false));
configMap.putAll(ConfigUtils.loadConfigurationFile(new File(System.getProperty("user.home") + "/" + ConfigUtils.CONFIG_FILE_NAME), encoding, false));
configMap.putAll(ConfigUtils.loadConfigurationFile(new File(ConfigUtils.CONFIG_FILE_NAME), encoding, false));
return configMap;
}
/**
* Loads the configuration from this configuration file.
*
* @param configFile The configuration file to load.
* @param encoding The encoding of the configuration file.
* @param failIfMissing Whether to fail if the file is missing.
* @return The properties from the configuration file. An empty Map if none.
* @throws FlywayException When the configuration file could not be loaded.
*/
public static Map<String, String> loadConfigurationFile(File configFile, String encoding, boolean failIfMissing) throws FlywayException {
String errorMessage = "Unable to load config file: " + configFile.getAbsolutePath();
if ("-".equals(configFile.getName())) {
return loadConfigurationFromInputStream(System.in);
} else if (!configFile.isFile() || !configFile.canRead()) {
if (!failIfMissing) {
LOG.debug(errorMessage);
return new HashMap<>();
}
throw new FlywayException(errorMessage);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Loading config file: " + configFile.getAbsolutePath());
}
try {
return loadConfigurationFromReader(new InputStreamReader(new FileInputStream(configFile), encoding));
} catch (IOException | FlywayException e) {
throw new FlywayException(errorMessage, e);
}
}
public static Map<String, String> loadConfigurationFromInputStream(InputStream inputStream) {
Map<String, String> config = new HashMap<>();
try {
// System.in.available() : returns an estimate of the number of bytes that can be read (or skipped over) from this input stream
// Used to check if there is any data in the stream
if (inputStream != null && inputStream.available() > 0) {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
LOG.debug("Attempting to load configuration from standard input");
int firstCharacter = bufferedReader.read();
if (bufferedReader.ready() && firstCharacter != -1) {
// Prepend the first character to the rest of the string
// This is a char, represented as an int, so we cast to a char
// which is implicitly converted to an string
String configurationString = (char)firstCharacter + FileCopyUtils.copyToString(bufferedReader);
Map<String, String> configurationFromStandardInput = loadConfigurationFromString(configurationString);
if (configurationFromStandardInput.isEmpty()) {
LOG.debug("Empty configuration provided from standard input");
} else {
LOG.info("Loaded configuration from standard input");
config.putAll(configurationFromStandardInput);
}
} else {
LOG.debug("Could not load configuration from standard input");
}
}
} catch (Exception e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Could not load configuration from standard input " + e.getMessage());
}
}
return config;
}
/**
 * Reads the configuration from a Reader.
 *
 * @return The properties from the configuration file. An empty Map if none.
 * @throws FlywayException When the configuration could not be read.
 */
public static Map<String, String> loadConfigurationFromReader(Reader reader) throws FlywayException {
    try {
        // Both the copy and the parse can raise IOException; wrap either uniformly.
        return loadConfigurationFromString(FileCopyUtils.copyToString(reader));
    } catch (IOException e) {
        throw new FlywayException("Unable to read config", e);
    }
}
/**
 * Parses configuration from a properties-format string.
 * Line endings are normalized, backslashes are doubled so java.util.Properties keeps
 * Windows paths intact, trailing-backslash line continuations are restored only for
 * genuine multiline values, and ${ENV_VAR} placeholders are expanded from the
 * process environment before parsing.
 *
 * @throws IOException When the resulting properties text cannot be parsed.
 */
public static Map<String, String> loadConfigurationFromString(String configuration) throws IOException {
String[] lines = configuration.replace("\r\n", "\n").split("\n");
StringBuilder confBuilder = new StringBuilder();
for (int i = 0; i < lines.length; i++) {
// Double every backslash so Properties.load does not treat them as escapes.
String replacedLine = lines[i].trim().replace("\\", "\\\\");
// if the line ends in a \\, then it may be a multiline property
if (replacedLine.endsWith("\\\\")) {
// if we aren't the last line
if (i < lines.length-1) {
// look ahead to see if the next line is a blank line, a property, or another multiline
String nextLine = lines[i+1];
boolean restoreMultilineDelimiter = false;
if (nextLine.isEmpty()) {
// blank line
} else if (nextLine.contains("=")) {
if (isMultilineBooleanExpression(nextLine)) {
// next line is an extension of a boolean expression
restoreMultilineDelimiter = true;
}
// next line is a property
} else {
// line with content, this was a multiline property
restoreMultilineDelimiter = true;
}
if (restoreMultilineDelimiter) {
// it's a multiline property, so restore the original single slash
replacedLine = replacedLine.substring(0, replacedLine.length()-2) + "\\";
}
}
}
confBuilder.append(replacedLine).append("\n");
}
String contents = confBuilder.toString();
Properties properties = new Properties();
// Expand ${VAR} references before handing the text to Properties.load.
contents = expandEnvironmentVariables(contents, System.getenv());
properties.load(new StringReader(contents));
return propertiesToMap(properties);
}
/**
 * Replaces every ${NAME} placeholder in the given text with the matching entry
 * from the supplied environment map; unknown variables expand to the empty string.
 */
static String expandEnvironmentVariables(String value, Map<String, String> environmentVariables) {
    String result = value;
    Matcher matcher = Pattern.compile("\\$\\{([A-Za-z0-9_]+)}").matcher(value);
    while (matcher.find()) {
        String name = matcher.group(1);
        String replacement = environmentVariables.getOrDefault(name, "");
        if (LOG.isDebugEnabled()) {
            LOG.debug("Expanding environment variable in config: " + name + " -> " + replacement);
        }
        // Quote both sides so regex metacharacters in names/values are taken literally.
        result = result.replaceAll(Pattern.quote(matcher.group(0)), Matcher.quoteReplacement(replacement));
    }
    return result;
}
/**
 * Converts this Properties object into a map.
 */
public static Map<String, String> propertiesToMap(Properties properties) {
    Map<String, String> result = new HashMap<>();
    properties.forEach((key, value) -> result.put(String.valueOf(key), String.valueOf(value)));
    return result;
}
/**
 * Puts this property in the config if it has been set in any of these values.
 *
 * @param config The config.
 * @param key The property name.
 * @param values The values to try. The first non-null value will be set.
 */
public static void putIfSet(Map<String, String> config, String key, Object... values) {
    for (Object candidate : values) {
        if (candidate == null) {
            continue;
        }
        config.put(key, candidate.toString());
        break;
    }
}
/**
 * Puts this property in the config if it has been set in any of these values.
 *
 * @param config The config.
 * @param key The property name.
 * @param values The values to try. The first non-null array is stored comma-delimited.
 */
public static void putArrayIfSet(Map<String, String> config, String key, String[]... values) {
    for (String[] candidate : values) {
        if (candidate == null) {
            continue;
        }
        config.put(key, StringUtils.arrayToCommaDelimitedString(candidate));
        return;
    }
}
/**
 * Removes the given key from the config and converts its value to a Boolean.
 *
 * @param config The config.
 * @param key The property name.
 * @return The property value as a boolean if it exists, otherwise {@code null}.
 * @throws FlywayException when the property value is not a valid boolean.
 */
public static Boolean removeBoolean(Map<String, String> config, String key) {
    String value = config.remove(key);
    if (value == null) {
        return null;
    }
    // Only the exact lowercase literals are accepted, matching strict parsing.
    switch (value) {
        case "true":
            return Boolean.TRUE;
        case "false":
            return Boolean.FALSE;
        default:
            throw new FlywayException("Invalid value for " + key + " (should be either true or false): " + value,
                    ErrorCode.CONFIGURATION);
    }
}
/**
 * Removes the given key from the config and returns its value as an Integer.
 * Note the side effect: the entry is consumed from the map even when parsing fails.
 *
 * @param config The config.
 * @param key The property name.
 * @return The property value as an integer if it exists, otherwise {@code null}.
 * @throws FlywayException When the property value is not a valid integer.
 */
public static Integer removeInteger(Map<String, String> config, String key) {
String value = config.remove(key);
if (value == null) {
return null;
}
try {
return Integer.valueOf(value);
} catch (NumberFormatException e) {
throw new FlywayException("Invalid value for " + key + " (should be an integer): " + value,
ErrorCode.CONFIGURATION);
}
}
/**
 * Dumps the configuration to the debug log in stable alphabetical order,
 * masking sensitive values (password, license key, and credentials embedded
 * in the JDBC URL).
 */
public static void dumpConfiguration(Map<String, String> config) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Using configuration:");
        // TreeMap gives a deterministic, sorted dump order.
        for (Map.Entry<String, String> entry : new TreeMap<>(config).entrySet()) {
            String value = entry.getValue();
            switch (entry.getKey()) {
                // Mask the password. Ex.: T0pS3cr3t -> *********
                case ConfigUtils.PASSWORD:
                    value = StringUtils.trimOrPad("", value.length(), '*');
                    break;
                // Mask the licence key, leaving a few characters to confirm which key is in use.
                // BUG FIX: guard the substring calls — a key shorter than 12 characters
                // previously threw StringIndexOutOfBoundsException; now it is fully masked.
                case ConfigUtils.LICENSE_KEY:
                    if (value.length() >= 12) {
                        value = value.substring(0, 8) + "******" + value.substring(value.length() - 4);
                    } else {
                        value = StringUtils.trimOrPad("", value.length(), '*');
                    }
                    break;
                // Mask any password in the URL
                case ConfigUtils.URL:
                    value = DatabaseTypeRegister.redactJdbcUrl(value);
                    break;
            }
            LOG.debug(entry.getKey() + " -> " + value);
        }
    }
}
/**
 * Checks the configuration for any unrecognised properties remaining after expected ones have been consumed.
 *
 * @param config The configured properties.
 * @param prefix The expected prefix for Flyway configuration parameters. {@code null} if none.
 */
public static void checkConfigurationForUnrecognisedProperties(Map<String, String> config, String prefix) {
    ArrayList<String> unknownFlywayProperties = new ArrayList<>();
    for (String key : config.keySet()) {
        if (prefix == null || key.startsWith(prefix)) {
            unknownFlywayProperties.add(key);
        }
    }
    if (unknownFlywayProperties.isEmpty()) {
        return;
    }
    // Pick the grammatically correct noun for the error message.
    String property = unknownFlywayProperties.size() == 1 ? "property" : "properties";
    throw new FlywayException(
            String.format("Unknown configuration %s: %s",
                    property,
                    StringUtils.arrayToCommaDelimitedString(unknownFlywayProperties.toArray())),
            ErrorCode.CONFIGURATION);
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
config.go
|
// Command git-hooks: package-wide constants and configuration tables.
package main
import (
"os"
)
// VERSION is the release tag reported by the CLI.
var VERSION = "v1.2.0"
// NAME is the executable's display name.
var NAME = "git-hooks"
// TRIGGERS lists every git hook name this tool can intercept.
var TRIGGERS = [...]string{"applypatch-msg", "commit-msg", "post-applypatch", "post-checkout", "post-commit", "post-merge", "post-receive", "pre-applypatch", "pre-auto-gc", "pre-commit", "prepare-commit-msg", "pre-rebase", "pre-receive", "update", "pre-push"}
// CONTRIB_DIRNAME is the directory holding community-contributed hooks.
var CONTRIB_DIRNAME = "githooks-contrib"
// tplPreInstall is the hook script installed before git-hooks is set up.
var tplPreInstall = `#!/usr/bin/env bash
echo \"git hooks not installed in this repository. Run 'git hooks --install' to install it or 'git hooks -h' for more information.\"`
// tplPostInstall is the hook script that delegates every trigger to git-hooks.
var tplPostInstall = `#!/usr/bin/env bash
git-hooks run "$0" "$@"`
// ENV selects the runtime environment; "test" flips isTestEnv.
var ENV = os.Getenv("ENV")
// DIRS maps logical template locations to filesystem paths.
var DIRS = map[string]string{
"HomeTemplate": ".git-template-with-git-hooks",
"GlobalTemplate": "/usr/share/git-core/templates",
}
// GIT maps logical operations to git argument strings (split before exec).
var GIT = map[string]string{
"SetTemplateDir": "config --global init.templatedir ",
"GetTemplateDir": "config --global --get init.templatedir",
"UnsetTemplateDir": "config --global --unset init.templatedir",
"RemoveTemplateDir": "config --global --remove init",
"FirstCommit": "rev-list --max-parents=0 HEAD",
}
// MESSAGES holds every user-facing status string.
var MESSAGES = map[string]string{
"NotGitRepo": "Current directory is not a git repo",
"Installed": "Git hooks ARE installed in this repository.",
"NotInstalled": "Git hooks are NOT installed in this repository. (Run 'git hooks install' to install it)",
"ExistHooks": "hooks.old already exists, perhaps you already installed?",
"NotExistHooks": "Error, hooks.old doesn't exists, aborting uninstall to not destroy something",
"Restore": "Restore hooks.old",
"SetTemplateDir": "Git global config init.templatedir is now set to ",
"UpdateToDate": "git-hooks is update to date",
"Incompatible": "Version backward incompatible, manually update required",
}
// isTestEnv reports whether the process runs with ENV=test.
func isTestEnv() bool {
return ENV == "test"
}
[
"\"ENV\""
] |
[] |
[
"ENV"
] |
[]
|
["ENV"]
|
go
| 1 | 0 | |
pop_test.go
|
package pop
import (
stdlog "log"
"os"
"testing"
"time"
"github.com/gobuffalo/nulls"
"github.com/gobuffalo/pop/v6/logging"
"github.com/gobuffalo/validate/v3"
"github.com/gobuffalo/validate/v3/validators"
"github.com/gofrs/uuid"
"github.com/stretchr/testify/suite"
)
var PDB *Connection
type PostgreSQLSuite struct {
suite.Suite
}
type MySQLSuite struct {
suite.Suite
}
type SQLiteSuite struct {
suite.Suite
}
type CockroachSuite struct {
suite.Suite
}
// TestSpecificSuites runs the test suite matching the SODA_DIALECT
// environment variable; with an unset or unknown dialect nothing runs.
func TestSpecificSuites(t *testing.T) {
switch os.Getenv("SODA_DIALECT") {
case "postgres":
suite.Run(t, &PostgreSQLSuite{})
case "mysql", "mysql_travis":
suite.Run(t, &MySQLSuite{})
case "sqlite":
suite.Run(t, &SQLiteSuite{})
case "cockroach":
suite.Run(t, &CockroachSuite{})
}
}
// init wires up the package-wide test connection PDB. When SODA_DIALECT is
// unset the integration tests are skipped; otherwise the config file must
// load and the connection must succeed, or the test binary panics.
func init() {
Debug = false
AddLookupPaths("./")
dialect := os.Getenv("SODA_DIALECT")
if dialect == "" {
log(logging.Info, "Skipping integration tests")
return
}
if err := LoadConfigFile(); err != nil {
stdlog.Panic(err)
}
var err error
log(logging.Info, "Run test with dialect %v", dialect)
PDB, err = Connect(dialect)
if err != nil {
stdlog.Panic(err)
}
}
// transaction runs fn inside a transaction that is always rolled back,
// keeping the test database unchanged between test cases.
func transaction(fn func(tx *Connection)) {
	if err := PDB.Rollback(fn); err != nil {
		stdlog.Fatal(err)
	}
}
// ts passes s through the active dialect's SQL translator — presumably
// rewriting placeholder/identifier syntax for the dialect under test
// (NOTE(review): confirm against pop's Dialect.TranslateSQL docs).
func ts(s string) string {
return PDB.Dialect.TranslateSQL(s)
}
type Client struct {
ClientID string `db:"id"`
}
func (c Client) TableName() string {
return "clients"
}
type User struct {
ID int `db:"id"`
UserName string `db:"user_name"`
Email string `db:"email"`
Name nulls.String `db:"name"`
Alive nulls.Bool `db:"alive"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
BirthDate nulls.Time `db:"birth_date"`
Bio nulls.String `db:"bio"`
Price nulls.Float64 `db:"price"`
FullName nulls.String `db:"full_name" select:"name as full_name"`
Books Books `has_many:"books" order_by:"title asc"`
FavoriteSong Song `has_one:"song" fk_id:"u_id"`
Houses Addresses `many_to_many:"users_addresses"`
}
type UserPointerAssocs struct {
ID int `db:"id"`
UserName string `db:"user_name"`
Email string `db:"email"`
Name nulls.String `db:"name"`
Alive nulls.Bool `db:"alive"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
BirthDate nulls.Time `db:"birth_date"`
Bio nulls.String `db:"bio"`
Price nulls.Float64 `db:"price"`
FullName nulls.String `db:"full_name" select:"name as full_name"`
Books Books `has_many:"books" order_by:"title asc" fk_id:"user_id"`
FavoriteSong *Song `has_one:"song" fk_id:"u_id"`
Houses Addresses `many_to_many:"users_addresses"`
}
func (UserPointerAssocs) TableName() string {
return "users"
}
// Validate gets run every time you call a "Validate*" (ValidateAndSave, ValidateAndCreate, ValidateAndUpdate) method.
// This method is not required and may be deleted.
// The only rule enforced here is that Name must be non-empty.
func (u *User) Validate(tx *Connection) (*validate.Errors, error) {
return validate.Validate(
&validators.StringIsPresent{Field: u.Name.String, Name: "Name"},
), nil
}
type Users []User
type UserAttribute struct {
ID int `db:"id"`
UserName string `db:"user_name"`
NickName string `db:"nick_name"`
User User `json:"user" belongs_to:"user" fk_id:"UserName" primary_id:"UserName"`
}
type Book struct {
ID int `db:"id"`
Title string `db:"title"`
Isbn string `db:"isbn"`
UserID nulls.Int `db:"user_id"`
User User `belongs_to:"user"`
Description string `db:"description"`
Writers Writers `has_many:"writers"`
TaxiID nulls.Int `db:"taxi_id"`
Taxi Taxi `belongs_to:"taxi"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Taxi struct {
ID int `db:"id"`
Model string `db:"model"`
UserID nulls.Int `db:"user_id"`
AddressID nulls.Int `db:"address_id"`
Driver *User `belongs_to:"user" fk_id:"user_id"`
Address Address `belongs_to:"address"`
ToAddressID *int `db:"to_address_id"`
ToAddress *Address `belongs_to:"address"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Taxis []Taxi
// Validate gets run every time you call a "Validate*" (ValidateAndSave, ValidateAndCreate, ValidateAndUpdate) method.
// This method is not required and may be deleted.
func (b *Book) Validate(tx *Connection) (*validate.Errors, error) {
// Execute another query to test if Validate causes eager creation to fail
if err := tx.All(&Taxis{}); err != nil {
return nil, err
}
// Description must be present for a Book to be valid.
return validate.Validate(
&validators.StringIsPresent{Field: b.Description, Name: "Description"},
), nil
}
type Books []Book
type Writer struct {
ID int `db:"id"`
Name string `db:"name"`
Addresses Addresses `has_many:"addresses"`
Friends []Friend `has_many:"friends"`
BookID int `db:"book_id"`
Book Book `belongs_to:"book"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Writers []Writer
type Address struct {
ID int `db:"id"`
Street string `db:"street"`
WriterID int `db:"writer_id"`
HouseNumber int `db:"house_number"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
TaxisToHere Taxis `has_many:"taxis" fk_id:"to_address_id" order_by:"created_at asc"`
}
type Addresses []Address
type UsersAddress struct {
ID int `db:"id"`
UserID int `db:"user_id"`
AddressID int `db:"address_id"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type UsersAddressQuery struct {
ID int `db:"id"`
UserID int `db:"user_id"`
AddressID int `db:"address_id"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
UserName *string `db:"name" json:"user_name"`
UserEmail *string `db:"email" json:"user_email"`
}
func (UsersAddressQuery) TableName() string {
return "users_addresses"
}
type Friend struct {
ID int `db:"id"`
FirstName string `db:"first_name"`
WriterID int `db:"writer_id"`
LastName string `db:"last_name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
func (Friend) TableName() string {
return "good_friends"
}
type Family struct {
ID int `db:"id"`
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
func (Family) TableName() string {
// schema.table_name
return "family.members"
}
type Enemy struct {
A string
}
type Song struct {
ID uuid.UUID `db:"id"`
Title string `db:"title"`
UserID int `db:"u_id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
ComposedByID int `json:"composed_by_id" db:"composed_by_id"`
ComposedBy Composer `belongs_to:"composer"`
}
type Composer struct {
ID int `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Course struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CourseCode struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
CourseID uuid.UUID `json:"course_id" db:"course_id"`
Course Course `json:"-" belongs_to:"course"`
// Course Course `belongs_to:"course"`
}
type NetClient struct {
ID uuid.UUID `json:"id" db:"id"`
Hops []Hop `json:"hop_id" has_many:"hops"`
}
type Hop struct {
ID uuid.UUID `json:"id" db:"id"`
NetClient *NetClient `json:"net_client" belongs_to:"net_client" fk_id:"NetClientID"`
NetClientID uuid.UUID `json:"net_client_id" db:"net_client_id"`
Server *Server `json:"course" belongs_to:"server" fk_id:"ServerID" oder_by:"id asc"`
ServerID uuid.NullUUID `json:"server_id" db:"server_id"`
}
type Server struct {
ID uuid.UUID `json:"id" db:"id"`
}
type ValidatableCar struct {
ID int64 `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
var validationLogs []string
func (v *ValidatableCar) Validate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "Validate")
verrs := validate.Validate(&validators.StringIsPresent{Field: v.Name, Name: "Name"})
return verrs, nil
}
func (v *ValidatableCar) ValidateSave(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateSave")
return nil, nil
}
func (v *ValidatableCar) ValidateUpdate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateUpdate")
return nil, nil
}
func (v *ValidatableCar) ValidateCreate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateCreate")
return nil, nil
}
type NotValidatableCar struct {
ID int `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CallbacksUser struct {
ID int `db:"id"`
BeforeS string `db:"before_s"`
BeforeC string `db:"before_c"`
BeforeU string `db:"before_u"`
BeforeD string `db:"before_d"`
BeforeV string `db:"before_v"`
AfterS string `db:"after_s"`
AfterC string `db:"after_c"`
AfterU string `db:"after_u"`
AfterD string `db:"after_d"`
AfterF string `db:"after_f"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CallbacksUsers []CallbacksUser
func (u *CallbacksUser) BeforeSave(tx *Connection) error {
u.BeforeS = "BeforeSave"
return nil
}
func (u *CallbacksUser) BeforeUpdate(tx *Connection) error {
u.BeforeU = "BeforeUpdate"
return nil
}
func (u *CallbacksUser) BeforeCreate(tx *Connection) error {
u.BeforeC = "BeforeCreate"
return nil
}
func (u *CallbacksUser) BeforeDestroy(tx *Connection) error {
u.BeforeD = "BeforeDestroy"
return nil
}
func (u *CallbacksUser) BeforeValidate(tx *Connection) error {
u.BeforeV = "BeforeValidate"
return nil
}
func (u *CallbacksUser) AfterSave(tx *Connection) error {
u.AfterS = "AfterSave"
return nil
}
func (u *CallbacksUser) AfterUpdate(tx *Connection) error {
u.AfterU = "AfterUpdate"
return nil
}
func (u *CallbacksUser) AfterCreate(tx *Connection) error {
u.AfterC = "AfterCreate"
return nil
}
func (u *CallbacksUser) AfterDestroy(tx *Connection) error {
u.AfterD = "AfterDestroy"
return nil
}
func (u *CallbacksUser) AfterFind(tx *Connection) error {
u.AfterF = "AfterFind"
return nil
}
type Label struct {
ID string `db:"id"`
}
type SingleID struct {
ID int `db:"id"`
}
type Body struct {
ID int `json:"id" db:"id"`
Head *Head `json:"head" has_one:"head"`
}
type Head struct {
ID int `json:"id,omitempty" db:"id"`
BodyID int `json:"-" db:"body_id"`
Body *Body `json:"body,omitempty" belongs_to:"body"`
}
type HeadPtr struct {
ID int `json:"id,omitempty" db:"id"`
BodyID *int `json:"-" db:"body_id"`
Body *Body `json:"body,omitempty" belongs_to:"body"`
}
type Student struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
// https://github.com/gobuffalo/pop/issues/302
type Parent struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
Students []*Student `many_to_many:"parents_students"`
}
type CrookedColour struct {
ID int `db:"pk"`
Name string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type CrookedSong struct {
ID string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type NonStandardID struct {
ID int `db:"pk"`
OutfacingID string `db:"id"`
}
type InnerStruct struct {
ID int `db:"id"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type EmbeddingStruct struct {
InnerStruct
AdditionalField string `db:"additional_field"`
}
|
[
"\"SODA_DIALECT\"",
"\"SODA_DIALECT\""
] |
[] |
[
"SODA_DIALECT"
] |
[]
|
["SODA_DIALECT"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strconv"
)
const (
// Application metadata
appId = "com.github.heisantosh.redshiftctl"
appName = "redshiftctl"
version = "0.1.0"
// Redshift on/off state.
stateOn = "on"
stateOff = "off"
// Redshift state fields.
stateTemperature = "temperature"
stateState = "state"
// Supported commands.
cmdHelp = "help"
cmdLoad = "load"
cmdIncrease = "increase"
cmdDecrease = "decrease"
cmdToggle = "toggle"
cmdSet = "set"
cmdGet = "get"
)
func configDir() string {
return os.Getenv("HOME") + "/.config/redshiftctl"
}
func configFile() string {
return configDir() + "/config.json"
}
// help prints the usage text to standard output.
// BUG FIX: corrected user-visible typos in the printed text
// ("redshft" -> "redshift", "toggele" -> "toggle").
func help() {
fmt.Println(appName + ` (` + appId + `) ` + version + `
Tool to manually control monitor color temperature using redshift.
USAGE
redshiftctl COMMAND [ARG]
COMMANDS
toggle [STATE] toggle redshift to state on or off, if not provided toggle current state
load load the state of the configuration file
increase TEMP increase the color temperature by TEMP
decrease TEMP decrease the color temperature by TEMP
set TEMP set the color temperature to TEMP
get STATE get the value of the state, STATE can be state or temperature
help print this help information
CONFIGURATION
Configuration file is ` + configFile() + `
Keys are
# current state on, off
state=on
# color temperature
temperature=4500`)
}
// redshiftState represents current state of redshift.
type redshiftState struct {
State string `json:"state"` // stateOn or stateOff
Temperature int `json:"temperature"` // color temperature value handed to redshift
}
// cmdArgs holds the command and the provided arguments for the command.
type cmdArgs struct {
cmd string // one of the cmd* constants
toggleState string // optional explicit state for "toggle"
temperatureDelta int // delta for "increase"/"decrease"
temperature int // absolute value for "set"
getState string // field for "get": stateState or stateTemperature
}
// main parses the CLI arguments, bootstraps the config on first run, and
// dispatches to the requested command. Invalid arguments exit with status 1.
func main() {
args, err := parseArgs()
if err != nil {
fmt.Printf("Error: %v\n\n", err)
os.Exit(1)
}
firstRunCheck()
runCommand(args)
}
// firstRunCheck makes sure the config files are in place.
// On first run it creates the config directory and seeds a default state
// (on, 4500). NOTE(review): the MkdirAll error is ignored; a failed create
// only surfaces later as a silent write failure in stateStore.
func firstRunCheck() {
_, err := os.Stat(configFile())
if os.IsNotExist(err) {
os.MkdirAll(configDir(), 0744)
stateStore(redshiftState{State: stateOn, Temperature: 4500})
}
}
// parseArgs parses and validates the CLI arguments into a cmdArgs value.
// It returns an error for unknown commands, malformed numeric arguments,
// and invalid state names.
func parseArgs() (cmdArgs, error) {
	args := cmdArgs{}
	// Commands that may appear with no further argument: help, load, bare toggle.
	if len(os.Args) == 2 && (os.Args[1] == cmdHelp || os.Args[1] == cmdLoad || os.Args[1] == cmdToggle) {
		args.cmd = os.Args[1]
		return args, nil
	}
	// Every remaining command takes exactly one argument.
	if len(os.Args)-1 != 2 {
		return args, errors.New("insufficient args")
	}
	args.cmd = os.Args[1]
	switch args.cmd {
	case cmdDecrease, cmdIncrease:
		v, err := strconv.Atoi(os.Args[2])
		if err != nil {
			return args, errors.New("invalid " + os.Args[2] + " value")
		}
		args.temperatureDelta = v
	case cmdSet:
		v, err := strconv.Atoi(os.Args[2])
		if err != nil {
			return args, errors.New("invalid " + os.Args[2] + " value")
		}
		args.temperature = v
	case cmdGet:
		args.getState = os.Args[2]
		if !(args.getState == stateState || args.getState == stateTemperature) {
			// BUG FIX: the original message lacked a space ("invalid get argstate").
			return args, errors.New("invalid get arg " + args.getState)
		}
	case cmdToggle:
		args.toggleState = os.Args[2]
		if !(args.toggleState == stateOn || args.toggleState == stateOff) {
			return args, errors.New("invalid " + args.toggleState + " value")
		}
	default:
		return args, errors.New("unknown command " + args.cmd)
	}
	return args, nil
}
// stateLoad reads the persisted redshift state from the config file.
// A read error is returned with the zero state; a JSON decode error is
// silently ignored, matching the original best-effort behavior.
func stateLoad() (redshiftState, error) {
	var state redshiftState
	b, err := ioutil.ReadFile(configFile())
	if err != nil {
		return state, err
	}
	json.Unmarshal(b, &state)
	return state, nil
}
// stateStore persists the given state as JSON in the config file.
// Marshal cannot fail for this plain struct; the write error is deliberately
// discarded (best effort) — NOTE(review): consider surfacing it to the user.
func stateStore(state redshiftState) {
b, _ := json.Marshal(state)
_ = os.WriteFile(configFile(), b, 0664)
}
// toggleOff turns off redshift by resetting color temperature.
// NOTE(review): the exec error is discarded; a missing redshift binary fails silently.
func toggleOff() {
exec.Command("redshift", "-o", "-x").Run()
}
// setTemperature manually sets the color temperature in one shot mode.
// NOTE(review): the exec error is discarded here as well.
func setTemperature(temp int) {
exec.Command("redshift", "-P", "-o", "-O", strconv.Itoa(temp)).Run()
}
// runCommand executes the parsed command against the persisted state and
// writes the (possibly updated) state back, except for "help" which returns
// before persisting. A failed stateLoad is ignored and the zero state used.
func runCommand(args cmdArgs) {
state, _ := stateLoad()
switch args.cmd {
case cmdToggle:
// Explicit off, or an implicit toggle while currently on, turns redshift off;
// every other combination turns it (back) on at the stored temperature.
if (args.toggleState == "" && state.State == stateOn) || args.toggleState == stateOff {
toggleOff()
state.State = stateOff
} else {
setTemperature(state.Temperature)
state.State = stateOn
}
case cmdIncrease:
setTemperature(state.Temperature + args.temperatureDelta)
state.Temperature = state.Temperature + args.temperatureDelta
case cmdDecrease:
setTemperature(state.Temperature - args.temperatureDelta)
state.Temperature = state.Temperature - args.temperatureDelta
case cmdSet:
setTemperature(args.temperature)
state.Temperature = args.temperature
case cmdGet:
// "get" only prints; the state is unchanged (but still rewritten below).
switch args.getState {
case stateState:
fmt.Println(state.State)
case stateTemperature:
fmt.Println(state.Temperature)
}
case cmdLoad:
// Re-apply whatever the config says without modifying it.
if state.State == stateOff {
toggleOff()
} else {
setTemperature(state.Temperature)
}
case cmdHelp:
help()
return
}
stateStore(state)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
tests/test_MIDI_unittests.py
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Kevin J. Walters
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from unittest.mock import Mock, MagicMock, call
import random
import os
verbose = int(os.getenv("TESTVERBOSE", "2"))
# adafruit_midi had an import usb_midi
import sys
# sys.modules['usb_midi'] = MagicMock()
# Borrowing the dhalbert/tannewt technique from adafruit/Adafruit_CircuitPython_Motor
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Full monty
from adafruit_midi.channel_pressure import ChannelPressure
from adafruit_midi.control_change import ControlChange
from adafruit_midi.note_off import NoteOff
from adafruit_midi.note_on import NoteOn
from adafruit_midi.pitch_bend import PitchBend
from adafruit_midi.polyphonic_key_pressure import PolyphonicKeyPressure
from adafruit_midi.program_change import ProgramChange
from adafruit_midi.start import Start
from adafruit_midi.stop import Stop
from adafruit_midi.system_exclusive import SystemExclusive
from adafruit_midi.timing_clock import TimingClock
# Import after messages - opposite to other test file
import adafruit_midi
# For loopback/echo tests
# Builds a MIDI object whose output port is looped back to its input port via
# a shared byte buffer: write() appends to the buffer, read() pops from its
# front, so anything sent can immediately be received.
def MIDI_mocked_both_loopback(in_c, out_c):
usb_data = bytearray()
def write(buffer, length):
nonlocal usb_data
usb_data.extend(buffer[0:length])
def read(length):
nonlocal usb_data
poppedbytes = usb_data[0:length]
usb_data = usb_data[len(poppedbytes) :]
return bytes(poppedbytes)
mockedPortIn = Mock()
mockedPortIn.read = read
mockedPortOut = Mock()
mockedPortOut.write = write
m = adafruit_midi.MIDI(
midi_out=mockedPortOut, midi_in=mockedPortIn, out_channel=out_c, in_channel=in_c
)
return m
# Builds a receive-only MIDI object pre-loaded with `data`. read_sizes
# simulates a device that delivers the stream in fixed-size chunks: each
# read() returns at most the current chunk's remaining bytes, letting tests
# exercise message reassembly across partial reads.
def MIDI_mocked_receive(in_c, data, read_sizes):
usb_data = bytearray(data)
chunks = read_sizes
chunk_idx = 0
def read(length):
nonlocal usb_data, chunks, chunk_idx
# pylint: disable=no-else-return
if length != 0 and chunk_idx < len(chunks):
# min() to ensure we only read what's asked for and present
poppedbytes = usb_data[0 : min(length, chunks[chunk_idx])]
usb_data = usb_data[len(poppedbytes) :]
# Advance to the next chunk once this one is exhausted.
if length >= chunks[chunk_idx]:
chunk_idx += 1
else:
chunks[chunk_idx] -= len(poppedbytes)
return bytes(poppedbytes)
else:
return bytes()
mockedPortIn = Mock()
mockedPortIn.read = read
m = adafruit_midi.MIDI(
midi_out=None, midi_in=mockedPortIn, out_channel=in_c, in_channel=in_c
)
return m
# Verifies constructor argument validation of adafruit_midi.MIDI.
class Test_MIDI_constructor(unittest.TestCase):
# MIDI() with neither midi_in nor midi_out must raise ValueError.
def test_no_inout(self):
# constructor likes a bit of in out
with self.assertRaises(ValueError):
adafruit_midi.MIDI()
class Test_MIDI(unittest.TestCase):
# pylint: disable=too-many-branches
# Feeds a real captured controller byte stream one byte per read() and checks
# each MIDI message is reassembled correctly despite the partial reads.
def test_captured_data_one_byte_reads(self):
c = 0
# From an M-Audio AXIOM controller
raw_data = bytearray(
[0x90, 0x3E, 0x5F]
+ [0xD0, 0x10]
+ [0x90, 0x40, 0x66]
+ [0xB0, 0x1, 0x08]
+ [0x90, 0x41, 0x74]
+ [0xE0, 0x03, 0x40]
)
m = MIDI_mocked_receive(c, raw_data, [1] * len(raw_data))
for unused in range(100): # pylint: disable=unused-variable
msg = m.receive()
if msg is not None:
break
self.assertIsInstance(msg, NoteOn)
self.assertEqual(msg.note, 0x3E)
self.assertEqual(msg.velocity, 0x5F)
self.assertEqual(msg.channel, c)
# for loops currently absorb any Nones but could
# be set to read precisely the expected number...
for unused in range(100): # pylint: disable=unused-variable
msg = m.receive()
if msg is not None:
break
self.assertIsInstance(msg, ChannelPressure)
self.assertEqual(msg.pressure, 0x10)
self.assertEqual(msg.channel, c)
for unused in range(100): # pylint: disable=unused-variable
msg = m.receive()
if msg is not None:
break
self.assertIsInstance(msg, NoteOn)
self.assertEqual(msg.note, 0x40)
self.assertEqual(msg.velocity, 0x66)
self.assertEqual(msg.channel, c)
for unused in range(100): # pylint: disable=unused-variable
msg = m.receive()
if msg is not None:
break
self.assertIsInstance(msg, ControlChange)
self.assertEqual(msg.control, 0x01)
self.assertEqual(msg.value, 0x08)
self.assertEqual(msg.channel, c)
for unused in range(100): # pylint: disable=unused-variable
msg = m.receive()
if msg is not None:
break
self.assertIsInstance(msg, NoteOn)
self.assertEqual(msg.note, 0x41)
self.assertEqual(msg.velocity, 0x74)
self.assertEqual(msg.channel, c)
for unused in range(100): # pylint: disable=unused-variable
msg = m.receive()
if msg is not None:
break
self.assertIsInstance(msg, PitchBend)
self.assertEqual(msg.pitch_bend, 8195)
self.assertEqual(msg.channel, c)
# Stream exhausted: every further receive() must yield None.
for unused in range(100): # pylint: disable=unused-variable
msg = m.receive()
self.assertIsNone(msg)
# Unimplemented system messages ahead of a NoteOn must surface as
# MIDIUnknownEvent (channel-less) without corrupting the following message.
def test_unknown_before_NoteOn(self):
c = 0
# From an M-Audio AXIOM controller
raw_data = bytes(
[0b11110011, 0x10] # Song Select (not yet implemented)
+ [0b11110011, 0x20]
+ [0b11110100]
+ [0b11110101]
) + bytes(NoteOn("C5", 0x7F, channel=c))
m = MIDI_mocked_receive(c, raw_data, [2, 2, 1, 1, 3])
for unused in range(4): # pylint: disable=unused-variable
msg = m.receive()
self.assertIsInstance(msg, adafruit_midi.midi_message.MIDIUnknownEvent)
self.assertIsNone(msg.channel)
msg = m.receive()
self.assertIsInstance(msg, NoteOn)
self.assertEqual(msg.note, 0x48) # 0x48 is C5
self.assertEqual(msg.velocity, 0x7F)
self.assertEqual(msg.channel, c)
# See https://github.com/adafruit/Adafruit_CircuitPython_MIDI/issues/8
# Placeholder for MIDI running-status support: the data is prepared but no
# assertions exist yet because the feature is unimplemented upstream.
def test_running_status_when_implemented(self):
c = 8
raw_data = (
bytes(NoteOn("C5", 0x7F, channel=c))
+ bytes([0xE8, 0x72, 0x40] + [0x6D, 0x40] + [0x05, 0x41])
+ bytes(NoteOn("D5", 0x7F, channel=c))
)
m = MIDI_mocked_receive(c, raw_data, [3 + 3 + 2 + 3 + 3])
self.assertIsInstance(m, adafruit_midi.MIDI) # silence pylint!
# self.assertEqual(TOFINISH, WHENIMPLEMENTED)
# A message with a missing data byte must yield MIDIBadEvent while the
# surrounding well-formed messages still parse; documents the current
# (lossy) resynchronisation behavior.
def test_somegood_somemissing_databytes(self):
c = 8
raw_data = (
bytes(NoteOn("C5", 0x7F, channel=c))
+ bytes(
[0xE8, 0x72, 0x40]
+ [0xE8, 0x6D] # Missing last data byte
+ [0xE8, 0x5, 0x41]
)
+ bytes(NoteOn("D5", 0x7F, channel=c))
)
m = MIDI_mocked_receive(c, raw_data, [3 + 3 + 2 + 3 + 3])
msg1 = m.receive()
self.assertIsInstance(msg1, NoteOn)
self.assertEqual(msg1.note, 72)
self.assertEqual(msg1.velocity, 0x7F)
self.assertEqual(msg1.channel, c)
msg2 = m.receive()
self.assertIsInstance(msg2, PitchBend)
self.assertEqual(msg2.pitch_bend, 8306)
self.assertEqual(msg2.channel, c)
# The current implementation will read status bytes for data
# In most cases it would be a faster recovery with fewer messages
# lost if the next status byte wasn't consumed
# and parsing restarted from that byte
msg3 = m.receive()
self.assertIsInstance(msg3, adafruit_midi.midi_message.MIDIBadEvent)
self.assertIsInstance(msg3.data, bytes)
self.assertEqual(msg3.data, bytes([0xE8, 0x6D, 0xE8]))
self.assertIsNone(msg3.channel)
# (msg4, channel4) = m.receive()
# self.assertIsInstance(msg4, PitchBend)
# self.assertEqual(msg4.pitch_bend, 72)
# self.assertEqual(channel4, c)
msg5 = m.receive()
self.assertIsInstance(msg5, NoteOn)
self.assertEqual(msg5.note, 74)
self.assertEqual(msg5.velocity, 0x7F)
self.assertEqual(msg5.channel, c)
msg6 = m.receive()
self.assertIsNone(msg6)
def test_smallsysex_between_notes(self):
m = MIDI_mocked_both_loopback(3, 3)
m.send(
[
NoteOn("C4", 0x7F),
SystemExclusive([0x1F], [1, 2, 3, 4, 5, 6, 7, 8]),
NoteOff(60, 0x28),
]
)
msg1 = m.receive()
self.assertIsInstance(msg1, NoteOn)
self.assertEqual(msg1.note, 60)
self.assertEqual(msg1.velocity, 0x7F)
self.assertEqual(msg1.channel, 3)
msg2 = m.receive()
self.assertIsInstance(msg2, SystemExclusive)
self.assertEqual(msg2.manufacturer_id, bytes([0x1F]))
self.assertEqual(msg2.data, bytes([1, 2, 3, 4, 5, 6, 7, 8]))
self.assertEqual(msg2.channel, None) # SysEx does not have a channel
msg3 = m.receive()
self.assertIsInstance(msg3, NoteOff)
self.assertEqual(msg3.note, 60)
self.assertEqual(msg3.velocity, 0x28)
self.assertEqual(msg3.channel, 3)
msg4 = m.receive()
self.assertIsNone(msg4)
def test_smallsysex_bytes_type(self):
s = SystemExclusive([0x1F], [100, 150, 200])
self.assertIsInstance(s, SystemExclusive)
self.assertEqual(s.manufacturer_id, bytes([0x1F]))
self.assertIsInstance(s.manufacturer_id, bytes)
# check this really is immutable (pylint also picks this up!)
with self.assertRaises(TypeError):
s.data[0] = 0 # pylint: disable=unsupported-assignment-operation
self.assertEqual(s.data, bytes([100, 150, 200]))
self.assertIsInstance(s.data, bytes)
# pylint: disable=too-many-locals
def test_larger_than_buffer_sysex(self):
c = 0
monster_data_len = 500
raw_data = (
bytes(NoteOn("C5", 0x7F, channel=c))
+ bytes(
SystemExclusive([0x02], [d & 0x7F for d in range(monster_data_len)])
)
+ bytes(NoteOn("D5", 0x7F, channel=c))
)
m = MIDI_mocked_receive(c, raw_data, [len(raw_data)])
buffer_len = m._in_buf_size # pylint: disable=protected-access
self.assertTrue(
monster_data_len > buffer_len,
"checking our SysEx truly is larger than buffer",
)
msg1 = m.receive()
self.assertIsInstance(msg1, NoteOn)
self.assertEqual(msg1.note, 72)
self.assertEqual(msg1.velocity, 0x7F)
self.assertEqual(msg1.channel, c)
# (Ab)using python's rounding down for negative division
# pylint: disable=unused-variable
for unused in range(-(-(1 + 1 + monster_data_len + 1) // buffer_len) - 1):
msg2 = m.receive()
self.assertIsNone(msg2)
# The current implementation will read SysEx end status byte
# and report it as an unknown
msg3 = m.receive()
self.assertIsInstance(msg3, adafruit_midi.midi_message.MIDIUnknownEvent)
self.assertEqual(msg3.status, 0xF7)
self.assertIsNone(msg3.channel)
# (msg4, channel4) = m.receive()
# self.assertIsInstance(msg4, PitchBend)
# self.assertEqual(msg4.pitch_bend, 72)
# self.assertEqual(channel4, c)
msg5 = m.receive()
self.assertIsInstance(msg5, NoteOn)
self.assertEqual(msg5.note, 74)
self.assertEqual(msg5.velocity, 0x7F)
self.assertEqual(msg5.channel, c)
msg6 = m.receive()
self.assertIsNone(msg6)
# pylint does not like mock_calls - must be a better way to handle this?
# pylint: disable=no-member
class Test_MIDI_send(unittest.TestCase):
def test_send_basic_single(self):
# def printit(buffer, len):
# print(buffer[0:len])
mockedPortOut = Mock()
# mockedPortOut.write = printit
m = adafruit_midi.MIDI(midi_out=mockedPortOut, out_channel=2)
# Test sending some NoteOn and NoteOff to various channels
nextcall = 0
m.send(NoteOn(0x60, 0x7F))
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x92\x60\x7f", 3)
)
nextcall += 1
m.send(NoteOn(0x64, 0x3F))
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x92\x64\x3f", 3)
)
nextcall += 1
m.send(NoteOn(0x67, 0x1F))
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x92\x67\x1f", 3)
)
nextcall += 1
m.send(NoteOn(0x60, 0x00)) # Alternative to NoteOff
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x92\x60\x00", 3)
)
nextcall += 1
m.send(NoteOff(0x64, 0x01))
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x82\x64\x01", 3)
)
nextcall += 1
m.send(NoteOff(0x67, 0x02))
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x82\x67\x02", 3)
)
nextcall += 1
# Setting channel to non default
m.send(NoteOn(0x6C, 0x7F), channel=9)
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x99\x6c\x7f", 3)
)
nextcall += 1
m.send(NoteOff(0x6C, 0x7F), channel=9)
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x89\x6c\x7f", 3)
)
nextcall += 1
def test_send_badnotes(self):
mockedPortOut = Mock()
m = adafruit_midi.MIDI(midi_out=mockedPortOut, out_channel=2)
# Test sending some NoteOn and NoteOff to various channels
nextcall = 0
m.send(NoteOn(60, 0x7F))
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x92\x3c\x7f", 3)
)
nextcall += 1
with self.assertRaises(ValueError):
m.send(NoteOn(64, 0x80)) # Velocity > 127 - illegal value
with self.assertRaises(ValueError):
m.send(NoteOn(67, -1))
# test after exceptions to ensure sending is still ok
m.send(NoteOn(72, 0x7F))
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall], call(b"\x92\x48\x7f", 3)
)
nextcall += 1
def test_send_basic_sequences(self):
# def printit(buffer, len):
# print(buffer[0:len])
mockedPortOut = Mock()
# mockedPortOut.write = printit
m = adafruit_midi.MIDI(midi_out=mockedPortOut, out_channel=2)
# Test sending some NoteOn and NoteOff to various channels
nextcall = 0
# Test sequences with list syntax and pass a tuple too
note_list = [NoteOn(0x6C, 0x51), NoteOn(0x70, 0x52), NoteOn(0x73, 0x53)]
note_tuple = tuple(note_list)
m.send(note_list, channel=10)
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall],
call(b"\x9a\x6c\x51\x9a\x70\x52\x9a\x73\x53", 9),
"The implementation writes in one go, single 9 byte write expected",
)
nextcall += 1
m.send(note_tuple, channel=11)
self.assertEqual(
mockedPortOut.write.mock_calls[nextcall],
call(b"\x9b\x6c\x51\x9b\x70\x52\x9b\x73\x53", 9),
"The implementation writes in one go, single 9 byte write expected",
)
nextcall += 1
def test_termination_with_random_data(self):
"""Test with a random stream of bytes to ensure that the parsing code
termates and returns, i.e. does not go into any infinite loops.
"""
c = 0
random.seed(303808)
raw_data = bytearray([random.randint(0, 255) for i in range(50000)])
m = MIDI_mocked_receive(c, raw_data, [len(raw_data)])
noinfiniteloops = False
for unused in range(len(raw_data)): # pylint: disable=unused-variable
m.receive() # not interested in returned tuple
noinfiniteloops = True # interested in getting to here
self.assertTrue(noinfiniteloops)
if __name__ == "__main__":
unittest.main(verbosity=verbose)
|
[] |
[] |
[
"TESTVERBOSE"
] |
[]
|
["TESTVERBOSE"]
|
python
| 1 | 0 | |
src/main/java/utils/EMF_Creator.java
|
package utils;
import java.util.Properties;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
public class EMF_Creator {
/**
* Call this method before all integration tests that uses the Grizzly
* Server and the Test Database (in @BeforeAll ) Remember to call
* enRestTestWithDB() in @AfterAll
*/
public static void startREST_TestWithDB() {
System.setProperty("IS_INTEGRATION_TEST_WITH_DB", "testing");
}
/*
Call this method in your @AfterAll method if startREST_TestWithDB() was previously called
*/
public static void endREST_TestWithDB() {
System.clearProperty("IS_INTEGRATION_TEST_WITH_DB");
}
public static EntityManagerFactory createEntityManagerFactory() {
return createEntityManagerFactory(false);
}
public static EntityManagerFactory createEntityManagerFactoryForTest() {
return createEntityManagerFactory(true);
}
private static EntityManagerFactory createEntityManagerFactory(boolean isTest) {
boolean isDeployed = (System.getenv("DEPLOYED") != null);
if (isDeployed) {
/* Strategy for deployment */
System.out.println("USING ENVIRONMENT VARIABLES");
System.out.println("DEPLOYED -->" + System.getenv("DEPLOYED"));
System.out.println("USER -->" + System.getenv("USER"));
System.out.println("PW -->" + System.getenv("PW"));
System.out.println("CONNECTION_STR -->" + System.getenv("CONNECTION_STR"));
String user = System.getenv("USER");
String pw = System.getenv("PW");
String connection_str = System.getenv("CONNECTION_STR");
Properties props = new Properties();
props.setProperty("javax.persistence.jdbc.user", user);
props.setProperty("javax.persistence.jdbc.password", pw);
props.setProperty("javax.persistence.jdbc.url", connection_str);
props.setProperty("javax.persistence.jdbc.driver", "com.mysql.cj.jdbc.Driver");
return Persistence.createEntityManagerFactory("pu", props);
}
/* Strategy for dev and test
Uses the two persistence units declared in persistence.xml
*/
String puName = isTest || System.getProperty("IS_INTEGRATION_TEST_WITH_DB") != null ? "puTest" : "pu"; //Only legal names
if (puName.equals("puTest")) {
System.out.println("Using the TEST database via persistence-unit --> puTest ");
} else {
System.out.println("Using the DEV database via persistence-unit --> pu ");
}
EntityManagerFactory emf = null;
try {
emf = Persistence.createEntityManagerFactory(puName, null);
} catch (javax.persistence.PersistenceException ex){
System.out.println("##########################################################");
System.out.println("###### ERROR Creating a persistence Unit ######");
System.out.println("###### Have you started the dev and test databases? ######");
System.out.println("###### (docker-compose up -d ) ######");
System.out.println("##########################################################");
throw ex;
}
return emf;
}
}
|
[
"\"DEPLOYED\"",
"\"DEPLOYED\"",
"\"USER\"",
"\"PW\"",
"\"CONNECTION_STR\"",
"\"USER\"",
"\"PW\"",
"\"CONNECTION_STR\""
] |
[] |
[
"PW",
"USER",
"DEPLOYED",
"CONNECTION_STR"
] |
[]
|
["PW", "USER", "DEPLOYED", "CONNECTION_STR"]
|
java
| 4 | 0 | |
subprojects/core-api/src/main/java/org/gradle/initialization/BuildLayoutParameters.java
|
/*
* Copyright 2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.initialization;
import org.gradle.internal.SystemProperties;
import org.gradle.internal.installation.CurrentGradleInstallation;
import org.gradle.internal.installation.GradleInstallation;
import javax.annotation.Nullable;
import java.io.File;
import static org.gradle.internal.FileUtils.canonicalize;
public class BuildLayoutParameters {
public static final String GRADLE_USER_HOME_PROPERTY_KEY = "gradle.user.home";
private static final File DEFAULT_GRADLE_USER_HOME = new File(SystemProperties.getInstance().getUserHome() + "/.gradle");
private File gradleInstallationHomeDir;
private File gradleUserHomeDir;
private File projectDir;
private File currentDir;
public BuildLayoutParameters() {
this(
findGradleInstallationHomeDir(),
findGradleUserHomeDir(),
null,
canonicalize(SystemProperties.getInstance().getCurrentDir())
);
}
public BuildLayoutParameters(
@Nullable File gradleInstallationHomeDir,
File gradleUserHomeDir,
@Nullable File projectDir,
File currentDir
) {
this.gradleUserHomeDir = gradleUserHomeDir;
this.gradleInstallationHomeDir = gradleInstallationHomeDir;
this.projectDir = projectDir;
this.currentDir = currentDir;
}
static private File findGradleUserHomeDir() {
String gradleUserHome = System.getProperty(GRADLE_USER_HOME_PROPERTY_KEY);
if (gradleUserHome == null) {
gradleUserHome = System.getenv("GRADLE_USER_HOME");
if (gradleUserHome == null) {
gradleUserHome = DEFAULT_GRADLE_USER_HOME.getAbsolutePath();
}
}
return canonicalize(new File(gradleUserHome));
}
@Nullable
static private File findGradleInstallationHomeDir() {
GradleInstallation gradleInstallation = CurrentGradleInstallation.get();
if (gradleInstallation != null) {
return gradleInstallation.getGradleHome();
}
return null;
}
public BuildLayoutParameters setProjectDir(@Nullable File projectDir) {
this.projectDir = projectDir;
return this;
}
public BuildLayoutParameters setGradleUserHomeDir(File gradleUserHomeDir) {
this.gradleUserHomeDir = gradleUserHomeDir;
return this;
}
public BuildLayoutParameters setGradleInstallationHomeDir(@Nullable File gradleInstallationHomeDir) {
this.gradleInstallationHomeDir = gradleInstallationHomeDir;
return this;
}
public BuildLayoutParameters setCurrentDir(File currentDir) {
this.currentDir = currentDir;
return this;
}
public File getCurrentDir() {
return currentDir;
}
@Nullable
public File getProjectDir() {
return projectDir;
}
public File getSearchDir() {
return projectDir != null ? projectDir : currentDir;
}
public File getGradleUserHomeDir() {
return gradleUserHomeDir;
}
@Nullable
public File getGradleInstallationHomeDir() {
return gradleInstallationHomeDir;
}
}
|
[
"\"GRADLE_USER_HOME\""
] |
[] |
[
"GRADLE_USER_HOME"
] |
[]
|
["GRADLE_USER_HOME"]
|
java
| 1 | 0 | |
serverless/log/main.go
|
package main
import (
"encoding/json"
"os"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/go-redis/redis"
)
var rdb *redis.Client
func initClient() (err error) {
rdb = redis.NewClient(&redis.Options{
Addr: os.Getenv("REDIS_HOST") + ":" + os.Getenv("REDIS_PORT"),
Password: os.Getenv("REDIS_PASSWORD"),
DB: 0,
})
_, err = rdb.Ping().Result()
if err != nil {
return err
}
return nil
}
func Resp(StatusCode int, Body string) (*events.APIGatewayProxyResponse, error) {
return &events.APIGatewayProxyResponse{
StatusCode: int(StatusCode),
Headers: map[string]string{"Content-Type": "application/json"},
Body: Body,
IsBase64Encoded: false,
}, nil
}
func SCUBotLogKey(uid string) string {
const PREFIX = "checkinLog-"
return PREFIX + uid + "-*"
}
func handler(request events.APIGatewayProxyRequest) (*events.APIGatewayProxyResponse, error) {
if initClient() != nil {
return Resp(403, "{\"detail\": \"数据库连接错误\"}")
}
if uid, ok := request.QueryStringParameters["uid"]; ok {
result, err := rdb.Keys(SCUBotLogKey(uid)).Result()
if err != nil {
return Resp(403, "{\"detail\": \"数据库操作错误\"}")
}
if len(result) == 0 {
return Resp(200, "{\"message\": []}")
}
valRes, err := rdb.MGet(result...).Result()
if err != nil {
return Resp(403, "{\"detail\": \"数据库操作错误\"}")
}
resJson, err := json.Marshal(valRes)
if err != nil {
return Resp(403, "{\"detail\": \"数据库操作错误\"}")
}
return Resp(200, "{\"message\": " + string(resJson) + "}")
}
return Resp(403, "{\"detail\": \"参数错误\"}")
}
func main() {
lambda.Start(handler)
}
|
[
"\"REDIS_HOST\"",
"\"REDIS_PORT\"",
"\"REDIS_PASSWORD\""
] |
[] |
[
"REDIS_PASSWORD",
"REDIS_PORT",
"REDIS_HOST"
] |
[]
|
["REDIS_PASSWORD", "REDIS_PORT", "REDIS_HOST"]
|
go
| 3 | 0 | |
backend/roody_itech_30297/wsgi.py
|
"""
WSGI config for roody_itech_30297 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'roody_itech_30297.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
from io import BytesIO
import logging
import optparse
import os
import pdb
import shutil
from struct import pack
import sys
import tempfile
import time
from . import coverage
from .address import wif_to_privkey
from .authproxy import JSONRPCException
from .blocktools import (
create_block,
create_coinbase_pos,
create_transaction_from_outpoint,
is_zerocoin,
)
from .key import CECKey
from .messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
hash256,
)
from .script import (
CScript,
OP_CHECKSIG,
)
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
assert_greater_than,
check_json_precision,
connect_nodes,
connect_nodes_clique,
disconnect_nodes,
DEFAULT_FEE,
get_datadir_path,
hex_str_to_bytes,
bytes_to_hex_str,
initialize_datadir,
set_node_times,
SPORK_ACTIVATION_TIME,
SPORK_DEACTIVATION_TIME,
sync_blocks,
sync_mempools,
vZC_DENOMS,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "sss_func_test_"
class sssTestFramework():
"""Base class for a sss test script.
Individual sss test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave sssolutionsds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop sssolutionsds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing sssolutionsd/sssolutions-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option('--legacywallet', dest="legacywallet", default=False, action="store_true",
help='create pre-HD wallets only')
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use sssolutions-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: sssolutionsds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# connect_nodes(self.nodes[1], 2)
for i in range(self.num_nodes - 1):
connect_nodes(self.nodes[i + 1], i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
# Check wallet version
if self.options.legacywallet:
for arg in extra_args:
arg.append('-legacywallet')
self.log.info("Running test with legacy (pre-HD) wallet")
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a sssolutionsd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple sssolutionsds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a sssolutionsd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple sssolutionsd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'sssolutionsd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "sssolutionsd should have exited with an error"
else:
assert_msg = "sssolutionsd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes(self.nodes[1], 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
Sets mocktime to Tuesday, October 31, 2017 6:21:20 PM GMT (1572546080)
"""
self.mocktime = 1572546080
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as sssolutionsd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, toPosPhase=False):
"""Initialize a pre-mined blockchain for use by the test."""
def create_cachedir(cachedir):
if os.path.isdir(cachedir):
shutil.rmtree(cachedir)
os.makedirs(cachedir)
def copy_cachedir(origin, destination, num_nodes=MAX_NODES):
for i in range(num_nodes):
from_dir = get_datadir_path(origin, i)
to_dir = get_datadir_path(destination, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(destination, i) # Overwrite port/rpcport in sss.conf
def clone_cache_from_node_1(cachedir, from_num=4):
""" Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES"""
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
assert from_num < MAX_NODES
node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), "regtest")
for i in range(from_num, MAX_NODES):
node_i_datadir = os.path.join(get_datadir_path(cachedir, i), "regtest")
for subdir in ["blocks", "chainstate", "sporks", "zerocoin"]:
copy_and_overwrite(os.path.join(node_0_datadir, subdir),
os.path.join(node_i_datadir, subdir))
initialize_datadir(cachedir, i) # Overwrite port/rpcport in sss.conf
def cachedir_valid(cachedir):
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(cachedir, i)):
return False
# nodes directories exist. check if the first one has the .incomplete flagfile
return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), ".incomplete")))
def clean_cache_subdir(cachedir):
os.remove(os.path.join(get_datadir_path(cachedir, 0), ".incomplete"))
def cache_path(n, *paths):
return os.path.join(get_datadir_path(cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
def clean_cache_dir():
if os.path.isdir(self.options.cachedir):
# migrate old cache dir
if cachedir_valid(self.options.cachedir):
powcachedir = os.path.join(self.options.cachedir, "pow")
self.log.info("Found old cachedir. Migrating to %s" % str(powcachedir))
copy_cachedir(self.options.cachedir, powcachedir)
# remove everything except pow and pos subdirs
for entry in os.listdir(self.options.cachedir):
if entry not in ['pow', 'pos']:
entry_path = os.path.join(self.options.cachedir, entry)
if os.path.isfile(entry_path):
os.remove(entry_path)
elif os.path.isdir(entry_path):
shutil.rmtree(entry_path)
# no cachedir found
else:
os.makedirs(self.options.cachedir)
def start_nodes_from_dir(ddir, num_nodes=MAX_NODES):
self.log.info("Starting %d nodes..." % num_nodes)
for i in range(num_nodes):
datadir = initialize_datadir(ddir, i)
if i == 0:
# Add .incomplete flagfile
# (removed at the end during clean_cache_subdir)
open(os.path.join(datadir, ".incomplete"), 'a').close()
args = [os.getenv("BITCOIND", "sssolutionsd"), "-spendzeroconfchange=1", "-server", "-keypool=1",
"-datadir=" + datadir, "-discover=0"]
self.nodes.append(
TestNode(i, ddir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None,
mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
self.log.info("Node %d started." % i)
# Wait for RPC connections to be ready
self.log.info("Nodes started. Waiting for RPC connections...")
for node in range(4):
self.nodes[node].wait_for_rpc_connection()
self.log.info("Connecting nodes")
connect_nodes_clique(self.nodes)
def stop_and_clean_cache_dir(ddir):
self.stop_nodes()
self.nodes = []
# Copy cache for nodes 5 to MAX_NODES
self.log.info("Copying cache dir to non-started nodes")
clone_cache_from_node_1(ddir)
self.log.info("Cleaning up.")
clean_cache_subdir(ddir)
def generate_pow_cache():
### POW Cache ###
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 1 minutes apart
# starting from 331 minutes in the past
# Create cache directories, run sssolutionsds:
create_cachedir(powcachedir)
self.log.info("Creating 'PoW-chain': 200 blocks")
start_nodes_from_dir(powcachedir, 4)
# Mine the blocks
self.log.info("Mining 200 blocks")
self.enable_mocktime()
block_time = self.mocktime - (331 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.log.info("Stopping nodes")
stop_and_clean_cache_dir(powcachedir)
self.log.info("---> pow cache created")
self.disable_mocktime()
assert self.num_nodes <= MAX_NODES
clean_cache_dir()
powcachedir = os.path.join(self.options.cachedir, "pow")
is_powcache_valid = cachedir_valid(powcachedir)
poscachedir = os.path.join(self.options.cachedir, "pos")
is_poscache_valid = cachedir_valid(poscachedir)
if not toPosPhase and not is_powcache_valid:
self.log.info("PoW-CACHE NOT FOUND or INVALID.")
self.log.info("Creating new cached blockchain data.")
generate_pow_cache()
elif toPosPhase and not is_poscache_valid:
self.log.info("PoS-CACHE NOT FOUND or INVALID.")
self.log.info("Creating new cached blockchain data.")
# check if first 200 blocks (pow cache) is present. if not generate it.
if not is_powcache_valid:
self.log.info("PoW-CACHE NOT FOUND or INVALID. Generating it first.")
generate_pow_cache()
self.enable_mocktime()
block_time = self.mocktime - (131 * 60)
### POS Cache ###
# Create a 330-block-long chain
# First 200 PoW blocks are copied from PoW chain.
# The next 48 PoW blocks are mined in 12-blocks bursts by the first 4 nodes.
# The last 2 PoW blocks are then mined by the last node (Node 3).
# Then 80 PoS blocks are generated in 20-blocks bursts by the first 4 nodes.
#
# - Node 0 and node 1 get 62 mature blocks (pow) + 20 immmature (pos)
# 42 rewards spendable (62 mature blocks - 20 spent rewards)
# - Node 2 gets 56 mature blocks (pow) + 26 immmature (6 pow + 20 pos)
# 35 rewards spendable (55 mature blocks - 20 spent rewards)
# - Node 3 gets 50 mature blocks (pow) + 34 immmature (14 pow + 20 pos)
# 30 rewards spendable (50 mature blocks - 20 spent rewards)
# - Nodes 2 and 3 mint one zerocoin for each denom (tot 6666 SSS) on block 301/302
# 8 mature zc + 8/3 rewards spendable (35/30 - 27 spent) + change 83.92
#
# Block 331-336 will mature last 6 pow blocks mined by node 2.
# Then 337-350 will mature last 14 pow blocks mined by node 3.
# Then staked blocks start maturing at height 351.
# Create cache directories, run sssolutionsds:
create_cachedir(poscachedir)
self.log.info("Creating 'PoS-chain': 330 blocks")
self.log.info("Copying 200 initial blocks from pow cache")
copy_cachedir(powcachedir, poscachedir)
# Change datadir and restart the nodes (only 4 of them)
start_nodes_from_dir(poscachedir, 4)
# Mine 50 more blocks to reach PoS start.
self.log.info("Mining 50 more blocks to reach PoS phase")
for peer in range(4):
for j in range(12):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
if peer < 3:
sync_blocks(self.nodes)
set_node_times(self.nodes, block_time)
self.nodes[3].generate(2)
block_time += 60
sync_blocks(self.nodes)
# Then stake 80 blocks.
self.log.info("Staking 80 blocks...")
nBlocks = 250
res = [] # used to save the two txids for change outputs of mints (locked)
for peer in range(4):
for j in range(20):
# Stake block
block_time = self.generate_pos(peer, block_time)
nBlocks += 1
# Mint zerocoins with node-2 at block 301 and with node-3 at block 302
if nBlocks == 301 or nBlocks == 302:
# mints 7 zerocoins, one for each denom (tot 6666 SSS), fee = 0.01 * 8
# consumes 27 utxos (tot 6750 SSS), change = 6750 - 6666 - fee
res.append(self.nodes[nBlocks-299].mintzerocoin(6666))
self.sync_all()
# lock the change output (so it's not used as stake input in generate_pos)
assert (self.nodes[nBlocks-299].lockunspent(False, [{"txid": res[-1]['txid'], "vout": 8}]))
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
time.sleep(1)
self.log.info("80 blocks staked")
# Unlock previously locked change outputs
for i in [2, 3]:
assert (self.nodes[i].lockunspent(True, [{"txid": res[i-2]['txid'], "vout": 8}]))
# Verify height and balances
self.test_PoS_chain_balances()
# Shut nodes down, and clean up cache directories:
self.log.info("Stopping nodes")
stop_and_clean_cache_dir(poscachedir)
self.log.info("--> pos cache created")
self.disable_mocktime()
else:
self.log.info("CACHE FOUND.")
# Copy requested cache to tempdir
if toPosPhase:
self.log.info("Copying datadir from %s to %s" % (poscachedir, self.options.tmpdir))
copy_cachedir(poscachedir, self.options.tmpdir, self.num_nodes)
else:
self.log.info("Copying datadir from %s to %s" % (powcachedir, self.options.tmpdir))
copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes)
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
# Write a fresh datadir (with config) for every node; no blocks are mined here.
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
### SSS Specific TestFramework ###
###################################
def init_dummy_key(self):
"""Create self.DUMMY_KEY, a deterministic throw-away key used to sign test blocks."""
self.DUMMY_KEY = CECKey()
# secret = hash256 of the little-endian uint32 0xffff -> same key on every run
self.DUMMY_KEY.set_secretbytes(hash256(pack('<I', 0xffff)))
def test_PoS_chain_balances(self):
"""Sanity-check height and wallet balances of the freshly built 330-block PoS cache chain."""
from .util import DecimalAmt
# 330 blocks
# - Nodes 0 and 1 get 82 blocks:
# 62 pow + 20 pos (20 immature)
# - Nodes 2 gets 82 blocks:
# 62 pow + 20 pos (26 immature)
# - Nodes 3 gets 84 blocks:
# 64 pow + 20 pos (34 immature)
# - Nodes 2 and 3 have 6666 SSS worth of zerocoins
zc_tot = sum(vZC_DENOMS)
zc_fee = len(vZC_DENOMS) * 0.01
# mints consumed whole 250-SSS utxos; change is what exceeds mint total + fee
used_utxos = (zc_tot // 250) + 1
zc_change = 250 * used_utxos - zc_tot - zc_fee
# check at least 1 node and at most 5
num_nodes = min(5, len(self.nodes))
assert_greater_than(num_nodes, 0)
# each node has the same height and tip
best_block = self.nodes[0].getbestblockhash()
for i in range(num_nodes):
assert_equal(self.nodes[i].getblockcount(), 330)
if i > 0:
assert_equal(self.nodes[i].getbestblockhash(), best_block)
# balance is mature pow blocks rewards minus stake inputs (spent)
w_info = [self.nodes[i].getwalletinfo() for i in range(num_nodes)]
assert_equal(w_info[0]["balance"], DecimalAmt(250.0 * (62 - 20)))
assert_equal(w_info[1]["balance"], DecimalAmt(250.0 * (62 - 20)))
assert_equal(w_info[2]["balance"], DecimalAmt(250.0 * (56 - 20) - (used_utxos * 250) + zc_change))
assert_equal(w_info[3]["balance"], DecimalAmt(250.0 * (50 - 20) - (used_utxos * 250) + zc_change))
for i in range(4, num_nodes):
# only first 4 nodes have mined/staked
assert_equal(w_info[i]["balance"], DecimalAmt(0))
# immature balance is immature pow blocks rewards plus
# immature stakes (outputs=inputs+rewards)
assert_equal(w_info[0]["immature_balance"], DecimalAmt(500.0 * 20))
assert_equal(w_info[1]["immature_balance"], DecimalAmt(500.0 * 20))
assert_equal(w_info[2]["immature_balance"], DecimalAmt((250.0 * 6) + (500.0 * 20)))
assert_equal(w_info[3]["immature_balance"], DecimalAmt((250.0 * 14) + (500.0 * 20)))
for i in range(4, num_nodes):
# only first 4 nodes have mined/staked
assert_equal(w_info[i]["immature_balance"], DecimalAmt(0))
# check zerocoin balances / mints
for peer in [2, 3]:
if num_nodes > peer:
zcBalance = self.nodes[peer].getzerocoinbalance()
zclist = self.nodes[peer].listmintedzerocoins(True)
zclist_spendable = self.nodes[peer].listmintedzerocoins(True, True)
assert_equal(len(zclist), len(vZC_DENOMS))
assert_equal(zcBalance['Total'], 6666)
assert_equal(zcBalance['Immature'], 0)
if peer == 2:
# node 2 minted one block earlier, so all its mints are already spendable
assert_equal(len(zclist), len(zclist_spendable))
assert_equal(set([x['denomination'] for x in zclist]), set(vZC_DENOMS))
assert_equal([x['confirmations'] for x in zclist], [30-peer] * len(vZC_DENOMS))
self.log.info("Balances of first %d nodes check out" % num_nodes)
def get_prevouts(self, node_id, utxo_list, zpos=False, nHeight=-1):
""" get prevouts (map) for each utxo in a list
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.
utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input
<if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input
zpos: (bool) type of utxo_list
nHeight: (int) height of the previous block. used only if zpos=True for
stake checksum. Optional, if not provided rpc_conn's height is used.
:return: prevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zsss-)
to (amount, prevScript, timeBlockFrom).
For zsss prevScript is replaced with serialHash hex string.
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
prevouts = {}
for utxo in utxo_list:
if not zpos:
outPoint = COutPoint(int(utxo['txid'], 16), utxo['vout'])
outValue = int(utxo['amount']) * COIN
prevtx_json = rpc_conn.getrawtransaction(utxo['txid'], 1)
prevTx = CTransaction()
prevTx.deserialize(BytesIO(hex_str_to_bytes(prevtx_json['hex'])))
if (prevTx.is_coinbase() or prevTx.is_coinstake()) and utxo['confirmations'] < 100:
# skip immature coins
continue
prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex']
prevTime = prevtx_json['blocktime']
prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime)
else:
# hash stake hex is reversed to little-endian bytes — presumably to match
# the serialized uniqueness format; confirm against CStake serialization.
uniqueness = bytes.fromhex(utxo['hash stake'])[::-1]
prevouts[uniqueness] = (int(utxo["denomination"]) * COIN, utxo["serial hash"], 0)
return prevouts
def make_txes(self, node_id, spendingPrevOuts, to_pubKey):
""" makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey
:param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts.
spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zsss-)
to (amount, prevScript, timeBlockFrom).
For zsss prevScript is replaced with serialHash hex string.
to_pubKey (bytes) recipient public key
:return: block_txes: ([CTransaction] list)
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
block_txes = []
for uniqueness in spendingPrevOuts:
if is_zerocoin(uniqueness):
# spend zSSS
_, serialHash, _ = spendingPrevOuts[uniqueness]
raw_spend = rpc_conn.createrawzerocoinspend(serialHash, "", False)
else:
# spend SSS — DEFAULT_FEE is deducted from the output value
value_out = int(spendingPrevOuts[uniqueness][0] - DEFAULT_FEE * COIN)
scriptPubKey = CScript([to_pubKey, OP_CHECKSIG])
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(uniqueness))
tx = create_transaction_from_outpoint(prevout, b"", value_out, scriptPubKey)
# sign tx
raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
# add signed tx to the list
signed_tx = CTransaction()
signed_tx.from_hex(raw_spend)
block_txes.append(signed_tx)
return block_txes
def stake_block(self, node_id,
nHeight,
prevHhash,
prevModifier,
stakeableUtxos,
startTime=None,
privKeyWIF=None,
vtx=[],
fDoubleSpend=False):
""" manually stakes a block selecting the coinstake input from a list of candidates
:param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos.
nHeight: (int) height of the block being produced
prevHash: (string) hex string of the previous block hash
prevModifier (string) hex string of the previous block stake modifier
stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zsss-)
to (amount, prevScript, timeBlockFrom).
For zsss prevScript is replaced with serialHash hex string.
startTime: (int) epoch time to be used as blocktime (iterated in solve_stake)
privKeyWIF: (string) private key to be used for staking/signing
If empty string (""), the deterministic DUMMY_KEY is used.
If None, the key of the stake input is used instead
(dumping the sk from rpc_conn).
vtx: ([CTransaction] list) transactions to add to block.vtx
fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input
:return: block: (CBlock) block produced, must be manually relayed
"""
# NOTE(review): vtx=[] is a mutable default; safe here because vtx is only
# iterated, never mutated.
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
if not len(stakeableUtxos) > 0:
raise Exception("Need at least one stakeable utxo to stake a block!")
# Get start time to stake
if startTime is None:
startTime = time.time()
# Create empty block with coinbase
# low 4 bits of the time are masked off — presumably protocol time granularity; confirm
nTime = int(startTime) & 0xfffffff0
coinbaseTx = create_coinbase_pos(nHeight)
block = create_block(int(prevHhash, 16), coinbaseTx, nTime)
# Find valid kernel hash - iterates stakeableUtxos, then block.nTime
block.solve_stake(stakeableUtxos, int(prevModifier, 16))
# Check if this is a zPoS block or regular/cold stake - sign stake tx
block_sig_key = CECKey()
isZPoS = is_zerocoin(block.prevoutStake)
if isZPoS:
# !TODO: remove me
raise Exception("zPOS tests discontinued")
else:
coinstakeTx_unsigned = CTransaction()
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstakeTx_unsigned.vin.append(CTxIn(prevout, b"", 0xffffffff))
coinstakeTx_unsigned.vout.append(CTxOut())
amount, prevScript, _ = stakeableUtxos[block.prevoutStake]
# coinstake output = stake input + 250 SSS reward
outNValue = int(amount + 250 * COIN)
coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript)))
if privKeyWIF == "":
# Use dummy key
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
block_sig_key = self.DUMMY_KEY
# replace coinstake output script
coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG])
else:
if privKeyWIF == None:
# Use pk of the input. Ask sk from rpc_conn
rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True)
privKeyWIF = rpc_conn.dumpprivkey(rawtx["vout"][prevout.n]["scriptPubKey"]["addresses"][0])
# Use the provided privKeyWIF (cold staking).
# export the corresponding private key to sign block
privKey, compressed = wif_to_privkey(privKeyWIF)
block_sig_key.set_compressed(compressed)
block_sig_key.set_secretbytes(bytes.fromhex(privKey))
# Sign coinstake TX and add it to the block
stake_tx_signed_raw_hex = rpc_conn.signrawtransaction(
bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex']
# Add coinstake to the block
coinstakeTx = CTransaction()
coinstakeTx.from_hex(stake_tx_signed_raw_hex)
block.vtx.append(coinstakeTx)
# Add provided transactions to the block.
# Don't add tx doublespending the coinstake input, unless fDoubleSpend=True
for tx in vtx:
if not fDoubleSpend:
# assume txes don't double spend zSSS inputs when fDoubleSpend is false. It needs to
# be checked outside until a convenient tx.spends(zerocoin) is added to the framework.
if not isZPoS and tx.spends(prevout):
continue
block.vtx.append(tx)
# Get correct MerkleRoot and rehash block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
# sign block with block signing key and return it
block.sign_block(block_sig_key)
return block
def stake_next_block(self, node_id,
stakeableUtxos,
btime=None,
privKeyWIF=None,
vtx=[],
fDoubleSpend=False):
""" Calls stake_block appending to the current tip"""
# Fetch the current tip height/hash/stake-modifier from the node, then delegate.
assert_greater_than(len(self.nodes), node_id)
nHeight = self.nodes[node_id].getblockcount()
prevHhash = self.nodes[node_id].getblockhash(nHeight)
prevModifier = self.nodes[node_id].getblock(prevHhash)['stakeModifier']
return self.stake_block(node_id,
nHeight+1,
prevHhash,
prevModifier,
stakeableUtxos,
btime,
privKeyWIF,
vtx,
fDoubleSpend)
def check_tx_in_chain(self, node_id, txid):
"""Assert that `txid` has at least one confirmation on nodes[node_id]."""
assert_greater_than(len(self.nodes), node_id)
rawTx = self.nodes[node_id].getrawtransaction(txid, 1)
assert_greater_than(rawTx["confirmations"], 0)
def spend_inputs(self, node_id, inputs, outputs):
""" auxiliary function used by spend_utxo / spend_utxos
:return: (string) tx hash if the tx was fully signed and sent, "" otherwise
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
spendingTx = rpc_conn.createrawtransaction(inputs, outputs)
spendingTx_signed = rpc_conn.signrawtransaction(spendingTx)
if spendingTx_signed["complete"]:
txhash = rpc_conn.sendrawtransaction(spendingTx_signed["hex"])
return txhash
else:
return ""
def spend_utxo(self, node_id, utxo, recipient=''):
""" spend amount from previously unspent output to a provided address
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
utxo: (JSON) returned from listunspent used as input
recipient: (string) destination address (new one if not provided)
:return: txhash: (string) tx hash if successful, empty string otherwise
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
# DEFAULT_FEE is deducted from the spent amount
out_amount = float(utxo["amount"]) - DEFAULT_FEE
outputs = {}
if recipient == '':
recipient = rpc_conn.getnewaddress()
outputs[recipient] = out_amount
return self.spend_inputs(node_id, inputs, outputs)
def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False):
    """ spend utxos to the provided address or a newly generated one.
    :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.
           utxo_list: (JSON list) returned from listunspent used as input
           recipient: (string, optional) destination address (new one if not provided)
           fMultiple: (boolean, optional, default=false) spend each utxo on a different tx
    :return: txHashes: (string list) list of hashes of completed txs
    """
    assert_greater_than(len(self.nodes), node_id)
    rpc_conn = self.nodes[node_id]
    txHashes = []
    # If no recipient is given, create a new one, shared by every tx below.
    # (The original re-checked recipient == '' inside the single-tx branch,
    # but by then it is always set; that dead check is removed.)
    if recipient == '':
        recipient = rpc_conn.getnewaddress()
    if fMultiple:
        # Send one tx for each utxo
        for utxo in utxo_list:
            txHash = self.spend_utxo(node_id, utxo, recipient)
            if txHash != "":
                txHashes.append(txHash)
    else:
        # Make a single tx with all the inputs; DEFAULT_FEE deducted once
        inputs = [{"txid": x["txid"], "vout": x["vout"]} for x in utxo_list]
        out_amount = sum([float(x["amount"]) for x in utxo_list]) - DEFAULT_FEE
        outputs = {recipient: out_amount}
        txHash = self.spend_inputs(node_id, inputs, outputs)
        if txHash != "":
            txHashes.append(txHash)
    return txHashes
def generate_pos(self, node_id, btime=None):
""" stakes a block using generate on nodes[node_id]
:param node_id: (int) index of the CTestNode used as rpc connection
       btime:   (int, optional) current mock time; when given, node clocks are
                nudged forward while retrying and synced after the block lands
:return: (int) updated mock time, or None if btime was not given
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
ss = rpc_conn.getstakingstatus()
assert ss["walletunlocked"]
assert ss["stakeablecoins"] > 0
assert ss["stakingbalance"] > 0.0
if btime is not None:
next_btime = btime + 60
fStaked = False
failures = 0
while not fStaked:
try:
rpc_conn.generate(1)
fStaked = True
except JSONRPCException as e:
if ("Couldn't create new block" in str(e)):
failures += 1
# couldn't generate block. check that this node can still stake (after 60 failures)
if failures > 60:
ss = rpc_conn.getstakingstatus()
if not (ss["walletunlocked"] and ss["stakeablecoins"] > 0 and ss["stakingbalance"] > 0.0):
raise AssertionError("Node %d unable to stake!" % node_id)
# try to stake one sec in the future
if btime is not None:
btime += 1
set_node_times(self.nodes, btime)
else:
time.sleep(1)
else:
raise e
# block generated. adjust block time
if btime is not None:
btime = max(btime + 1, next_btime)
set_node_times(self.nodes, btime)
return btime
else:
return None
def generate_pow(self, node_id, btime=None):
""" mines a PoW block using generate on nodes[node_id];
if btime is given, advances all node clocks by 60s and returns the new time."""
assert_greater_than(len(self.nodes), node_id)
self.nodes[node_id].generate(1)
if btime is not None:
btime += 60
set_node_times(self.nodes, btime)
return btime
def set_spork(self, node_id, sporkName, value):
"""Set spork `sporkName` to `value` via nodes[node_id]; returns the rpc result."""
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork(sporkName, value)
def get_spork(self, node_id, sporkName):
"""Return the current value of `sporkName` as reported by nodes[node_id]."""
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("show")[sporkName]
def activate_spork(self, node_id, sporkName):
"""Activate a spork by setting it to SPORK_ACTIVATION_TIME."""
return self.set_spork(node_id, sporkName, SPORK_ACTIVATION_TIME)
def deactivate_spork(self, node_id, sporkName):
"""Deactivate a spork by setting it to SPORK_DEACTIVATION_TIME."""
return self.set_spork(node_id, sporkName, SPORK_DEACTIVATION_TIME)
def is_spork_active(self, node_id, sporkName):
"""Return True if `sporkName` is currently active on nodes[node_id]."""
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("active")[sporkName]
### ------------------------------------------------------
class ComparisonTestFramework(sssTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some sssolutionsd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
# Two nodes (1 test + 1 reference) starting from an empty chain by default.
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
# Binaries default to $BITCOIND, falling back to "sssolutionsd" on PATH.
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "sssolutionsd"),
help="sssolutionsd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "sssolutionsd"),
help="sssolutionsd binary to use for reference nodes (if any)")
def setup_network(self):
# Whitelist localhost so peers are never banned during comparison runs.
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
# Node 0 runs the binary under test; remaining nodes run the reference binary.
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
    """Raised by a test to signal that it should be skipped.

    The runner catches this exception and reports the test as skipped
    rather than failed.
    """

    def __init__(self, message):
        # Forward to Exception so str(e)/e.args carry the reason as well;
        # keep the .message attribute for existing callers.
        super().__init__(message)
        self.message = message
|
[] |
[] |
[
"PATH",
"BITCOIND"
] |
[]
|
["PATH", "BITCOIND"]
|
python
| 2 | 0 | |
test/e2e/self_provisioned_elasticsearch_test.go
|
// +build self_provisioned_elasticsearch
package e2e
import (
"context"
goctx "context"
"fmt"
"os"
"strings"
"testing"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"github.com/jaegertracing/jaeger-operator/pkg/apis"
v1 "github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1"
esv1 "github.com/jaegertracing/jaeger-operator/pkg/storage/elasticsearch/v1"
)
// SelfProvisionedTestSuite exercises the Jaeger production strategy backed by a
// self-provisioned (elasticsearch-operator managed) Elasticsearch cluster.
type SelfProvisionedTestSuite struct {
suite.Suite
}
// SetupSuite registers the Jaeger and Elasticsearch CRD types with the
// operator-sdk framework scheme, prepares the test context and resolves the
// target namespace. The suite is skipped outside OpenShift because the
// es-operator only runs there.
func (suite *SelfProvisionedTestSuite) SetupSuite() {
t = suite.T()
if !isOpenShift(t) {
t.Skipf("Test %s is currently supported only on OpenShift because es-operator runs only on OpenShift\n", t.Name())
}
assert.NoError(t, framework.AddToFrameworkScheme(apis.AddToScheme, &v1.JaegerList{
TypeMeta: metav1.TypeMeta{
Kind: "Jaeger",
APIVersion: "jaegertracing.io/v1",
},
}))
assert.NoError(t, framework.AddToFrameworkScheme(apis.AddToScheme, &esv1.ElasticsearchList{
TypeMeta: metav1.TypeMeta{
Kind: "Elasticsearch",
APIVersion: "logging.openshift.io/v1",
},
}))
addToFrameworkSchemeForSmokeTests(t)
var err error
ctx, err = prepare(t)
if err != nil {
// prepare failed; clean up whatever context was created before bailing out
if ctx != nil {
ctx.Cleanup()
}
require.FailNow(t, "Failed in prepare")
}
fw = framework.Global
namespace, _ = ctx.GetNamespace()
require.NotNil(t, namespace, "GetNamespace failed")
}
// TearDownSuite runs the shared suite-level cleanup helper.
func (suite *SelfProvisionedTestSuite) TearDownSuite() {
handleSuiteTearDown()
}
// TestSelfProvisionedSuite is the go-test entry point that runs this testify suite.
func TestSelfProvisionedSuite(t *testing.T) {
suite.Run(t, new(SelfProvisionedTestSuite))
}
// SetupTest refreshes the package-level t before each test case.
func (suite *SelfProvisionedTestSuite) SetupTest() {
t = suite.T()
}
// AfterTest runs the shared per-test failure handler after every test case.
func (suite *SelfProvisionedTestSuite) AfterTest(suiteName, testName string) {
handleTestFailure()
}
// TestSelfProvisionedESSmokeTest deploys a production-strategy Jaeger backed by
// self-provisioned Elasticsearch, waits for the collector and query deployments,
// runs the production smoke test and verifies the collector image in use.
func (suite *SelfProvisionedTestSuite) TestSelfProvisionedESSmokeTest() {
// create jaeger custom resource
jaegerInstanceName := "simple-prod"
exampleJaeger := getJaegerSimpleProd(jaegerInstanceName, testOtelCollector)
err := fw.Client.Create(goctx.TODO(), exampleJaeger, &framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval})
require.NoError(t, err, "Error deploying example Jaeger")
defer undeployJaegerInstance(exampleJaeger)
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName+"-collector", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for collector deployment")
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName+"-query", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for query deployment")
ProductionSmokeTest(jaegerInstanceName)
// Make sure we were using the correct collector image
verifyCollectorImage(jaegerInstanceName, namespace, testOtelCollector)
}
// TestIncreasingReplicas deploys a 1-node ES / 1-replica Jaeger, scales ES,
// collector and query to 2, waits for all deployments and pods to converge,
// and re-runs the smoke test against the scaled instance.
func (suite *SelfProvisionedTestSuite) TestIncreasingReplicas() {
jaegerInstanceName := "simple-prod2"
exampleJaeger := getJaegerSimpleProd(jaegerInstanceName, testOtelCollector)
err := fw.Client.Create(goctx.TODO(), exampleJaeger, &framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval})
require.NoError(t, err, "Error deploying example Jaeger")
defer undeployJaegerInstance(exampleJaeger)
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName+"-collector", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for collector deployment")
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName+"-query", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for query deployment")
ProductionSmokeTest(jaegerInstanceName)
// Scale everything up to 2 and confirm the CR reflects the change.
updateESNodeCount := 2
updateCollectorCount := int32(2)
updateQueryCount := int32(2)
changeNodeCount(jaegerInstanceName, updateESNodeCount, updateCollectorCount, updateQueryCount)
updatedJaegerInstance := getJaegerInstance(jaegerInstanceName, namespace)
require.EqualValues(t, updateESNodeCount, updatedJaegerInstance.Spec.Storage.Elasticsearch.NodeCount)
require.EqualValues(t, updateCollectorCount, *updatedJaegerInstance.Spec.Collector.Replicas)
require.EqualValues(t, updateQueryCount, *updatedJaegerInstance.Spec.Query.Replicas)
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName+"-collector", int(updateCollectorCount), retryInterval, timeout)
require.NoError(t, err, "Error waiting for collector deployment")
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName+"-query", int(updateQueryCount), retryInterval, timeout)
require.NoError(t, err, "Error waiting for query deployment")
// wait for second ES node to come up
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, esDeploymentName(namespace, jaegerInstanceName, 2), 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for Elasticsearch deployment")
// Make sure there are 2 ES deployments and wait for them to be available
listOptions := metav1.ListOptions{
LabelSelector: "component=elasticsearch",
}
deployments, err := fw.KubeClient.AppsV1().Deployments(namespace).List(listOptions)
require.NoError(t, err)
require.Equal(t, updateESNodeCount, len(deployments.Items))
for _, deployment := range deployments.Items {
if deployment.Namespace == namespace {
logrus.Infof("Looking for deployment %s with annotations %v", deployment.Name, deployment.Annotations)
err = e2eutil.WaitForDeployment(t, fw.KubeClient, namespace, deployment.Name, 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for deployment: "+deployment.Name)
}
}
/// Verify the number of Collector and Query pods
var collectorPodCount int32
var queryPodCount int32
// Wait until pod counts equalize, otherwise we risk counting or port forwarding to a terminating pod
err = wait.Poll(retryInterval, timeout, func() (done bool, err error) {
collectorPodCount = 0
queryPodCount = 0
pods, err := fw.KubeClient.CoreV1().Pods(namespace).List(metav1.ListOptions{})
require.NoError(t, err)
for _, pod := range pods.Items {
if strings.HasPrefix(pod.Name, jaegerInstanceName+"-collector") {
collectorPodCount++
} else if strings.HasPrefix(pod.Name, jaegerInstanceName+"-query") {
queryPodCount++
}
}
if queryPodCount == updateQueryCount && collectorPodCount == updateCollectorCount {
return true, nil
} else {
return false, nil
}
})
require.EqualValues(t, updateCollectorCount, collectorPodCount)
require.EqualValues(t, updateQueryCount, queryPodCount)
require.NoError(t, err)
ProductionSmokeTest(jaegerInstanceName)
// Make sure we were using the correct collector image
verifyCollectorImage(jaegerInstanceName, namespace, testOtelCollector)
}
// esDeploymentName returns the name of the Elasticsearch deployment created by
// the elasticsearch-operator for the given namespace, Jaeger instance and node
// index: "elasticsearch-cdm-<ns+name, dashes stripped, capped at 36 chars>-<n>".
func esDeploymentName(ns, jaegerName string, instances int) string {
	nsAndName := strings.ReplaceAll(ns, "-", "") + strings.ReplaceAll(jaegerName, "-", "")
	// Cap at 36 characters only when the combined name is that long; the
	// original unconditional nsAndName[:36] panicked on shorter names.
	if len(nsAndName) > 36 {
		nsAndName = nsAndName[:36]
	}
	return fmt.Sprintf("elasticsearch-cdm-%s-%d", nsAndName, instances)
}
// changeNodeCount updates the Elasticsearch node count and the collector/query
// replica counts on the named Jaeger CR and pushes the change to the cluster.
func changeNodeCount(name string, newESNodeCount int, newCollectorNodeCount, newQueryNodeCount int32) {
jaegerInstance := getJaegerInstance(name, namespace)
jaegerInstance.Spec.Collector.Replicas = &newCollectorNodeCount
jaegerInstance.Spec.Query.Replicas = &newQueryNodeCount
jaegerInstance.Spec.Storage.Elasticsearch.NodeCount = int32(newESNodeCount)
err := fw.Client.Update(context.Background(), jaegerInstance)
require.NoError(t, err)
}
// TestValidateEsOperatorImage asserts that the running elasticsearch-operator
// uses the image named in ES_OPERATOR_IMAGE (namespace from ES_OPERATOR_NAMESPACE).
// Skipped for OLM-installed operators, where the image is not under our control.
func (suite *SelfProvisionedTestSuite) TestValidateEsOperatorImage() {
// TODO reinstate this if we come up with a good solution, but skip for now when using OLM installed operators
if usingOLM {
t.Skip()
}
expectedEsOperatorImage := os.Getenv("ES_OPERATOR_IMAGE")
require.NotEmpty(t, expectedEsOperatorImage, "ES_OPERATOR_IMAGE must be defined")
esOperatorNamespace := os.Getenv("ES_OPERATOR_NAMESPACE")
require.NotEmpty(t, esOperatorNamespace, "ES_OPERATOR_NAMESPACE must be defined")
imageName := getElasticSearchOperatorImage(fw.KubeClient, esOperatorNamespace)
t.Logf("Using elasticsearch-operator image: %s\n", imageName)
require.Equal(t, expectedEsOperatorImage, imageName)
}
// getJaegerSimpleProd builds an in-memory Jaeger CR using the production
// strategy backed by a single-node Elasticsearch with a 1Gi memory limit.
// When useOtelCollector is true, the collector image and config are switched
// to the OTEL collector.
func getJaegerSimpleProd(instanceName string, useOtelCollector bool) *v1.Jaeger {
ingressEnabled := true
exampleJaeger := &v1.Jaeger{
TypeMeta: metav1.TypeMeta{
Kind: "Jaeger",
APIVersion: "jaegertracing.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: instanceName,
Namespace: namespace,
},
Spec: v1.JaegerSpec{
Ingress: v1.JaegerIngressSpec{
Enabled: &ingressEnabled,
Security: v1.IngressSecurityNoneExplicit,
},
Strategy: v1.DeploymentStrategyProduction,
Storage: v1.JaegerStorageSpec{
Type: "elasticsearch",
Elasticsearch: v1.ElasticsearchSpec{
NodeCount: 1,
Resources: &corev1.ResourceRequirements{
Limits: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("1Gi")},
Requests: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("1Gi")},
},
},
},
},
}
if useOtelCollector {
logrus.Infof("Using OTEL collector for %s", instanceName)
exampleJaeger.Spec.Collector.Image = otelCollectorImage
exampleJaeger.Spec.Collector.Config = v1.NewFreeForm(getOtelCollectorOptions())
}
return exampleJaeger
}
// getElasticSearchOperatorImage returns the image of the "elasticsearch-operator"
// container in the deployment of the same name in the given namespace.
// Fails the test immediately if the deployment or the container is missing.
func getElasticSearchOperatorImage(kubeclient kubernetes.Interface, namespace string) string {
	deployment, err := kubeclient.AppsV1().Deployments(namespace).Get("elasticsearch-operator", metav1.GetOptions{})
	require.NoErrorf(t, err, "Did not find elasticsearch-operator in namespace %s\n", namespace)
	containers := deployment.Spec.Template.Spec.Containers
	for _, container := range containers {
		if container.Name == "elasticsearch-operator" {
			return container.Image
		}
	}
	// FailNowf takes (t, failureMessage, msg, args...); the original passed the
	// namespace as msg, leaving the %s placeholder unformatted.
	require.FailNowf(t, "elasticsearch-operator container not found",
		"Did not find elasticsearch-operator container in namespace %s", namespace)
	return ""
}
|
[
"\"ES_OPERATOR_IMAGE\"",
"\"ES_OPERATOR_NAMESPACE\""
] |
[] |
[
"ES_OPERATOR_NAMESPACE",
"ES_OPERATOR_IMAGE"
] |
[]
|
["ES_OPERATOR_NAMESPACE", "ES_OPERATOR_IMAGE"]
|
go
| 2 | 0 | |
examples/openoracle/main.go
|
// Copyright (c) 2020 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package main
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"math/big"
"os"
)
var (
	// gasPrice for every action below, parsed from a base-10 string into a
	// big.Int; the parse-success flag is deliberately ignored for this
	// constant, well-formed input.
	gasPrice, _ = big.NewInt(0).SetString("1000000000000", 10)
	// gasLimit caps the gas each deploy/invoke action may consume.
	gasLimit = uint64(1000000)
)
// main demonstrates the OpenOracle flow end to end: deploy the
// OpenOraclePriceData contract, push one signed coinbase price report into a
// previously deployed instance, and read the stored BTC price back.
func main() {
	// The signer key comes from the environment; abort if it is absent.
	PrivateKey := os.Getenv("PrivateKey")
	if PrivateKey == "" {
		fmt.Println("Environment Variable [PrivateKey] not defined")
		return
	}
	// Compiled contract artifacts must sit in the working directory.
	bin, err := ioutil.ReadFile("OpenOraclePriceData.bin")
	if err != nil {
		fmt.Println("OpenOraclePriceData.bin not found")
		return
	}
	abi, err := ioutil.ReadFile("OpenOraclePriceData.abi")
	if err != nil {
		fmt.Println("OpenOraclePriceData.abi not found")
		return
	}
	// Service with an empty contract address: only good for deployment.
	s, err := NewOpenOracleService(PrivateKey, string(abi), string(bin), "", gasPrice, gasLimit, "api.testnet.iotex.one:80", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	r, err := s.Deploy(context.Background(), true)
	fmt.Println("Contract deployed, action hash: ", r, err)
	// Writes go to a fixed, previously deployed contract instance.
	writeClient, err := NewOpenOracleService(PrivateKey, string(abi), string(bin), "io1s50xy46vjtneh5m8jv6ync75m9vlj28qe0pr26", gasPrice, gasLimit, "api.testnet.iotex.one:80", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	// get some data from coinbase,curl https://api.pro.coinbase.com/oracle, have to be latest data or it will fail
	mes := "0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000005f45b50000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000002a1ec56780000000000000000000000000000000000000000000000000000000000000006707269636573000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034254430000000000000000000000000000000000000000000000000000000000"
	sig := "187a858734a49fd7ffaa5ba57a44cf12dd4dbb73e142e6f70a3b3e27d9d00a8bb8e430c650eb58b33a79c5ef9ec8d3bf1b3937015a3dbbb00f50cce07b90e8a6000000000000000000000000000000000000000000000000000000000000001c"
	// Decode the ABI-encoded report and its secp256k1 signature from hex.
	testMessage, err := hex.DecodeString(mes)
	if err != nil {
		fmt.Println(err)
		return
	}
	testSignature, err := hex.DecodeString(sig)
	if err != nil {
		fmt.Println(err)
		return
	}
	hash, err := writeClient.Put(context.Background(), testMessage, testSignature)
	fmt.Println("Invoke contract via action: ", hash, err)
	// Reads need no private key, hence the empty first argument.
	readClient, err := NewOpenOracleService("", string(abi), string(bin), "io1s50xy46vjtneh5m8jv6ync75m9vlj28qe0pr26", gasPrice, gasLimit, "api.testnet.iotex.one:80", false)
	if err != nil {
		fmt.Println(err)
		return
	}
	// io1ln4d4743f4rwyq2y7jyzf5xqnvdq8u4u9hma6k - 0xfCEAdAFab14d46e20144F48824d0C09B1a03F2BC
	ret, err := readClient.Get(context.Background(), "io1ln4d4743f4rwyq2y7jyzf5xqnvdq8u4u9hma6k", "BTC")
	fmt.Println("Read contract: ", ret, err)
}
|
[
"\"PrivateKey\""
] |
[] |
[
"PrivateKey"
] |
[]
|
["PrivateKey"]
|
go
| 1 | 0 | |
backend/delicate_forest_30319/wsgi.py
|
"""
WSGI config for delicate_forest_30319 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# setdefault keeps any DJANGO_SETTINGS_MODULE already present in the
# environment and falls back to this project's settings otherwise.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'delicate_forest_30319.settings')
# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the Django settings module and dispatch the command line.

    Raises:
        ImportError: if Django cannot be imported, chained from the original
            failure with a hint about the usual causes.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stats.settings.development')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/functional/local_endpoint/test_personal.py
|
import os
import shutil
import tempfile
import pytest
import globus_sdk
from tests.common import register_api_route
_IS_WINDOWS = os.name == "nt"
BASE32_ID = "u_vy2bvggsoqi6loei3oxdvc5fiu"
def _compute_confdir(homedir, alt=False):
if alt:
return os.path.join(homedir, "alt-conf-dir/lta")
if _IS_WINDOWS:
return os.path.join(homedir, "Globus Connect")
else:
return os.path.join(homedir, ".globusonline/lta")
def normalize_config_dir_argument(config_dir):
    """Return the config-dir argument form: the directory itself on
    Windows, its parent directory everywhere else."""
    if _IS_WINDOWS:
        return config_dir
    return os.path.dirname(config_dir)
@pytest.fixture(autouse=True)
def mocked_homedir(monkeypatch):
    # Autouse fixture: every test runs against a throwaway "home" directory
    # pre-populated with both the default and the alternate GCP config dirs.
    tempdir = tempfile.mkdtemp()
    def mock_expanduser(path):
        # Map "~/..." into the temp home so tests never touch the real $HOME.
        return os.path.join(tempdir, path.replace("~/", ""))
    try:
        confdir = _compute_confdir(tempdir)
        altconfdir = _compute_confdir(tempdir, alt=True)
        os.makedirs(confdir)
        os.makedirs(altconfdir)
        if _IS_WINDOWS:
            # On Windows the config dir is located via %LOCALAPPDATA%
            # (see test_localep_localappdata_notset below).
            monkeypatch.setitem(os.environ, "LOCALAPPDATA", tempdir)
        else:
            # Elsewhere "~" expansion is used, so patch expanduser instead.
            monkeypatch.setattr(os.path, "expanduser", mock_expanduser)
        yield tempdir
    finally:
        # Remove the temp home even if setup or the test itself failed.
        shutil.rmtree(tempdir)
@pytest.fixture
def mocked_confdir(mocked_homedir):
return _compute_confdir(mocked_homedir)
@pytest.fixture
def mocked_alternate_confdir(mocked_homedir):
return _compute_confdir(mocked_homedir, alt=True)
@pytest.fixture
def write_gcp_id_file(mocked_confdir, mocked_alternate_confdir):
    # Returns a writer function so a single test can (re)write the
    # client-id.txt file in either the default or the alternate config dir.
    def _func_fixture(epid, alternate=False):
        fpath = os.path.join(
            mocked_alternate_confdir if alternate else mocked_confdir, "client-id.txt"
        )
        with open(fpath, "w") as f:
            f.write(epid)
            f.write("\n")
    return _func_fixture
@pytest.fixture
def write_gridmap(mocked_confdir, mocked_alternate_confdir):
    """Return a writer that fills the ``gridmap`` file with ``data``.

    ``alternate=True`` targets the alternate config dir. The fixture now
    requests ``mocked_alternate_confdir``: previously that name resolved to
    the undecorated module-level fixture *function*, so ``alternate=True``
    would have failed inside ``os.path.join``.
    """
    def _func_fixture(data, alternate=False):
        fpath = os.path.join(
            mocked_alternate_confdir if alternate else mocked_confdir, "gridmap"
        )
        with open(fpath, "w") as f:
            f.write(data)
    return _func_fixture
@pytest.fixture
def local_gcp():
return globus_sdk.LocalGlobusConnectPersonal()
@pytest.fixture
def auth_client():
return globus_sdk.AuthClient()
@pytest.mark.skipif(not _IS_WINDOWS, reason="test requires Windows")
def test_localep_localappdata_notset(local_gcp, monkeypatch):
monkeypatch.delitem(os.environ, "LOCALAPPDATA")
with pytest.raises(globus_sdk.GlobusSDKUsageError):
local_gcp.endpoint_id
def test_localep_load_id(local_gcp, write_gcp_id_file):
assert local_gcp.endpoint_id is None
write_gcp_id_file("foobar")
assert local_gcp.endpoint_id == "foobar"
write_gcp_id_file("xyz")
assert local_gcp.endpoint_id == "foobar"
del local_gcp.endpoint_id
assert local_gcp.endpoint_id == "xyz"
def test_localep_load_id_alternate_conf_dir(
mocked_alternate_confdir, write_gcp_id_file
):
gcp = globus_sdk.LocalGlobusConnectPersonal(
config_dir=normalize_config_dir_argument(mocked_alternate_confdir)
)
assert gcp.endpoint_id is None
write_gcp_id_file("foobar", alternate=True)
assert gcp.endpoint_id == "foobar"
write_gcp_id_file("xyz", alternate=True)
assert gcp.endpoint_id == "foobar"
del gcp.endpoint_id
assert gcp.endpoint_id == "xyz"
def test_load_id_no_confdir(local_gcp, mocked_confdir, mocked_alternate_confdir):
shutil.rmtree(mocked_confdir)
shutil.rmtree(mocked_alternate_confdir)
alt_gcp = globus_sdk.LocalGlobusConnectPersonal(config_dir=mocked_alternate_confdir)
assert local_gcp.endpoint_id is None
assert alt_gcp.endpoint_id is None
def test_get_owner_info(local_gcp, write_gridmap, auth_client):
write_gridmap(
'"/C=US/O=Globus Consortium/OU=Globus Connect User/CN=sirosen" sirosen\n'
)
info = local_gcp.get_owner_info()
assert isinstance(info, globus_sdk.GlobusConnectPersonalOwnerInfo)
assert info.username == "[email protected]"
assert info.id is None
assert str(info) == "GlobusConnectPersonalOwnerInfo([email protected])"
register_api_route(
"auth",
"/v2/api/identities",
json={
"identities": [
{
"email": "[email protected]",
"id": "ae332d86-d274-11e5-b885-b31714a110e9",
"identity_provider": "41143743-f3c8-4d60-bbdb-eeecaba85bd9",
"identity_type": "login",
"name": "Stephen Rosen",
"organization": "Globus Team",
"status": "used",
"username": "[email protected]",
}
]
},
)
data = local_gcp.get_owner_info(auth_client)
assert isinstance(data, dict)
assert data["id"] == "ae332d86-d274-11e5-b885-b31714a110e9"
def test_get_owner_info_b32_mode(local_gcp, write_gridmap, auth_client):
write_gridmap(
f'"/C=US/O=Globus Consortium/OU=Globus Connect User/CN={BASE32_ID}" sirosen\n'
)
info = local_gcp.get_owner_info()
assert isinstance(info, globus_sdk.GlobusConnectPersonalOwnerInfo)
assert info.username is None
assert info.id == "ae341a98-d274-11e5-b888-dbae3a8ba545"
register_api_route(
"auth",
"/v2/api/identities",
json={
"identities": [
{
"email": "[email protected]",
"id": "ae341a98-d274-11e5-b888-dbae3a8ba545",
"identity_provider": "927d7238-f917-4eb2-9ace-c523fa9ba34e",
"identity_type": "login",
"name": "Stephen Rosen",
"organization": "Globus Team",
"status": "used",
"username": "[email protected]",
}
]
},
)
data = local_gcp.get_owner_info(auth_client)
assert isinstance(data, dict)
assert data["id"] == "ae341a98-d274-11e5-b888-dbae3a8ba545"
# these things are close to the right thing, but each is somehow wrong
@pytest.mark.parametrize(
"cn",
[
# no 'u_'
BASE32_ID[2:],
# short one char
BASE32_ID[:-1],
# invalid b32 char included
BASE32_ID[:-1] + "/",
],
)
def test_get_owner_info_b32_mode_invalid_data(
local_gcp, write_gridmap, cn, auth_client
):
write_gridmap(
f'"/C=US/O=Globus Consortium/OU=Globus Connect User/CN={cn}" sirosen\n'
)
info = local_gcp.get_owner_info()
assert isinstance(info, globus_sdk.GlobusConnectPersonalOwnerInfo)
assert info.username == f"{cn}@globusid.org"
@pytest.mark.parametrize(
"bad_cn_line",
[
'"/C=US/O=Globus Consortium/OU=Globus Connect User/CN=sirosen"',
'"/C=US/O=Globus Consortium/OU=Globus Connect User/CN=sirosen" sirosen sirosen',
"",
'"" sirosen',
],
)
def test_get_owner_info_malformed_entry(local_gcp, write_gridmap, bad_cn_line):
write_gridmap(bad_cn_line + "\n")
assert local_gcp.get_owner_info() is None
def test_get_owner_info_no_conf(local_gcp):
assert local_gcp.get_owner_info() is None
assert local_gcp.get_owner_info(auth_client) is None
def test_get_owner_info_no_confdir(local_gcp, mocked_confdir, auth_client):
shutil.rmtree(mocked_confdir)
assert local_gcp.get_owner_info() is None
assert local_gcp.get_owner_info(auth_client) is None
def test_get_owner_info_multiline_data(local_gcp, write_gridmap, auth_client):
write_gridmap(
"\n".join(
[
f'"/C=US/O=Globus Consortium/OU=Globus Connect User/CN=sirosen{x}" sirosen{x}' # noqa: E501
for x in ["", "2", "3"]
]
)
+ "\n"
)
info = local_gcp.get_owner_info()
assert isinstance(info, globus_sdk.GlobusConnectPersonalOwnerInfo)
assert info.username == "[email protected]"
register_api_route(
"auth",
"/v2/api/identities",
json={
"identities": [
{
"email": "[email protected]",
"id": "ae332d86-d274-11e5-b885-b31714a110e9",
"identity_provider": "41143743-f3c8-4d60-bbdb-eeecaba85bd9",
"identity_type": "login",
"name": "Stephen Rosen",
"organization": "Globus Team",
"status": "used",
"username": "[email protected]",
}
]
},
)
data = local_gcp.get_owner_info(auth_client)
assert isinstance(data, dict)
assert data["id"] == "ae332d86-d274-11e5-b885-b31714a110e9"
def test_get_owner_info_no_auth_data(local_gcp, write_gridmap, auth_client):
write_gridmap(
'"/C=US/O=Globus Consortium/OU=Globus Connect User/CN=sirosen" sirosen\n'
)
info = local_gcp.get_owner_info()
assert isinstance(info, globus_sdk.GlobusConnectPersonalOwnerInfo)
assert info.username == "[email protected]"
register_api_route("auth", "/v2/api/identities", json={"identities": []})
data = local_gcp.get_owner_info(auth_client)
assert data is None
def test_get_owner_info_gridmap_permission_denied(local_gcp, mocked_confdir):
fpath = os.path.join(mocked_confdir, "gridmap")
if not _IS_WINDOWS:
with open(fpath, "w"): # "touch"
pass
os.chmod(fpath, 0o000)
else:
# on windows, trying to read a directory gets a permission error
# this is just an easy way for tests to simulate bad permissions
os.makedirs(fpath)
with pytest.raises(PermissionError):
local_gcp.get_owner_info()
def test_get_endpoint_id_permission_denied(local_gcp, mocked_confdir):
fpath = os.path.join(mocked_confdir, "client-id.txt")
if not _IS_WINDOWS:
with open(fpath, "w"): # "touch"
pass
os.chmod(fpath, 0o000)
else:
# on windows, trying to read a directory gets a permission error
# this is just an easy way for tests to simulate bad permissions
os.makedirs(fpath)
with pytest.raises(PermissionError):
local_gcp.endpoint_id
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
server/HysiaREST/settings.py
|
"""
Django settings for HysiaREST project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hi(o-1t(ukianx3%zxgob)@!k^@7wo17r09yx9ac9t320z0s^7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# We can access the remote
ALLOWED_HOSTS = ['*']
# CORS configs
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = ['*']
# Channels-specific settings
redis_host = os.environ.get('REDIS_HOST', 'localhost')
# Channel layer definitions
CHANNEL_LAYERS = {
"default": {
# This example app uses the Redis channel layer implementation channels_redis
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [(redis_host, 6379)],
},
},
}
# ASGI_APPLICATION should be set to your outermost router
ASGI_APPLICATION = 'HysiaREST.routing.application'
DATA_UPLOAD_MAX_MEMORY_SIZE = None
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'restapi',
'dashboard',
'rest_framework',
'crispy_forms',
'corsheaders',
'channels',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'HysiaREST.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates', ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'HysiaREST.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Path to Fixtures
FIXTURE_DIRS = (
'/restapi/fixtures/',
)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): os.path.join(BASE_DIR, '/') evaluates to '/' because a second
# absolute component discards everything before it, so collectstatic would
# target the filesystem root. Presumably a directory such as
# os.path.join(BASE_DIR, 'staticfiles') was intended — confirm before relying
# on collectstatic in this project.
STATIC_ROOT = os.path.join(BASE_DIR, '/')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
# Media url
MEDIA_URL = '/output/'
# Generated/uploaded files live under <project>/output (served at MEDIA_URL).
MEDIA_ROOT = os.path.join(BASE_DIR, "output")
# Third party apps configuration
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Decoding hardware
DECODING_HARDWARE = "CPU"
|
[] |
[] |
[
"REDIS_HOST"
] |
[]
|
["REDIS_HOST"]
|
python
| 1 | 0 | |
CeleryBasicProject/CeleryBasicProject/wsgi.py
|
"""
WSGI config for CeleryBasicProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# setdefault leaves any DJANGO_SETTINGS_MODULE already set in the
# environment untouched, falling back to this project's settings.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CeleryBasicProject.settings')
# Module-level WSGI callable imported by application servers.
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
integration/docker/csi/alluxio/nodeserver.go
|
/*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"strconv"
"strings"
"sync"
"time"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
mount "k8s.io/mount-utils"
)
// nodeServer implements the CSI Node service for the Alluxio driver.
type nodeServer struct {
	// client talks to the Kubernetes API; used to create/delete Fuse pods.
	client kubernetes.Clientset
	*csicommon.DefaultNodeServer
	// nodeId is the Kubernetes node this plugin instance runs on.
	nodeId string
	// mutex serializes NodeStageVolume calls on this node.
	mutex sync.Mutex
}
/*
* When there is no app pod using the pv, the first app pod using the pv would trigger NodeStageVolume().
* Only after a successful return, NodePublishVolume() is called.
* When a pv is already in use and a new app pod uses it as its volume, it would only trigger NodePublishVolume()
*
* NodeUnpublishVolume() and NodeUnstageVolume() are the opposites of NodePublishVolume() and NodeStageVolume()
* When a pv would still be using by other pods after an app pod terminated, only NodeUnpublishVolume() is called.
* When a pv would not be in use after an app pod terminated, NodeUnpublishVolume() is called. Only after a successful
* return, NodeUnstageVolume() is called.
*
* For more detailed CSI doc, refer to https://github.com/container-storage-interface/spec/blob/master/spec.md
*/
// NodePublishVolume makes the volume available at the pod volume path.
// Two modes: with volume context mountInPod=true the Fuse process runs in a
// separate pod and only its staging (global) mount point is bind-mounted
// here; otherwise the Fuse process is spawned directly by this node server.
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	if req.GetVolumeContext()["mountInPod"] == "true" {
		glog.V(4).Infoln("Bind mount staging path (global mount point) to target path (pod volume path).")
		return bindMountGlobalMountPointToPodVolPath(req)
	}
	glog.V(4).Infoln("Mount Alluxio to target path (pod volume path) with AlluxioFuse in CSI node server.")
	return newFuseProcessInNodeServer(req)
}
// newFuseProcessInNodeServer launches alluxio-fuse from this CSI node server
// process, mounting the requested Alluxio path onto the pod volume path.
// Idempotent: an already-mounted target path returns success immediately.
func newFuseProcessInNodeServer(req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	targetPath := req.GetTargetPath()
	notMnt, err := ensureMountPoint(targetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if !notMnt {
		// Already mounted; nothing to do.
		return &csi.NodePublishVolumeResponse{}, nil
	}
	mountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()
	if req.GetReadonly() {
		mountOptions = append(mountOptions, "ro")
	}
	/*
	https://docs.alluxio.io/os/user/edge/en/api/POSIX-API.html
	https://github.com/Alluxio/alluxio/blob/master/integration/fuse/bin/alluxio-fuse
	*/
	// Mount the Alluxio root when no specific path was requested.
	alluxioPath := req.GetVolumeContext()["alluxioPath"]
	if alluxioPath == "" {
		alluxioPath = "/"
	}
	args := []string{"mount"}
	if len(mountOptions) > 0 {
		args = append(args, "-o", strings.Join(mountOptions, ","))
	}
	args = append(args, targetPath, alluxioPath)
	command := exec.Command("/opt/alluxio/integration/fuse/bin/alluxio-fuse", args...)
	// Merge per-volume java options into ALLUXIO_FUSE_JAVA_OPTS for the child.
	extraJavaOptions := req.GetVolumeContext()["javaOptions"]
	alluxioFuseJavaOpts := os.Getenv("ALLUXIO_FUSE_JAVA_OPTS")
	alluxioFuseJavaOpts = "ALLUXIO_FUSE_JAVA_OPTS=" + strings.Join([]string{alluxioFuseJavaOpts,
		extraJavaOptions,
	}, " ")
	command.Env = append(os.Environ(), alluxioFuseJavaOpts)
	glog.V(4).Infoln(command)
	stdoutStderr, err := command.CombinedOutput()
	glog.V(4).Infoln(string(stdoutStderr))
	if err != nil {
		// Map common failure modes onto specific gRPC status codes.
		if os.IsPermission(err) {
			return nil, status.Error(codes.PermissionDenied, err.Error())
		}
		if strings.Contains(err.Error(), "invalid argument") {
			return nil, status.Error(codes.InvalidArgument, err.Error())
		}
		return nil, status.Error(codes.Internal, err.Error())
	}
	return &csi.NodePublishVolumeResponse{}, nil
}
// bindMountGlobalMountPointToPodVolPath bind-mounts the staging path (the
// global mount point maintained by the Fuse pod) onto the pod's volume
// path. Idempotent: an already-mounted target returns success.
func bindMountGlobalMountPointToPodVolPath(req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	target := req.GetTargetPath()
	staging := req.GetStagingTargetPath()

	notMounted, err := ensureMountPoint(target)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if !notMounted {
		glog.V(4).Infoln("target path is already mounted")
		return &csi.NodePublishVolumeResponse{}, nil
	}

	cmd := exec.Command("mount", "--bind", staging, target)
	glog.V(4).Infoln(cmd)
	out, err := cmd.CombinedOutput()
	glog.V(4).Infoln(string(out))
	if err == nil {
		return &csi.NodePublishVolumeResponse{}, nil
	}
	// Translate well-known failures into specific gRPC status codes.
	switch {
	case os.IsPermission(err):
		return nil, status.Error(codes.PermissionDenied, err.Error())
	case strings.Contains(err.Error(), "invalid argument"):
		return nil, status.Error(codes.InvalidArgument, err.Error())
	default:
		return nil, status.Error(codes.Internal, err.Error())
	}
}
// NodeUnpublishVolume unmounts the Fuse mount at the target path and cleans
// the mount point up. Failures along the way are logged but do not fail the
// call — teardown is deliberately best-effort.
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	targetPath := req.GetTargetPath()
	command := exec.Command("/opt/alluxio/integration/fuse/bin/alluxio-fuse", "umount", targetPath)
	glog.V(4).Infoln(command)
	stdoutStderr, err := command.CombinedOutput()
	if err != nil {
		glog.V(3).Infoln(err)
	}
	glog.V(4).Infoln(string(stdoutStderr))
	// Remove whatever remains of the mount point (bind mount, directory).
	err = mount.CleanupMountPoint(targetPath, mount.New(""), false)
	if err != nil {
		glog.V(3).Infoln(err)
	} else {
		glog.V(4).Infof("Succeed in unmounting %s", targetPath)
	}
	return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeStageVolume creates the per-volume Alluxio Fuse pod (when
// mountInPod=true) and waits until that pod has mounted Alluxio at the
// staging path, polling FAILURE_THRESHOLD times every PERIOD_SECONDS.
// Calls are serialized per node via ns.mutex.
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	if req.GetVolumeContext()["mountInPod"] != "true" {
		// Nothing to stage: the Fuse process is launched per-publish instead.
		return &csi.NodeStageVolumeResponse{}, nil
	}
	ns.mutex.Lock()
	defer ns.mutex.Unlock()

	glog.V(4).Infoln("Creating Alluxio-fuse pod and mounting Alluxio to global mount point.")
	fusePod, err := getAndCompleteFusePodObj(ns.nodeId, req)
	if err != nil {
		return nil, err
	}
	if _, err := ns.client.CoreV1().Pods(os.Getenv("NAMESPACE")).Create(fusePod); err != nil {
		if strings.Contains(err.Error(), "already exists") {
			// Another volume attach already created the pod: treat as success.
			glog.V(4).Infof("Fuse pod %s already exists.", fusePod.Name)
			return &csi.NodeStageVolumeResponse{}, nil
		}
		return nil, status.Errorf(codes.Internal, "Failed to launch Fuse Pod at %v.\n%v", ns.nodeId, err.Error())
	}
	glog.V(4).Infoln("Successfully creating Fuse pod.")

	// Poll until the Fuse pod has mounted Alluxio at the staging path.
	retry, err := strconv.Atoi(os.Getenv("FAILURE_THRESHOLD"))
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "Cannot convert failure threshold %v to int.", os.Getenv("FAILURE_THRESHOLD"))
	}
	timeout, err := strconv.Atoi(os.Getenv("PERIOD_SECONDS"))
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "Cannot convert period seconds %v to int.", os.Getenv("PERIOD_SECONDS"))
	}
	for i := 0; i < retry; i++ {
		time.Sleep(time.Duration(timeout) * time.Second)
		command := exec.Command("bash", "-c", fmt.Sprintf("mount | grep %v | grep alluxio-fuse", req.GetStagingTargetPath()))
		stdout, err := command.CombinedOutput()
		if err != nil {
			// (i+1) sleeps of `timeout` seconds have elapsed by now; the
			// previous version logged i*timeout, reporting "0 seconds" on
			// the first failed check.
			glog.V(3).Infoln(fmt.Sprintf("Alluxio is not mounted in %v seconds.", (i+1)*timeout))
		}
		if len(stdout) > 0 {
			return &csi.NodeStageVolumeResponse{}, nil
		}
	}
	// All `retry` polls slept `timeout` seconds each, so retry*timeout
	// seconds elapsed in total (was misreported as (retry-1)*timeout).
	glog.V(3).Infoln(fmt.Sprintf("Time out. Alluxio-fuse is not mounted to global mount point in %vs.", retry*timeout))
	return nil, status.Error(codes.DeadlineExceeded, fmt.Sprintf("alluxio-fuse is not mounted to global mount point in %vs", retry*timeout))
}
// getAndCompleteFusePodObj loads the Fuse pod template from disk and fills
// in the per-volume details: unique name, node placement, a pre-stop
// unmount hook, fuse mount options/paths and merged java options.
func getAndCompleteFusePodObj(nodeId string, req *csi.NodeStageVolumeRequest) (*v1.Pod, error) {
	csiFusePodObj, err := getFusePodObj()
	if err != nil {
		return nil, err
	}
	// Append volumeId to pod name for uniqueness
	csiFusePodObj.Name = csiFusePodObj.Name + "-" + req.GetVolumeId()
	// Set node name for scheduling
	csiFusePodObj.Spec.NodeName = nodeId
	// Set pre-stop command (umount) in pod lifecycle
	lifecycle := &v1.Lifecycle {
		PreStop: &v1.Handler {
			Exec: &v1.ExecAction {
				Command: []string{"/opt/alluxio/integration/fuse/bin/alluxio-fuse", "unmount", req.GetStagingTargetPath()},
			},
		},
	}
	csiFusePodObj.Spec.Containers[0].Lifecycle = lifecycle
	// Set fuse mount options
	fuseOptsStr := strings.Join(req.GetVolumeCapability().GetMount().GetMountFlags(), ",")
	csiFusePodObj.Spec.Containers[0].Args = append(csiFusePodObj.Spec.Containers[0].Args, "--fuse-opts=" + fuseOptsStr)
	// Set fuse mount point
	csiFusePodObj.Spec.Containers[0].Args = append(csiFusePodObj.Spec.Containers[0].Args, req.GetStagingTargetPath())
	// Set alluxio path to be mounted if set
	alluxioPath := req.GetVolumeContext()["alluxioPath"]
	if alluxioPath != "" {
		csiFusePodObj.Spec.Containers[0].Args = append(csiFusePodObj.Spec.Containers[0].Args, alluxioPath)
	}
	// Update ALLUXIO_FUSE_JAVA_OPTS to include csi client java options
	alluxioCSIFuseJavaOpts :=
		strings.Join([]string{os.Getenv("ALLUXIO_FUSE_JAVA_OPTS"), req.GetVolumeContext()["javaOptions"]}, " ")
	alluxioFuseJavaOptsEnv := v1.EnvVar{Name: "ALLUXIO_FUSE_JAVA_OPTS", Value: alluxioCSIFuseJavaOpts}
	csiFusePodObj.Spec.Containers[0].Env = append(csiFusePodObj.Spec.Containers[0].Env, alluxioFuseJavaOptsEnv)
	return csiFusePodObj, nil
}
// NodeUnstageVolume deletes the per-volume Fuse pod. If the pod is already
// gone it falls back to unmounting the staging path directly (best effort),
// keeping repeated calls idempotent.
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	podName := "alluxio-fuse-" + req.GetVolumeId()
	if err := ns.client.CoreV1().Pods(os.Getenv("NAMESPACE")).Delete(podName, &metav1.DeleteOptions{}); err != nil {
		if strings.Contains(err.Error(), "not found") {
			// Pod not found. Try to clean up the mount point.
			command := exec.Command("umount", req.GetStagingTargetPath())
			glog.V(4).Infoln(command)
			stdoutStderr, err := command.CombinedOutput()
			if err != nil {
				glog.V(3).Infoln(err)
			}
			glog.V(4).Infoln(string(stdoutStderr))
			return &csi.NodeUnstageVolumeResponse{}, nil
		}
		return nil, status.Error(codes.Internal, fmt.Sprintf("Error deleting fuse pod %v\n%v", podName, err.Error()))
	}
	return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeExpandVolume is not supported by this driver.
func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}
// NodeGetCapabilities advertises the optional node RPCs this driver
// implements; only STAGE_UNSTAGE_VOLUME is supported.
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	stageUnstage := &csi.NodeServiceCapability{
		Type: &csi.NodeServiceCapability_Rpc{
			Rpc: &csi.NodeServiceCapability_RPC{
				Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
			},
		},
	}
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{stageUnstage},
	}, nil
}
// isCorruptedDir reports whether dir looks like a corrupted mount point
// (PathExists returned an error that IsCorruptedMnt recognizes, e.g. a
// stale FUSE mount whose backing process died).
func isCorruptedDir(dir string) bool {
	pathExists, pathErr := mount.PathExists(dir)
	// Infoln performs no formatting, so the previous format string (and its
	// literal "\\n") was logged verbatim; Infof interpolates the arguments.
	glog.V(3).Infof("isCorruptedDir(%s) returned with error: (%v, %v)", dir, pathExists, pathErr)
	return pathErr != nil && mount.IsCorruptedMnt(pathErr)
}
// ensureMountPoint reports whether targetPath is NOT currently a mount
// point, creating the directory if it is missing and unmounting a corrupted
// mount if one is detected. (true, nil) means the path exists and is free
// to be mounted on.
func ensureMountPoint(targetPath string) (bool, error) {
	mounter := mount.New(targetPath)
	notMnt, err := mounter.IsLikelyNotMountPoint(targetPath)
	if err == nil {
		return notMnt, nil
	}
	// err is known non-nil past this point.
	if os.IsNotExist(err) {
		if err := os.MkdirAll(targetPath, 0750); err != nil {
			return notMnt, err
		}
		return true, nil
	}
	if isCorruptedDir(targetPath) {
		// Infoln ignores format verbs; Infof is required for %s to expand.
		glog.V(3).Infof("detected corrupted mount for targetPath [%s]", targetPath)
		if err := mounter.Unmount(targetPath); err != nil {
			glog.V(3).Infof("failed to umount corrupted path [%s]", targetPath)
			return false, err
		}
		return true, nil
	}
	return notMnt, err
}
// getFusePodObj loads the Fuse pod template shipped with the Alluxio
// Kubernetes integration and returns it as a *v1.Pod.
func getFusePodObj() (*v1.Pod, error) {
	csiFuseYaml, err := ioutil.ReadFile("/opt/alluxio/integration/kubernetes/csi/alluxio-csi-fuse.yaml")
	if err != nil {
		glog.V(3).Info("csi-fuse config yaml file not found")
		return nil, status.Errorf(codes.NotFound, "csi-fuse config yaml file not found: %v", err.Error())
	}
	csiFuseObj, grpVerKind, err := scheme.Codecs.UniversalDeserializer().Decode(csiFuseYaml, nil, nil)
	if err != nil {
		glog.V(3).Info("Failed to decode csi-fuse config yaml file")
		// The previous format string had no verb for err.Error().
		return nil, status.Errorf(codes.Internal, "Failed to decode csi-fuse config yaml file.\n%v", err.Error())
	}
	// Only a Pod template is supported.
	if grpVerKind.Kind != "Pod" {
		// err is nil on this path (Decode succeeded), so the previous code
		// panicked calling err.Error(); report the offending kind instead.
		glog.V(3).Infof("csi-fuse only support Pod. %v found.", grpVerKind.Kind)
		return nil, status.Errorf(codes.InvalidArgument, "csi-fuse only support Pod. %v found.", grpVerKind.Kind)
	}
	return csiFuseObj.(*v1.Pod), nil
}
|
[
"\"ALLUXIO_FUSE_JAVA_OPTS\"",
"\"NAMESPACE\"",
"\"FAILURE_THRESHOLD\"",
"\"FAILURE_THRESHOLD\"",
"\"PERIOD_SECONDS\"",
"\"PERIOD_SECONDS\"",
"\"ALLUXIO_FUSE_JAVA_OPTS\"",
"\"NAMESPACE\""
] |
[] |
[
"PERIOD_SECONDS",
"ALLUXIO_FUSE_JAVA_OPTS",
"NAMESPACE",
"FAILURE_THRESHOLD"
] |
[]
|
["PERIOD_SECONDS", "ALLUXIO_FUSE_JAVA_OPTS", "NAMESPACE", "FAILURE_THRESHOLD"]
|
go
| 4 | 0 | |
70-problems-solved/search/minimumtime/Solution.java
|
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.Scanner;
/**
 * HackerRank "Minimum Time Required": given the production period of each
 * machine and a production goal, compute the fewest whole days needed for
 * the machines together to reach the goal.
 */
public class Solution {

    private static final Scanner scanner = new Scanner(System.in);

    /**
     * Binary-searches the smallest day count d such that the machines,
     * each producing one item every machines[i] days, jointly produce at
     * least {@code goal} items within d days. Sorts {@code machines} in place.
     */
    static long minTime(long[] machines, long goal) {
        Arrays.sort(machines);
        long lo = machines[0];                          // fastest machine's single-item time
        long hi = machines[machines.length - 1] * goal; // slowest machine alone meets the goal
        while (lo < hi) {
            long mid = (lo + hi) / 2;
            long produced = 0;
            for (long period : machines) {
                produced += mid / period;
            }
            if (produced < goal) {
                lo = mid + 1;
            } else {
                hi = mid;
            }
        }
        return hi;
    }

    public static void main(String[] args) throws IOException {
        try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")))) {
            String[] firstLine = scanner.nextLine().split(" ");
            int n = Integer.parseInt(firstLine[0]);
            long goal = Long.parseLong(firstLine[1]);
            long[] machines = new long[n];
            String[] tokens = scanner.nextLine().split(" ");
            scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
            for (int i = 0; i < n; i++) {
                machines[i] = Long.parseLong(tokens[i]);
            }
            bufferedWriter.write(String.valueOf(minTime(machines, goal)));
            bufferedWriter.newLine();
        }
        scanner.close();
    }
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
cfroutesync/integration/testenv_test.go
|
package integration_test
import (
"bytes"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"k8s.io/client-go/rest"
"github.com/onsi/gomega/gexec"
"code.cloudfoundry.org/cf-k8s-networking/cfroutesync/ccclient"
"code.cloudfoundry.org/cf-k8s-networking/cfroutesync/cfg"
log "github.com/sirupsen/logrus"
fakeapiserver "sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
// TestEnv wires together everything an integration test needs: fake UAA and
// Cloud Controller HTTP servers, a fake Kubernetes API server, a scratch
// kubectl home/config, and a running istio galley process for webhook
// validation. Tear it down with Cleanup.
type TestEnv struct {
	lock sync.Mutex // guards Cleanup against concurrent invocation

	// ConfigDir holds the cfroutesync config files (UAA/CC URLs, CAs, credentials).
	ConfigDir string

	// FakeUAA is a TLS test server standing in for the UAA token endpoint.
	FakeUAA struct {
		Handler http.Handler
		Server  *httptest.Server
	}

	// FakeCC is a TLS test server standing in for the Cloud Controller;
	// Data holds the canned API payloads it serves.
	FakeCC struct {
		Handler http.Handler
		Server  *httptest.Server
		Data    struct {
			Domains      []ccclient.Domain
			Routes       []ccclient.Route
			Destinations map[string][]ccclient.Destination
		}
	}

	// FakeApiServerEnv manages the envtest (fake kube-apiserver) lifecycle.
	FakeApiServerEnv *fakeapiserver.Environment

	KubeCtlHome    string // scratch HOME dir for kubectl invocations
	KubeConfigPath string // kubeconfig pointing at the fake API server

	// GalleySession is the gexec session of the istio galley validation server.
	GalleySession *gexec.Session

	// TestOutput receives subprocess and diagnostic output.
	TestOutput io.Writer
}
// FakeUAAServeHTTP answers every request with a static OAuth token payload,
// so any client asking the fake UAA for a token receives "fake-access-token".
func (te *TestEnv) FakeUAAServeHTTP(w http.ResponseWriter, r *http.Request) {
	body := struct {
		AccessToken string `json:"access_token"`
	}{AccessToken: "fake-access-token"}
	json.NewEncoder(w).Encode(body)
}
// FakeCCServeHTTP serves the canned Cloud Controller data from te.FakeCC.Data.
// NOTE: the case order matters — a destinations URL ("/v3/routes/<guid>/destinations")
// also contains the substring "routes", so "destinations" must be matched first.
// Unrecognized paths are logged and cause a panic so the test fails loudly.
func (te *TestEnv) FakeCCServeHTTP(w http.ResponseWriter, r *http.Request) {
	respond := func(payload map[string]interface{}) {
		json.NewEncoder(w).Encode(payload)
	}
	path := r.URL.Path
	switch {
	case strings.Contains(path, "domains"):
		respond(map[string]interface{}{"resources": te.FakeCC.Data.Domains})
	case strings.Contains(path, "destinations"):
		matches := regexp.MustCompile("/v3/routes/(.*)/destinations").FindStringSubmatch(path)
		respond(map[string]interface{}{"destinations": te.FakeCC.Data.Destinations[matches[1]]})
	case strings.Contains(path, "routes"):
		respond(map[string]interface{}{"resources": te.FakeCC.Data.Routes})
	default:
		log.WithFields(log.Fields{"server": "fakeCC", "request": r}).Error("unrecognized request")
		panic("request for unimplemented route on fake CC")
	}
}
// NewTestEnv builds a complete integration-test environment: it starts fake
// UAA and Cloud Controller TLS servers, writes the cfroutesync config files
// they require into a temp dir, boots a fake Kubernetes API server, writes a
// kubeconfig for it, and launches the istio galley validation webhook.
// Callers must invoke Cleanup on the returned TestEnv.
func NewTestEnv(testOutput io.Writer) (*TestEnv, error) {
	configDir, err := ioutil.TempDir("", "cfroutesync-integ-test-config-dir")
	if err != nil {
		return nil, err
	}

	testEnv := &TestEnv{
		ConfigDir:  configDir,
		TestOutput: testOutput,
	}

	// Stand up the fake UAA and CC over TLS; their self-signed certs are
	// exported below so cfroutesync can be configured to trust them.
	testEnv.FakeUAA.Handler = http.HandlerFunc(testEnv.FakeUAAServeHTTP)
	testEnv.FakeUAA.Server = httptest.NewTLSServer(testEnv.FakeUAA.Handler)
	testEnv.FakeCC.Handler = http.HandlerFunc(testEnv.FakeCCServeHTTP)
	testEnv.FakeCC.Server = httptest.NewTLSServer(testEnv.FakeCC.Handler)

	fakeUAACertBytes, err := tlsCertToPem(testEnv.FakeUAA.Server.Certificate())
	if err != nil {
		return nil, err
	}
	fakeCCCertBytes, err := tlsCertToPem(testEnv.FakeCC.Server.Certificate())
	if err != nil {
		return nil, err
	}

	// Materialize the config files cfroutesync reads at startup.
	for filename, contents := range map[string]string{
		cfg.FileUAABaseURL:      testEnv.FakeUAA.Server.URL,
		cfg.FileUAAClientName:   "fake-uaa-client-name",
		cfg.FileUAAClientSecret: "fake-uaa-client-secret",
		cfg.FileUAACA:           string(fakeUAACertBytes),
		cfg.FileCCBaseURL:       testEnv.FakeCC.Server.URL,
		cfg.FileCCCA:            string(fakeCCCertBytes),
	} {
		if err := ioutil.WriteFile(filepath.Join(testEnv.ConfigDir, filename), []byte(contents), 0644); err != nil {
			return nil, err
		}
	}

	logf.SetLogger(logf.ZapLoggerTo(testEnv.TestOutput, true /* development */))

	testEnv.FakeApiServerEnv = &fakeapiserver.Environment{
		KubeAPIServerFlags: getApiServerFlags(),
	}
	// NOTE(review): testEnvConfig is assigned with "=", so it is presumably a
	// package-level variable declared elsewhere in this package — confirm.
	testEnvConfig, err = testEnv.FakeApiServerEnv.Start()
	if err != nil {
		return nil, fmt.Errorf("starting fake api server: %w", err)
	}

	testEnv.KubeCtlHome, err = ioutil.TempDir("", "kubectl-home")
	if err != nil {
		return nil, fmt.Errorf("creating home dir for kubectl: %w", err)
	}

	if err := testEnv.createKubeConfig(testEnvConfig); err != nil {
		return nil, fmt.Errorf("writing kube config: %w", err)
	}

	if err := testEnv.startGalley(); err != nil {
		return nil, fmt.Errorf("starting galley: %w", err)
	}

	return testEnv, nil
}
// getApiServerFlags returns the default envtest kube-apiserver flags with the
// admission-control flag rewritten to enable only the ValidatingAdmissionWebhook
// plugin, which galley's validation webhook relies on.
func getApiServerFlags() []string {
	flags := append([]string(nil), fakeapiserver.DefaultKubeAPIServerFlags...)
	for i := range flags {
		if strings.HasPrefix(flags[i], "--admission-control") {
			flags[i] = "--enable-admission-plugins=ValidatingAdmissionWebhook"
		}
	}
	return flags
}
// startGalley launches the istio galley binary as a validation-only webhook
// server, pointed at the fake API server via te.KubeConfigPath. The resulting
// gexec session is stored on te so Cleanup can terminate it later.
func (te *TestEnv) startGalley() error {
	args := []string{
		"server",
		"--enable-server=false",
		"--enable-validation=true",
		"--validation-webhook-config-file", "./fixtures/istio-validating-admission-webhook.yaml",
		"--caCertFile", "./fixtures/galley-certs/galley-ca.crt",
		"--tlsCertFile", "./fixtures/galley-certs/galley-webhook.crt",
		"--tlsKeyFile", "./fixtures/galley-certs/galley-webhook.key",
		"--insecure",
		"--kubeconfig", te.KubeConfigPath,
	}
	session, err := gexec.Start(exec.Command("galley", args...), te.TestOutput, te.TestOutput)
	if err != nil {
		return err
	}
	te.GalleySession = session
	return nil
}
// checkAdmissionWebhookRunning verifies that galley's validating admission
// webhook is active by applying a known-invalid VirtualService: the apply must
// be rejected with the webhook's denial message. Any other outcome is an error.
func (te *TestEnv) checkAdmissionWebhookRunning() error {
	// attempt to apply invalid data
	outBytes, err := te.kubectl("apply", "-f", "./fixtures/invalid-virtual-service.yaml")
	out := string(outBytes)
	if err == nil {
		// The apply succeeded, meaning the webhook is NOT validating.
		// Clean up the resource we just created before reporting the failure.
		_, errOnDelete := te.kubectl("delete", "-f", "./fixtures/invalid-virtual-service.yaml")
		if errOnDelete != nil {
			// Bug fix: wrap errOnDelete — the original wrapped err, which is
			// necessarily nil in this branch, producing a useless "%!w(<nil>)".
			return fmt.Errorf("applying invalid data was successful (bad) and then we errored when attempting to delete it (even worse!): %w", errOnDelete)
		}
		return fmt.Errorf("invalid virtual-service was admitted to the K8s API: %s", out)
	}

	const expectedErrorSnippet = `admission webhook "pilot.validation.istio.io" denied the request`
	if strings.Contains(out, expectedErrorSnippet) {
		fmt.Fprintf(te.TestOutput, "invalid data was rejected, it appears that the istio galley validating admission webhook is working\n")
		return nil
	}

	return fmt.Errorf("unexpected condition while applying invalid VirtualService: %w: %s", err, out)
}
// Cleanup tears down everything the TestEnv owns: the config dir, both fake
// HTTP servers, the fake API server environment, and the galley process.
// It is safe to call on a nil receiver, is serialized by te.lock, and nils
// each field after teardown so a second call is a no-op.
func (te *TestEnv) Cleanup() {
	if te == nil {
		return
	}
	te.lock.Lock()
	defer te.lock.Unlock()

	if te.ConfigDir != "" {
		os.RemoveAll(te.ConfigDir)
		te.ConfigDir = ""
	}
	if srv := te.FakeUAA.Server; srv != nil {
		srv.Close()
		te.FakeUAA.Server = nil
	}
	if srv := te.FakeCC.Server; srv != nil {
		srv.Close()
		te.FakeCC.Server = nil
	}
	if env := te.FakeApiServerEnv; env != nil {
		env.Stop()
		te.FakeApiServerEnv = nil
	}
	if sess := te.GalleySession; sess != nil {
		sess.Terminate().Wait("2s")
		te.GalleySession = nil
	}
}
func tlsCertToPem(cert *x509.Certificate) ([]byte, error) {
pemBlock := &pem.Block{
Type: "CERTIFICATE",
Headers: nil,
Bytes: cert.Raw,
}
buf := new(bytes.Buffer)
if err := pem.Encode(buf, pemBlock); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// kubectl runs the kubectl binary with a minimal environment (only KUBECONFIG,
// PATH, and the scratch HOME), echoing the command line and its combined
// output to te.TestOutput. It returns the combined stdout/stderr and the
// execution error.
func (te *TestEnv) kubectl(args ...string) ([]byte, error) {
	fmt.Fprintf(te.TestOutput, "+ kubectl %s\n", strings.Join(args, " "))
	cmd := exec.Command("kubectl", args...)
	cmd.Env = []string{
		"KUBECONFIG=" + te.KubeConfigPath,
		"PATH=" + os.Getenv("PATH"),
		"HOME=" + te.KubeCtlHome,
	}
	output, err := cmd.CombinedOutput()
	te.TestOutput.Write(output)
	return output, err
}
// createKubeConfig renders a minimal kubeconfig (one cluster, one user, one
// context) pointing at the fake API server described by config, writes it
// into te.KubeCtlHome, and records its path in te.KubeConfigPath.
// Fix: the raw-string YAML is restored to conventional kubeconfig indentation;
// flush-left nesting (as the block previously read) is not valid YAML.
func (te *TestEnv) createKubeConfig(config *rest.Config) error {
	payload := fmt.Sprintf(`apiVersion: v1
clusters:
- cluster:
    server: %s
  name: test-env
contexts:
- context:
    cluster: test-env
    user: test-user
  name: test-env
current-context: test-env
kind: Config
users:
- name: test-user
  user:
    token: %s`, config.Host, config.BearerToken)
	te.KubeConfigPath = filepath.Join(te.KubeCtlHome, "config")
	fmt.Fprintf(te.TestOutput, "saving kubecfg to %s\n", te.KubeConfigPath)
	return ioutil.WriteFile(te.KubeConfigPath, []byte(payload), 0644)
}
func createCompositeController(webhookHost string) (string, error) {
compositeControllerYAML, err := ioutil.TempFile("", "compositecontroller.yaml")
if err != nil {
return "", err
}
defer compositeControllerYAML.Close()
payload := fmt.Sprintf(`---
apiVersion: metacontroller.k8s.io/v1alpha1
kind: CompositeController
metadata:
name: cfroutesync
spec:
resyncPeriodSeconds: 5
parentResource:
apiVersion: apps.cloudfoundry.org/v1alpha1
resource: routebulksyncs
childResources:
- apiVersion: v1
resource: services
updateStrategy:
method: InPlace
- apiVersion: networking.istio.io/v1alpha3
resource: virtualservices
updateStrategy:
method: InPlace
hooks:
sync:
webhook:
url: http://%s/sync`, webhookHost)
_, err = compositeControllerYAML.Write([]byte(payload))
if err != nil {
return "", nil
}
return compositeControllerYAML.Name(), nil
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
server/metric/common.go
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package metric
import (
"github.com/apache/servicecomb-service-center/pkg/util"
"github.com/astaxie/beego"
"net"
"os"
"sync"
"time"
)
const (
	// defaultCollectPeriod is the fallback collection interval used when the
	// METRICS_INTERVAL environment variable is unset, unparseable, or under 1s.
	defaultCollectPeriod = 30 * time.Second

	// FamilyName is the metric family name shared by all service-center metrics.
	FamilyName = "service_center"
	// familyNamePrefix is FamilyName plus the "_" separator, for building metric names.
	familyNamePrefix = FamilyName + "_"

	bufferSize = 1024
)
var (
	// Period is the metrics collect period; init() overrides this default
	// with the value derived from METRICS_INTERVAL (see getPeriod).
	Period = 30 * time.Second
	// SysMetrics holds the names of the system metrics tracked by default
	// (populated in init), used as a set keyed by metric name.
	SysMetrics util.ConcurrentMap

	// getEndpointOnce guards the one-time resolution of this instance's endpoint.
	getEndpointOnce sync.Once
	// instance caches the "host:port" identity computed by InstanceName.
	instance string
)
// init resolves the collect period from the environment and registers the
// default process/runtime metric names tracked in SysMetrics.
func init() {
	Period = getPeriod()

	// Well-known process and Go runtime metric names; struct{}{} values mark
	// set membership only.
	SysMetrics.Put("process_resident_memory_bytes", struct{}{})
	SysMetrics.Put("process_cpu_seconds_total", struct{}{})
	SysMetrics.Put("go_threads", struct{}{})
	SysMetrics.Put("go_goroutines", struct{}{})
}
// getPeriod reads METRICS_INTERVAL from the environment; a parseable duration
// of at least one second wins, anything else falls back to defaultCollectPeriod.
func getPeriod() time.Duration {
	if d, err := time.ParseDuration(os.Getenv("METRICS_INTERVAL")); err == nil && d >= time.Second {
		return d
	}
	return defaultCollectPeriod
}
// InstanceName returns this server's "host:port" identity, resolved exactly
// once from beego configuration: the REST endpoint (httpaddr/httpport) is
// preferred, falling back to the RPC endpoint (rpcaddr/rpcport). If neither
// address is configured, the cached result stays the empty string.
func InstanceName() string {
	getEndpointOnce.Do(func() {
		restIP := beego.AppConfig.String("httpaddr")
		restPort := beego.AppConfig.String("httpport")
		if len(restIP) > 0 {
			instance = net.JoinHostPort(restIP, restPort)
			return
		}
		rpcIP := beego.AppConfig.String("rpcaddr")
		rpcPort := beego.AppConfig.String("rpcport")
		if len(rpcIP) > 0 {
			instance = net.JoinHostPort(rpcIP, rpcPort)
			return
		}
	})
	return instance
}
|
[
"\"METRICS_INTERVAL\""
] |
[] |
[
"METRICS_INTERVAL"
] |
[]
|
["METRICS_INTERVAL"]
|
go
| 1 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.