filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
components/main-chef-wrapper/platform-lib/version_linux.go | //
// Copyright (c) Chef Software, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// logic for linux platform
package platform_lib
import (
"encoding/json"
"fmt"
"github.com/chef/chef-workstation/components/main-chef-wrapper/dist"
"github.com/chef/chef-workstation/components/main-chef-wrapper/lib"
"io/ioutil"
"os"
"path"
"path/filepath"
)
var gemManifestMap map[string]interface{}
var manifestMap map[string]interface{}
func init() {
gemManifestMap = gemManifestHash()
manifestMap = manifestHash()
}
func Version() error {
if omnibusInstall() {
showVersionViaVersionManifest()
} else {
fmt.Fprintln(os.Stderr, "ERROR:", dist.WorkstationProduct, "has not been installed via the platform-specific package provided by", dist.DistributorName, "Version information is not available.")
}
return nil
}
func showVersionViaVersionManifest() {
fmt.Printf("%v version: %v", dist.WorkstationProduct, componentVersion("build_version"))
productMap := map[string]string{
dist.ClientProduct: dist.CLIWrapperExec,
dist.InspecProduct: dist.InspecCli,
dist.CliProduct: dist.CliGem,
dist.HabProduct: dist.HabSoftwareName,
"Test Kitchen": "test-kitchen",
"Cookstyle": "cookstyle",
}
for prodName, component := range productMap {
fmt.Printf("\n%v version: %v", prodName, componentVersion(component))
}
fmt.Printf("\n")
}
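// componentVersion resolves a component's version: first from the gem manifest (whose values are
// arrays with the version string as the first element), then as a top-level key in the version
// manifest, and finally under software/<component>/locked_version, falling back to "unknown".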
func componentVersion(component string) string {
v, ok := gemManifestMap[component]
if ok {
stringifyVal := v.([]interface{})[0]
return stringifyVal.(string)
} else if v, ok := manifestMap[component]; ok {
return v.(string)
} else {
success, _ := lib.Dig(manifestMap, "software", component, "locked_version")
if success == nil {
return "unknown"
} else {
return success.(string)
}
}
}
func gemManifestHash() map[string]interface{} {
filepath := path.Join(omnibusRoot(), "gem-version-manifest.json")
jsonFile, err := os.Open(filepath)
if err != nil {
fmt.Fprintln(os.Stderr, "ERROR:", err.Error())
os.Exit(4)
}
byteValue, _ := ioutil.ReadAll(jsonFile)
var gemManifestHash map[string]interface{}
json.Unmarshal([]byte(byteValue), &gemManifestHash)
defer jsonFile.Close()
return gemManifestHash
}
func manifestHash() map[string]interface{} {
filepath := path.Join(omnibusRoot(), "version-manifest.json")
jsonFile, err := os.Open(filepath)
if err != nil {
fmt.Fprintln(os.Stderr, "ERROR:", err.Error())
os.Exit(4)
}
byteValue, _ := ioutil.ReadAll(jsonFile)
var manifestHash map[string]interface{}
json.Unmarshal([]byte(byteValue), &manifestHash)
defer jsonFile.Close()
return manifestHash
}
func omnibusInstall() bool {
// We also check whether the location we're running from (the omnibus root is derived from the
// currently running executable) includes the version manifest that omnibus packages ship with.
// If it doesn't, then we're running locally or out of a gem - so not as an 'omnibus install'.
expectedRoot := ExpectedOmnibusRoot()
if _, err := os.Stat(expectedRoot); err == nil {
if _, err = os.Stat(path.Join(expectedRoot, "version-manifest.json")); err == nil {
return true
} else {
return false
}
} else {
return false
}
}
func omnibusRoot() string {
omnibusroot, err := filepath.Abs(path.Join(ExpectedOmnibusRoot()))
if err != nil {
fmt.Fprintln(os.Stderr, "ERROR:", dist.WorkstationProduct, "has not been installed via the platform-specific package provided by", dist.DistributorName, "Version information is not available.")
os.Exit(4)
}
return omnibusroot
// For running and testing from a local repo (e.g. ./main-chef-wrapper -v), comment out the rest of this method (darwin, linux) and return the hard-coded path below instead:
//return "/opt/chef-workstation"
}
func ExpectedOmnibusRoot() string {
ex, _ := os.Executable()
exReal, err := filepath.EvalSymlinks(ex)
if err != nil {
fmt.Fprintln(os.Stderr, "ERROR:", err)
os.Exit(4)
}
rootPath := path.Join(filepath.Dir(exReal), "..")
//groot := os.Getenv("GEM_ROOT")
//rootPath, err := filepath.Abs(path.Join(groot,"..","..", "..", "..", ".."))
return rootPath
// For running and testing from a local repo (e.g. ./main-chef-wrapper -v), comment out the rest of this method (darwin, linux) and return the hard-coded path below instead:
//return "/opt/chef-workstation"
}
| ["\"GEM_ROOT\""] | [] | ["GEM_ROOT"] | [] | ["GEM_ROOT"] | go | 1 | 0 | |
bin/reset_all_sequences.py | #!/usr/bin/env python
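"""Reset the sequences backing auto-increment integer columns in every organization schema.

For each integer, auto-incrementing, non-foreign-key column, the script calls
pg_catalog.setval(pg_get_serial_sequence(...), MAX(column)+1) so that the next
inserted row does not collide with existing ids.

Illustrative invocation (the connection URL is only a placeholder):
    BAUBLE_DB_URL=postgresql://user:pass@localhost/bauble bin/reset_all_sequences.py
"""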
import operator as op
import os

import psycopg2
import sqlalchemy as sa

db_url = os.environ.get('BAUBLE_DB_URL')
if db_url is None:
    print("** Error: BAUBLE_DB_URL is not defined")
    exit(1)

from bauble.model import Model

col_filter = lambda c: isinstance(c.type, sa.Integer) and c.autoincrement \
    and len(c.foreign_keys) == 0 \
    and (c.default is None or (isinstance(c.default, sa.schema.Sequence) and
                               c.default.optional))

sequences = []
for table in Model.metadata.sorted_tables:
    for column in filter(col_filter, table.columns):
        sequence_name = '{}_{}_seq'.format(table.name, column.name)
        sequences.append((table.name, column.name, sequence_name))

seq_reset_stmt = "SELECT pg_catalog.setval(pg_get_serial_sequence('{table}', '{column}'), (SELECT MAX({column}) FROM {table})+1);"

with psycopg2.connect(db_url) as conn:
    with conn.cursor() as cursor:
        # get all the organization schema
        cursor.execute('select pg_schema from organization;')
        for schema in map(op.itemgetter(0), cursor.fetchall()):
            print('schema: ', schema)
            cursor.execute('set search_path to {},public;'.format(schema))
            for table, column, sequence in sequences:
                cursor.execute('begin;')
                try:
                    stmt = seq_reset_stmt.format(table=table, column=column,
                                                 sequence=sequence)
                    cursor.execute(stmt)
                except Exception as exc:
                    print("Could not reset {} on schema {}".format(sequence, schema))
                    cursor.execute('rollback;')
                    print(exc)
                else:
                    cursor.execute('commit;')
| [] | [] | ["BAUBLE_DB_URL"] | [] | ["BAUBLE_DB_URL"] | python | 1 | 0 | |
src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/google/pprof/internal/plugin"
"github.com/google/pprof/internal/report"
)
// commands describes the commands accepted by pprof.
type commands map[string]*command
// command describes the actions for a pprof command. Includes the
// report format to use during report generation, any postprocessing
// functions, and whether the command expects a regexp parameter
// (typically a function name).
type command struct {
format int // report format to generate
postProcess PostProcessor // postprocessing to run on report
visualizer PostProcessor // display output using some callback
hasParam bool // collect a parameter from the CLI
description string // single-line description text saying what the command does
usage string // multi-line help text saying how the command is used
}
// help returns a help string for a command.
func (c *command) help(name string) string {
message := c.description + "\n"
if c.usage != "" {
message += " Usage:\n"
lines := strings.Split(c.usage, "\n")
for _, line := range lines {
message += fmt.Sprintf(" %s\n", line)
}
}
return message + "\n"
}
// AddCommand adds an additional command to the set of commands
// accepted by pprof. This enables extensions to add new commands for
// specialized visualization formats. If the command specified already
// exists, it is overwritten.
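// A hypothetical extension might register, for example:
//
//	AddCommand("flame", report.Dot, invokeFlameGraph(), "Display flame graph", "flame [focus_regex]")
//
// where "flame" and the invokeFlameGraph post-processor are illustrative names, not part of this package.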
func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) {
pprofCommands[cmd] = &command{format, post, nil, false, desc, usage}
}
// SetVariableDefault sets the default value for a pprof
// variable. This enables extensions to set their own defaults.
func SetVariableDefault(variable, value string) {
if v := pprofVariables[variable]; v != nil {
v.value = value
}
}
// PostProcessor is a function that applies post-processing to the report output
type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error
// interactiveMode is true if pprof is running on interactive mode, reading
// commands from its shell.
var interactiveMode = false
// pprofCommands are the report generation commands recognized by pprof.
var pprofCommands = commands{
// Commands that require no post-processing.
"comments": {report.Comments, nil, nil, false, "Output all profile comments", ""},
"disasm": {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)},
"dot": {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)},
"list": {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)},
"peek": {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."},
"raw": {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""},
"tags": {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."},
"text": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)},
"top": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)},
"traces": {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""},
"tree": {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)},
// Save binary formats to a file
"callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)},
"proto": {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""},
"topproto": {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""},
// Generate report in DOT format and postprocess with dot
"gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)},
"pdf": {report.Dot, invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)},
"png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)},
"ps": {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)},
// Save SVG output into a file
"svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)},
// Visualize postprocessed dot output
"eog": {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)},
"evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)},
"gv": {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)},
"web": {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)},
// Visualize callgrind output
"kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)},
// Visualize HTML directly generated by report.
"weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)},
}
// pprofVariables are the configuration parameters that affect the
// reports generated by pprof.
var pprofVariables = variables{
// Filename for file-based output formats, stdout by default.
"output": &variable{stringKind, "", "", helpText("Output filename for file-based outputs")},
// Comparisons.
"drop_negative": &variable{boolKind, "f", "", helpText(
"Ignore negative differences",
"Do not show any locations with values <0.")},
// Graph handling options.
"call_tree": &variable{boolKind, "f", "", helpText(
"Create a context-sensitive call tree",
"Treat locations reached through different paths as separate.")},
// Display options.
"relative_percentages": &variable{boolKind, "f", "", helpText(
"Show percentages relative to focused subgraph",
"If unset, percentages are relative to full graph before focusing",
"to facilitate comparison with original graph.")},
"unit": &variable{stringKind, "minimum", "", helpText(
"Measurement units to display",
"Scale the sample values to this unit.",
"For time-based profiles, use seconds, milliseconds, nanoseconds, etc.",
"For memory profiles, use megabytes, kilobytes, bytes, etc.",
"Using auto will scale each value independently to the most natural unit.")},
"compact_labels": &variable{boolKind, "f", "", "Show minimal headers"},
"source_path": &variable{stringKind, "", "", "Search path for source files"},
"trim_path": &variable{stringKind, "", "", "Path to trim from source paths before search"},
// Filtering options
"nodecount": &variable{intKind, "-1", "", helpText(
"Max number of nodes to show",
"Uses heuristics to limit the number of locations to be displayed.",
"On graphs, dotted edges represent paths through nodes that have been removed.")},
"nodefraction": &variable{floatKind, "0.005", "", "Hide nodes below <f>*total"},
"edgefraction": &variable{floatKind, "0.001", "", "Hide edges below <f>*total"},
"trim": &variable{boolKind, "t", "", helpText(
"Honor nodefraction/edgefraction/nodecount defaults",
"Set to false to get the full profile, without any trimming.")},
"focus": &variable{stringKind, "", "", helpText(
"Restricts to samples going through a node matching regexp",
"Discard samples that do not include a node matching this regexp.",
"Matching includes the function name, filename or object name.")},
"ignore": &variable{stringKind, "", "", helpText(
"Skips paths going through any nodes matching regexp",
"If set, discard samples that include a node matching this regexp.",
"Matching includes the function name, filename or object name.")},
"prune_from": &variable{stringKind, "", "", helpText(
"Drops any functions below the matched frame.",
"If set, any frames matching the specified regexp and any frames",
"below it will be dropped from each sample.")},
"hide": &variable{stringKind, "", "", helpText(
"Skips nodes matching regexp",
"Discard nodes that match this location.",
"Other nodes from samples that include this location will be shown.",
"Matching includes the function name, filename or object name.")},
"show": &variable{stringKind, "", "", helpText(
"Only show nodes matching regexp",
"If set, only show nodes that match this location.",
"Matching includes the function name, filename or object name.")},
"show_from": &variable{stringKind, "", "", helpText(
"Drops functions above the highest matched frame.",
"If set, all frames above the highest match are dropped from every sample.",
"Matching includes the function name, filename or object name.")},
"tagfocus": &variable{stringKind, "", "", helpText(
"Restricts to samples with tags in range or matched by regexp",
"Use name=value syntax to limit the matching to a specific tag.",
"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
"String tag filter examples: foo, foo.*bar, mytag=foo.*bar")},
"tagignore": &variable{stringKind, "", "", helpText(
"Discard samples with tags in range or matched by regexp",
"Use name=value syntax to limit the matching to a specific tag.",
"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
"String tag filter examples: foo, foo.*bar, mytag=foo.*bar")},
"tagshow": &variable{stringKind, "", "", helpText(
"Only consider tags matching this regexp",
"Discard tags that do not match this regexp")},
"taghide": &variable{stringKind, "", "", helpText(
"Skip tags matching this regexp",
"Discard tags that match this regexp")},
// Heap profile options
"divide_by": &variable{floatKind, "1", "", helpText(
"Ratio to divide all samples before visualization",
"Divide all samples values by a constant, eg the number of processors or jobs.")},
"mean": &variable{boolKind, "f", "", helpText(
"Average sample value over first value (count)",
"For memory profiles, report average memory per allocation.",
"For time-based profiles, report average time per event.")},
"sample_index": &variable{stringKind, "", "", helpText(
"Sample value to report (0-based index or name)",
"Profiles contain multiple values per sample.",
"Use sample_index=i to select the ith value (starting at 0).")},
"normalize": &variable{boolKind, "f", "", helpText(
"Scales profile based on the base profile.")},
// Data sorting criteria
"flat": &variable{boolKind, "t", "cumulative", helpText("Sort entries based on own weight")},
"cum": &variable{boolKind, "f", "cumulative", helpText("Sort entries based on cumulative weight")},
// Output granularity
"functions": &variable{boolKind, "t", "granularity", helpText(
"Aggregate at the function level.",
"Takes into account the filename/lineno where the function was defined.")},
"files": &variable{boolKind, "f", "granularity", "Aggregate at the file level."},
"lines": &variable{boolKind, "f", "granularity", "Aggregate at the source code line level."},
"addresses": &variable{boolKind, "f", "granularity", helpText(
"Aggregate at the function level.",
"Includes functions' addresses in the output.")},
"noinlines": &variable{boolKind, "f", "granularity", helpText(
"Aggregate at the function level.",
"Attributes inlined functions to their first out-of-line caller.")},
"addressnoinlines": &variable{boolKind, "f", "granularity", helpText(
"Aggregate at the function level, including functions' addresses in the output.",
"Attributes inlined functions to their first out-of-line caller.")},
}
func helpText(s ...string) string {
return strings.Join(s, "\n") + "\n"
}
// usage returns a string describing the pprof commands and variables.
// if commandLine is set, the output reflects command-line usage.
func usage(commandLine bool) string {
var prefix string
if commandLine {
prefix = "-"
}
fmtHelp := func(c, d string) string {
return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0])
}
var commands []string
for name, cmd := range pprofCommands {
commands = append(commands, fmtHelp(prefix+name, cmd.description))
}
sort.Strings(commands)
var help string
if commandLine {
help = " Output formats (select at most one):\n"
} else {
help = " Commands:\n"
commands = append(commands, fmtHelp("o/options", "List options and their current values"))
commands = append(commands, fmtHelp("quit/exit/^D", "Exit pprof"))
}
help = help + strings.Join(commands, "\n") + "\n\n" +
" Options:\n"
// Print help for variables after sorting them.
// Collect radio variables by their group name to print them together.
radioOptions := make(map[string][]string)
var variables []string
for name, vr := range pprofVariables {
if vr.group != "" {
radioOptions[vr.group] = append(radioOptions[vr.group], name)
continue
}
variables = append(variables, fmtHelp(prefix+name, vr.help))
}
sort.Strings(variables)
help = help + strings.Join(variables, "\n") + "\n\n" +
" Option groups (only set one per group):\n"
var radioStrings []string
for radio, ops := range radioOptions {
sort.Strings(ops)
s := []string{fmtHelp(radio, "")}
for _, op := range ops {
s = append(s, " "+fmtHelp(prefix+op, pprofVariables[op].help))
}
radioStrings = append(radioStrings, strings.Join(s, "\n"))
}
sort.Strings(radioStrings)
return help + strings.Join(radioStrings, "\n")
}
func reportHelp(c string, cum, redirect bool) string {
h := []string{
c + " [n] [focus_regex]* [-ignore_regex]*",
"Include up to n samples",
"Include samples matching focus_regex, and exclude ignore_regex.",
}
if cum {
h[0] += " [-cum]"
h = append(h, "-cum sorts the output by cumulative weight")
}
if redirect {
h[0] += " >f"
h = append(h, "Optionally save the report on the file f")
}
return strings.Join(h, "\n")
}
func listHelp(c string, redirect bool) string {
h := []string{
c + "<func_regex|address> [-focus_regex]* [-ignore_regex]*",
"Include functions matching func_regex, or including the address specified.",
"Include samples matching focus_regex, and exclude ignore_regex.",
}
if redirect {
h[0] += " >f"
h = append(h, "Optionally save the report on the file f")
}
return strings.Join(h, "\n")
}
// browsers returns a list of commands to attempt for web visualization.
func browsers() []string {
cmds := []string{"chrome", "google-chrome", "firefox"}
switch runtime.GOOS {
case "darwin":
return append(cmds, "/usr/bin/open")
case "windows":
return append(cmds, "cmd /c start")
default:
userBrowser := os.Getenv("BROWSER")
if userBrowser != "" {
cmds = append([]string{userBrowser, "sensible-browser"}, cmds...)
} else {
cmds = append([]string{"sensible-browser"}, cmds...)
}
return append(cmds, "xdg-open")
}
}
var kcachegrind = []string{"kcachegrind"}
// awayFromTTY saves the output in a file if it would otherwise go to
// the terminal screen. This is used to avoid dumping binary data on
// the screen.
func awayFromTTY(format string) PostProcessor {
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
if output == os.Stdout && (ui.IsTerminal() || interactiveMode) {
tempFile, err := newTempFile("", "profile", "."+format)
if err != nil {
return err
}
ui.PrintErr("Generating report in ", tempFile.Name())
output = tempFile
}
_, err := io.Copy(output, input)
return err
}
}
func invokeDot(format string) PostProcessor {
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
cmd := exec.Command("dot", "-T"+format)
cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("Failed to execute dot. Is Graphviz installed? Error: %v", err)
}
return nil
}
}
// massageDotSVG invokes the dot tool to generate an SVG image and alters
// the image to have panning capabilities when viewed in a browser.
func massageDotSVG() PostProcessor {
generateSVG := invokeDot("svg")
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
baseSVG := new(bytes.Buffer)
if err := generateSVG(input, baseSVG, ui); err != nil {
return err
}
_, err := output.Write([]byte(massageSVG(baseSVG.String())))
return err
}
}
func invokeVisualizer(suffix string, visualizers []string) PostProcessor {
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix)
if err != nil {
return err
}
deferDeleteTempFile(tempFile.Name())
if _, err := io.Copy(tempFile, input); err != nil {
return err
}
tempFile.Close()
// Try visualizers until one is successful
for _, v := range visualizers {
// Separate command and arguments for exec.Command.
args := strings.Split(v, " ")
if len(args) == 0 {
continue
}
viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
viewer.Stderr = os.Stderr
if err = viewer.Start(); err == nil {
// Wait for a second so that the visualizer has a chance to
// open the input file. This needs to be done even if we're
// waiting for the visualizer as it can be just a wrapper that
// spawns a browser tab and returns right away.
defer func(t <-chan time.Time) {
<-t
}(time.After(time.Second))
// On interactive mode, let the visualizer run in the background
// so other commands can be issued.
if !interactiveMode {
return viewer.Wait()
}
return nil
}
}
return err
}
}
// variables describe the configuration parameters recognized by pprof.
type variables map[string]*variable
// variable is a single configuration parameter.
type variable struct {
kind int // How to interpret the value, must be one of the enums below.
value string // Effective value. Only values appropriate for the Kind should be set.
group string // boolKind variables with the same Group != "" cannot be set simultaneously.
help string // Text describing the variable, in multiple lines separated by newline.
}
const (
// variable.kind must be one of these constants.
boolKind = iota
intKind
floatKind
stringKind
)
// set updates the value of a variable, checking that the value is
// suitable for the variable Kind.
func (vars variables) set(name, value string) error {
v := vars[name]
if v == nil {
return fmt.Errorf("no variable %s", name)
}
var err error
switch v.kind {
case boolKind:
var b bool
if b, err = stringToBool(value); err == nil {
if v.group != "" && !b {
err = fmt.Errorf("%q can only be set to true", name)
}
}
case intKind:
_, err = strconv.Atoi(value)
case floatKind:
_, err = strconv.ParseFloat(value, 64)
case stringKind:
// Remove quotes, particularly useful for empty values.
if len(value) > 1 && strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) {
value = value[1 : len(value)-1]
}
}
if err != nil {
return err
}
vars[name].value = value
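// Variables that share a non-empty group (e.g. the granularity options) are mutually
// exclusive: setting one to true resets every other variable in the same group to false.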
if group := vars[name].group; group != "" {
for vname, vvar := range vars {
if vvar.group == group && vname != name {
vvar.value = "f"
}
}
}
return err
}
// boolValue returns the value of a boolean variable.
func (v *variable) boolValue() bool {
b, err := stringToBool(v.value)
if err != nil {
panic("unexpected value " + v.value + " for bool ")
}
return b
}
// intValue returns the value of an intKind variable.
func (v *variable) intValue() int {
i, err := strconv.Atoi(v.value)
if err != nil {
panic("unexpected value " + v.value + " for int ")
}
return i
}
// floatValue returns the value of a Float variable.
func (v *variable) floatValue() float64 {
f, err := strconv.ParseFloat(v.value, 64)
if err != nil {
panic("unexpected value " + v.value + " for float ")
}
return f
}
// stringValue returns a canonical representation for a variable.
func (v *variable) stringValue() string {
switch v.kind {
case boolKind:
return fmt.Sprint(v.boolValue())
case intKind:
return fmt.Sprint(v.intValue())
case floatKind:
return fmt.Sprint(v.floatValue())
}
return v.value
}
func stringToBool(s string) (bool, error) {
switch strings.ToLower(s) {
case "true", "t", "yes", "y", "1", "":
return true, nil
case "false", "f", "no", "n", "0":
return false, nil
default:
return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
}
}
// makeCopy returns a duplicate of a set of shell variables.
func (vars variables) makeCopy() variables {
varscopy := make(variables, len(vars))
for n, v := range vars {
vcopy := *v
varscopy[n] = &vcopy
}
return varscopy
}
| ["\"BROWSER\""] | [] | ["BROWSER"] | [] | ["BROWSER"] | go | 1 | 0 | |
testutil/db.go | package testutil
import (
"context"
"os"
"testing"
"time"
"github.com/go-pg/pg/v10"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/filecoin-project/sentinel-visor/wait"
)
var testDatabase = os.Getenv("VISOR_TEST_DB")
// DatabaseAvailable reports whether a database is available for testing
func DatabaseAvailable() bool {
return testDatabase != ""
}
// Database returns the connection string for connecting to the test database
func DatabaseOptions() string {
return testDatabase
}
// WaitForExclusiveDatabase waits for exclusive access to the test database until the context is done or the
// exclusive access is granted. It returns a cleanup function that should be called to close the database connection.
func WaitForExclusiveDatabase(ctx context.Context, tb testing.TB) (*pg.DB, func() error, error) {
require.NotEmpty(tb, testDatabase, "No test database available: VISOR_TEST_DB not set")
opt, err := pg.ParseURL(testDatabase)
require.NoError(tb, err)
db := pg.Connect(opt)
db = db.WithContext(ctx)
// Check if connection credentials are valid and PostgreSQL is up and running.
if err := db.Ping(ctx); err != nil {
return nil, db.Close, xerrors.Errorf("ping database: %w", err)
}
release, err := WaitForExclusiveDatabaseLock(ctx, db)
if err != nil {
db.Close()
tb.Fatalf("failed to get exclusive database access: %v", err)
}
cleanup := func() error {
_ = release()
return db.Close()
}
return db, cleanup, nil
}
const (
testDatabaseLockID = 88899888
testDatabaseLockCheckInterval = 2 * time.Millisecond
)
// WaitForExclusiveDatabaseLock waits for an exclusive lock on the test database until the context is done or the
// exclusive access is granted. It returns a cleanup function that should be called to release the exclusive lock. In any
// case the lock will be automatically released when the database session ends.
func WaitForExclusiveDatabaseLock(ctx context.Context, db *pg.DB) (func() error, error) {
err := wait.RepeatUntil(ctx, testDatabaseLockCheckInterval, tryTestDatabaseLock(ctx, db))
if err != nil {
return nil, err
}
release := func() error {
var released bool
_, err := db.QueryOneContext(ctx, pg.Scan(&released), `SELECT pg_advisory_unlock(?);`, int64(testDatabaseLockID))
if err != nil {
return xerrors.Errorf("unlocking exclusive lock: %w", err)
}
if !released {
return xerrors.Errorf("exclusive lock not released")
}
return nil
}
return release, nil
}
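// tryTestDatabaseLock returns a condition function that attempts a non-blocking,
// session-scoped advisory lock (pg_try_advisory_lock); WaitForExclusiveDatabaseLock
// polls it via wait.RepeatUntil until it reports the lock as acquired.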
func tryTestDatabaseLock(ctx context.Context, db *pg.DB) func(context.Context) (bool, error) {
return func(context.Context) (bool, error) {
var acquired bool
_, err := db.QueryOneContext(ctx, pg.Scan(&acquired), `SELECT pg_try_advisory_lock(?);`, int64(testDatabaseLockID))
return acquired, err
}
}
| ["\"VISOR_TEST_DB\""] | [] | ["VISOR_TEST_DB"] | [] | ["VISOR_TEST_DB"] | go | 1 | 0 | |
providers/shopify/session.go | package shopify
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"regexp"
"strings"
"time"
"github.com/plybit/goth"
)
const (
shopifyHostnameRegex = `^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`
)
// Session stores data during the auth process with Shopify.
type Session struct {
AuthURL string
AccessToken string
Hostname string
HMAC string
ExpiresAt time.Time
}
var _ goth.Session = &Session{}
// GetAuthURL will return the URL set by calling the `BeginAuth` function on the Shopify provider.
func (s Session) GetAuthURL() (string, error) {
if s.AuthURL == "" {
return "", errors.New(goth.NoAuthUrlErrorMessage)
}
return s.AuthURL, nil
}
// Authorize the session with Shopify and return the access token to be stored for future use.
func (s *Session) Authorize(provider goth.Provider, params goth.Params) (string, error) {
// Validate the incoming HMAC is valid.
// See: https://help.shopify.com/en/api/getting-started/authentication/oauth#verification
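// The signed payload is the callback's code, shop, state and timestamp parameters in
// key=value form joined with '&'; it is HMAC-SHA256'd with the app's shared secret
// (read from the SHOPIFY_SECRET environment variable) and compared to the hmac parameter.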
digest := fmt.Sprintf(
"code=%s&shop=%s&state=%s×tamp=%s",
params.Get("code"),
params.Get("shop"),
params.Get("state"),
params.Get("timestamp"),
)
h := hmac.New(sha256.New, []byte(os.Getenv("SHOPIFY_SECRET")))
h.Write([]byte(digest))
sha := hex.EncodeToString(h.Sum(nil))
// Ensure our HMAC hash's match.
if sha != params.Get("hmac") {
return "", errors.New("Invalid HMAC received")
}
// Validate the hostname matches what we're expecting.
// See: https://help.shopify.com/en/api/getting-started/authentication/oauth#step-3-confirm-installation
re := regexp.MustCompile(shopifyHostnameRegex)
if !re.MatchString(params.Get("shop")) {
return "", errors.New("Invalid hostname received")
}
// Make the exchange for an access token.
p := provider.(*Provider)
token, err := p.config.Exchange(goth.ContextForClient(p.Client()), params.Get("code"))
if err != nil {
return "", err
}
// Ensure it's valid.
if !token.Valid() {
return "", errors.New("Invalid token received from provider")
}
s.AccessToken = token.AccessToken
s.Hostname = params.Get("hostname")
s.HMAC = params.Get("hmac")
return token.AccessToken, err
}
// Marshal the session into a string
func (s Session) Marshal() string {
b, _ := json.Marshal(s)
return string(b)
}
func (s Session) String() string {
return s.Marshal()
}
// UnmarshalSession will unmarshal a JSON string into a session.
func (p *Provider) UnmarshalSession(data string) (goth.Session, error) {
s := &Session{}
err := json.NewDecoder(strings.NewReader(data)).Decode(s)
return s, err
}
| ["\"SHOPIFY_SECRET\""] | [] | ["SHOPIFY_SECRET"] | [] | ["SHOPIFY_SECRET"] | go | 1 | 0 | |
vendor/github.com/elotl/node-cli/internal/commands/root/http.go | // Copyright © 2017 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"os"
"time"
"github.com/elotl/node-cli/opts"
"github.com/elotl/node-cli/provider"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
)
// AcceptedCiphers is the list of accepted TLS ciphers, with known weak ciphers elided
// Note this list should be a moving target.
var AcceptedCiphers = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
func loadTLSConfig(certPath, keyPath string) (*tls.Config, error) {
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, errors.Wrap(err, "error loading tls certs")
}
return &tls.Config{
Certificates: []tls.Certificate{cert},
MinVersion: tls.VersionTLS12,
PreferServerCipherSuites: true,
CipherSuites: AcceptedCiphers,
}, nil
}
func setupHTTPServer(ctx context.Context, p provider.Provider, cfg *apiServerConfig) (_ func(), retErr error) {
var closers []io.Closer
cancel := func() {
for _, c := range closers {
c.Close()
}
}
defer func() {
if retErr != nil {
cancel()
}
}()
if cfg.CertPath == "" || cfg.KeyPath == "" {
log.G(ctx).
WithField("certPath", cfg.CertPath).
WithField("keyPath", cfg.KeyPath).
Error("TLS certificates not provided, not setting up pod http server")
} else {
tlsCfg, err := loadTLSConfig(cfg.CertPath, cfg.KeyPath)
if err != nil {
return nil, err
}
l, err := tls.Listen("tcp", cfg.Addr, tlsCfg)
if err != nil {
return nil, errors.Wrap(err, "error setting up listener for pod http server")
}
mux := http.NewServeMux()
podRoutes := api.PodHandlerConfig{
RunInContainer: p.RunInContainer,
GetContainerLogs: p.GetContainerLogs,
GetPods: p.GetPods,
StreamIdleTimeout: cfg.StreamIdleTimeout,
StreamCreationTimeout: cfg.StreamCreationTimeout,
}
api.AttachPodRoutes(podRoutes, mux, true)
s := &http.Server{
Handler: mux,
TLSConfig: tlsCfg,
}
go serveHTTP(ctx, s, l, "pods")
closers = append(closers, s)
}
if cfg.MetricsAddr == "" {
log.G(ctx).Info("Pod metrics server not setup due to empty metrics address")
} else {
l, err := net.Listen("tcp", cfg.MetricsAddr)
if err != nil {
return nil, errors.Wrap(err, "could not setup listener for pod metrics http server")
}
mux := http.NewServeMux()
var summaryHandlerFunc api.PodStatsSummaryHandlerFunc
if mp, ok := p.(provider.PodMetricsProvider); ok {
summaryHandlerFunc = mp.GetStatsSummary
}
podMetricsRoutes := api.PodMetricsConfig{
GetStatsSummary: summaryHandlerFunc,
}
api.AttachPodMetricsRoutes(podMetricsRoutes, mux)
s := &http.Server{
Handler: mux,
}
go serveHTTP(ctx, s, l, "pod metrics")
closers = append(closers, s)
}
return cancel, nil
}
func serveHTTP(ctx context.Context, s *http.Server, l net.Listener, name string) {
if err := s.Serve(l); err != nil {
select {
case <-ctx.Done():
default:
log.G(ctx).WithError(err).Errorf("Error setting up %s http server", name)
}
}
l.Close()
}
type apiServerConfig struct {
CertPath string
KeyPath string
Addr string
MetricsAddr string
StreamIdleTimeout time.Duration
StreamCreationTimeout time.Duration
}
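// getAPIConfig assembles the API server settings from the command-line options; the TLS
// certificate and key paths are read from the APISERVER_CERT_LOCATION and
// APISERVER_KEY_LOCATION environment variables, and setupHTTPServer skips the pod HTTP
// server when either is empty.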
func getAPIConfig(c *opts.Opts) (*apiServerConfig, error) {
config := apiServerConfig{
CertPath: os.Getenv("APISERVER_CERT_LOCATION"),
KeyPath: os.Getenv("APISERVER_KEY_LOCATION"),
}
config.Addr = fmt.Sprintf(":%d", c.ListenPort)
config.MetricsAddr = c.MetricsAddr
config.StreamIdleTimeout = c.StreamIdleTimeout
config.StreamCreationTimeout = c.StreamCreationTimeout
return &config, nil
}
| ["\"APISERVER_CERT_LOCATION\"", "\"APISERVER_KEY_LOCATION\""] | [] | ["APISERVER_CERT_LOCATION", "APISERVER_KEY_LOCATION"] | [] | ["APISERVER_CERT_LOCATION", "APISERVER_KEY_LOCATION"] | go | 2 | 0 | |
test/com/facebook/buck/cli/TargetsCommandTest.java | /*
* Copyright 2012-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.cli;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import com.facebook.buck.android.AndroidResourceBuilder;
import com.facebook.buck.apple.AppleLibraryBuilder;
import com.facebook.buck.apple.AppleTestBuilder;
import com.facebook.buck.artifact_cache.ArtifactCache;
import com.facebook.buck.artifact_cache.NoopArtifactCache;
import com.facebook.buck.config.FakeBuckConfig;
import com.facebook.buck.event.BuckEventBus;
import com.facebook.buck.event.BuckEventBusForTests;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.facebook.buck.io.filesystem.TestProjectFilesystems;
import com.facebook.buck.jvm.java.FakeJavaPackageFinder;
import com.facebook.buck.jvm.java.JavaLibraryBuilder;
import com.facebook.buck.jvm.java.JavaLibraryDescription;
import com.facebook.buck.jvm.java.JavaTestBuilder;
import com.facebook.buck.jvm.java.JavaTestDescription;
import com.facebook.buck.jvm.java.PrebuiltJarBuilder;
import com.facebook.buck.model.BuildTarget;
import com.facebook.buck.model.BuildTargetFactory;
import com.facebook.buck.parser.exceptions.BuildFileParseException;
import com.facebook.buck.rules.Cell;
import com.facebook.buck.rules.FakeSourcePath;
import com.facebook.buck.rules.SourceWithFlags;
import com.facebook.buck.rules.TargetGraph;
import com.facebook.buck.rules.TargetNode;
import com.facebook.buck.rules.TestCellBuilder;
import com.facebook.buck.shell.GenruleBuilder;
import com.facebook.buck.testutil.FakeOutputStream;
import com.facebook.buck.testutil.FakeProjectFilesystem;
import com.facebook.buck.testutil.ProcessResult;
import com.facebook.buck.testutil.TargetGraphFactory;
import com.facebook.buck.testutil.TemporaryPaths;
import com.facebook.buck.testutil.TestConsole;
import com.facebook.buck.testutil.integration.ProjectWorkspace;
import com.facebook.buck.testutil.integration.TestDataHelper;
import com.facebook.buck.util.ObjectMappers;
import com.facebook.buck.util.environment.Platform;
import com.fasterxml.jackson.core.JsonParser.Feature;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Executors;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
public class TargetsCommandTest {
private TestConsole console;
private ProjectWorkspace workspace;
private TargetsCommand targetsCommand;
private CommandRunnerParams params;
private ProjectFilesystem filesystem;
private ListeningExecutorService executor;
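/** Builds a single java_library {@link TargetNode} for the given fully-qualified build target. */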
private Iterable<TargetNode<?, ?>> buildTargetNodes(
ProjectFilesystem filesystem, String buildTarget) {
SortedSet<TargetNode<?, ?>> buildRules = new TreeSet<>();
BuildTarget target = BuildTargetFactory.newInstance(filesystem.getRootPath(), buildTarget);
TargetNode<?, ?> node = JavaLibraryBuilder.createBuilder(target).build();
buildRules.add(node);
return buildRules;
}
@Rule public TemporaryPaths tmp = new TemporaryPaths();
@Before
public void setUp() throws IOException, InterruptedException {
console = new TestConsole();
workspace = TestDataHelper.createProjectWorkspaceForScenario(this, "target_command", tmp);
workspace.setUp();
filesystem =
TestProjectFilesystems.createProjectFilesystem(
workspace.getDestPath().toRealPath().normalize());
Cell cell = new TestCellBuilder().setFilesystem(filesystem).build();
ArtifactCache artifactCache = new NoopArtifactCache();
BuckEventBus eventBus = BuckEventBusForTests.newInstance();
targetsCommand = new TargetsCommand();
params =
CommandRunnerParamsForTesting.createCommandRunnerParamsForTesting(
console,
cell,
artifactCache,
eventBus,
FakeBuckConfig.builder().build(),
Platform.detect(),
ImmutableMap.copyOf(System.getenv()),
new FakeJavaPackageFinder(),
Optional.empty());
executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
}
@After
public void tearDown() {
executor.shutdown();
}
@Test
public void testJsonOutputForBuildTarget() throws IOException, BuildFileParseException {
// run `buck targets` on the build file and parse the observed JSON.
Iterable<TargetNode<?, ?>> nodes = buildTargetNodes(filesystem, "//:test-library");
targetsCommand.printJsonForTargets(
params, executor, nodes, ImmutableMap.of(), ImmutableSet.of());
String observedOutput = console.getTextWrittenToStdOut();
JsonNode observed = ObjectMappers.READER.readTree(ObjectMappers.createParser(observedOutput));
// parse the expected JSON.
String expectedJson = workspace.getFileContents("TargetsCommandTestBuckJson1.js");
JsonNode expected =
ObjectMappers.READER.readTree(
ObjectMappers.createParser(expectedJson).enable(Feature.ALLOW_COMMENTS));
assertEquals("Output from targets command should match expected JSON.", expected, observed);
assertEquals("Nothing should be printed to stderr.", "", console.getTextWrittenToStdErr());
}
@Test
public void testJsonOutputWithDirectDependencies() throws IOException {
// Run Buck targets command on a case where the deps and direct_dependencies differ
ProcessResult result = workspace.runBuckCommand("targets", "--json", "//:B");
// Parse the observed JSON.
JsonNode observed =
ObjectMappers.READER.readTree(
ObjectMappers.createParser(result.getStdout()).enable(Feature.ALLOW_COMMENTS));
// Parse the expected JSON.
String expectedJson = workspace.getFileContents("TargetsCommandTestBuckJson2.js");
JsonNode expected =
ObjectMappers.READER.readTree(
ObjectMappers.createParser(expectedJson).enable(Feature.ALLOW_COMMENTS));
assertThat(
"Output from targets command should match expected JSON.", observed, is(equalTo(expected)));
assertThat(
"Nothing should be printed to stderr.", console.getTextWrittenToStdErr(), is(equalTo("")));
}
@Test
public void testJsonOutputWithOutputAttributes() throws IOException {
ProcessResult result =
workspace.runBuckCommand(
"targets",
"--json",
"//:B",
"--output-attributes",
"buck.direct_dependencies",
"fully_qualified_name");
// Parse the observed JSON.
JsonNode observed =
ObjectMappers.READER.readTree(
ObjectMappers.createParser(result.getStdout()).enable(Feature.ALLOW_COMMENTS));
// Parse the expected JSON.
String expectedJson = workspace.getFileContents("TargetsCommandTestBuckJson2Filtered.js");
JsonNode expected =
ObjectMappers.READER.readTree(
ObjectMappers.createParser(expectedJson).enable(Feature.ALLOW_COMMENTS));
assertThat(
"Output from targets command should match expected JSON.", observed, is(equalTo(expected)));
assertThat(
"Nothing should be printed to stderr.", console.getTextWrittenToStdErr(), is(equalTo("")));
}
@Test
public void testJsonOutputForMissingBuildTarget() throws BuildFileParseException {
// nonexistent target should not exist.
Iterable<TargetNode<?, ?>> buildRules = buildTargetNodes(filesystem, "//:nonexistent");
targetsCommand.printJsonForTargets(
params, executor, buildRules, ImmutableMap.of(), ImmutableSet.of());
String output = console.getTextWrittenToStdOut();
assertEquals("[\n]\n", output);
assertEquals(
"unable to find rule for target //:nonexistent\n", console.getTextWrittenToStdErr());
}
@Test
public void testPrintNullDelimitedTargets() throws UnsupportedEncodingException {
Iterable<String> targets = ImmutableList.of("//foo:bar", "//foo:baz");
FakeOutputStream fakeStream = new FakeOutputStream();
PrintStream printStream = new PrintStream(fakeStream);
TargetsCommand.printNullDelimitedTargets(targets, printStream);
printStream.flush();
assertEquals("//foo:bar\0//foo:baz\0", fakeStream.toString(Charsets.UTF_8.name()));
}
@Test
public void testGetMatchingBuildTargets() {
BuildTarget prebuiltJarTarget = BuildTargetFactory.newInstance("//empty:empty");
TargetNode<?, ?> prebuiltJarNode =
PrebuiltJarBuilder.createBuilder(prebuiltJarTarget)
.setBinaryJar(Paths.get("spoof"))
.build();
BuildTarget javaLibraryTarget = BuildTargetFactory.newInstance("//javasrc:java-library");
TargetNode<?, ?> javaLibraryNode =
JavaLibraryBuilder.createBuilder(javaLibraryTarget)
.addSrc(Paths.get("javasrc/JavaLibrary.java"))
.addDep(prebuiltJarTarget)
.build();
BuildTarget javaTestTarget = BuildTargetFactory.newInstance("//javatest:test-java-library");
TargetNode<?, ?> javaTestNode =
JavaTestBuilder.createBuilder(javaTestTarget)
.addSrc(Paths.get("javatest/TestJavaLibrary.java"))
.addDep(javaLibraryTarget)
.build();
ImmutableSet<TargetNode<?, ?>> nodes =
ImmutableSet.of(prebuiltJarNode, javaLibraryNode, javaTestNode);
TargetGraph targetGraph = TargetGraphFactory.newInstance(nodes);
ImmutableSet<Path> referencedFiles;
// No target depends on the referenced file.
referencedFiles = ImmutableSet.of(Paths.get("excludesrc/CannotFind.java"));
SortedMap<String, TargetNode<?, ?>> matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(referencedFiles),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertTrue(matchingBuildRules.isEmpty());
// Only the test-java-library target depends on the referenced file.
referencedFiles = ImmutableSet.of(Paths.get("javatest/TestJavaLibrary.java"));
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(referencedFiles),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.of("//javatest:test-java-library"), matchingBuildRules.keySet());
// The test-java-library target indirectly depends on the referenced file,
// while the java-library target directly depends on it.
referencedFiles = ImmutableSet.of(Paths.get("javasrc/JavaLibrary.java"));
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(referencedFiles),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(
ImmutableSet.of("//javatest:test-java-library", "//javasrc:java-library"),
matchingBuildRules.keySet());
// Verify that BUCK files show up as referenced files.
referencedFiles = ImmutableSet.of(Paths.get("javasrc/BUCK"));
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(referencedFiles),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(
ImmutableSet.of("//javatest:test-java-library", "//javasrc:java-library"),
matchingBuildRules.keySet());
// An output target only needs to depend on one referenced file.
referencedFiles =
ImmutableSet.of(
Paths.get("javatest/TestJavaLibrary.java"), Paths.get("othersrc/CannotFind.java"));
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(referencedFiles),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.of("//javatest:test-java-library"), matchingBuildRules.keySet());
// If no referenced file, means this filter is disabled, we can find all targets.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph, Optional.empty(), Optional.empty(), Optional.empty(), false, "BUCK");
assertEquals(
ImmutableSet.of("//javatest:test-java-library", "//javasrc:java-library", "//empty:empty"),
matchingBuildRules.keySet());
// Specify java_test, java_library as type filters.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.empty(),
Optional.empty(),
Optional.of(ImmutableSet.of(JavaTestDescription.class, JavaLibraryDescription.class)),
false,
"BUCK");
assertEquals(
ImmutableSet.of("//javatest:test-java-library", "//javasrc:java-library"),
matchingBuildRules.keySet());
// Specify java_test, java_library, and a rule name as type filters.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.empty(),
Optional.of(ImmutableSet.of(BuildTargetFactory.newInstance("//javasrc:java-library"))),
Optional.of(ImmutableSet.of(JavaTestDescription.class, JavaLibraryDescription.class)),
false,
"BUCK");
assertEquals(ImmutableSet.of("//javasrc:java-library"), matchingBuildRules.keySet());
// Only filter by BuildTarget
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.empty(),
Optional.of(ImmutableSet.of(BuildTargetFactory.newInstance("//javasrc:java-library"))),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.of("//javasrc:java-library"), matchingBuildRules.keySet());
// Filter by BuildTarget and Referenced Files
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("javatest/TestJavaLibrary.java"))),
Optional.of(ImmutableSet.of(BuildTargetFactory.newInstance("//javasrc:java-library"))),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.<String>of(), matchingBuildRules.keySet());
}
@Test
public void testGetMatchingAppleLibraryBuildTarget() {
BuildTarget libraryTarget = BuildTargetFactory.newInstance("//foo:lib");
TargetNode<?, ?> libraryNode =
AppleLibraryBuilder.createBuilder(libraryTarget)
.setSrcs(ImmutableSortedSet.of(SourceWithFlags.of(FakeSourcePath.of("foo/foo.m"))))
.build();
ImmutableSet<TargetNode<?, ?>> nodes = ImmutableSet.of(libraryNode);
TargetGraph targetGraph = TargetGraphFactory.newInstance(nodes);
// No target depends on the referenced file.
SortedMap<String, TargetNode<?, ?>> matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/bar.m"))),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertTrue(matchingBuildRules.isEmpty());
// The AppleLibrary matches the referenced file.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/foo.m"))),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.of("//foo:lib"), matchingBuildRules.keySet());
}
@Test
public void testGetMatchingAppleTestBuildTarget() {
BuildTarget libraryTarget = BuildTargetFactory.newInstance("//foo:lib");
TargetNode<?, ?> libraryNode =
AppleLibraryBuilder.createBuilder(libraryTarget)
.setSrcs(ImmutableSortedSet.of(SourceWithFlags.of(FakeSourcePath.of("foo/foo.m"))))
.build();
BuildTarget testTarget = BuildTargetFactory.newInstance("//foo:xctest");
TargetNode<?, ?> testNode =
AppleTestBuilder.createBuilder(testTarget)
.setSrcs(ImmutableSortedSet.of(SourceWithFlags.of(FakeSourcePath.of("foo/testfoo.m"))))
.setDeps(ImmutableSortedSet.of(libraryTarget))
.setInfoPlist(FakeSourcePath.of("Info.plist"))
.build();
ImmutableSet<TargetNode<?, ?>> nodes = ImmutableSet.of(libraryNode, testNode);
TargetGraph targetGraph = TargetGraphFactory.newInstance(nodes);
// No target depends on the referenced file.
SortedMap<String, TargetNode<?, ?>> matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/bar.m"))),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertTrue(matchingBuildRules.isEmpty());
// The AppleLibrary and the AppleTest both match the referenced file.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/foo.m"))),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.of("//foo:lib", "//foo:xctest"), matchingBuildRules.keySet());
// Only the AppleTest matches the referenced file.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/testfoo.m"))),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.of("//foo:xctest"), matchingBuildRules.keySet());
}
@Test
public void testPathsUnderDirectories() {
ProjectFilesystem projectFilesystem = new FakeProjectFilesystem();
Path resDir = Paths.get("some/resources/dir");
BuildTarget androidResourceTarget = BuildTargetFactory.newInstance("//:res");
TargetNode<?, ?> androidResourceNode =
AndroidResourceBuilder.createBuilder(androidResourceTarget).setRes(resDir).build();
Path genSrc = resDir.resolve("foo.txt");
BuildTarget genTarget = BuildTargetFactory.newInstance("//:gen");
TargetNode<?, ?> genNode =
GenruleBuilder.newGenruleBuilder(genTarget)
.setSrcs(ImmutableList.of(FakeSourcePath.of(projectFilesystem, genSrc)))
.setOut("out")
.build();
TargetGraph targetGraph = TargetGraphFactory.newInstance(androidResourceNode, genNode);
SortedMap<String, TargetNode<?, ?>> matchingBuildRules;
// Specifying a resource under the resource directory causes a match.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(resDir.resolve("some_resource.txt"))),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(ImmutableSet.of(androidResourceTarget.toString()), matchingBuildRules.keySet());
// Specifying a resource with the same string-like common prefix, but not under the above
// resource dir, should not trigger a match.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get(resDir + "_extra").resolve("some_resource.txt"))),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertTrue(matchingBuildRules.isEmpty());
// Specifying a resource with the same string-like common prefix, but not under the above
// resource dir, should not trigger a match.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(genSrc)),
Optional.empty(),
Optional.empty(),
false,
"BUCK");
assertEquals(
ImmutableSet.of(androidResourceTarget.toString(), genTarget.toString()),
matchingBuildRules.keySet());
}
@Test
public void testDetectTestChanges() {
BuildTarget libraryTarget = BuildTargetFactory.newInstance("//foo:lib");
BuildTarget libraryTestTarget1 = BuildTargetFactory.newInstance("//foo:xctest1");
BuildTarget libraryTestTarget2 = BuildTargetFactory.newInstance("//foo:xctest2");
BuildTarget testLibraryTarget = BuildTargetFactory.newInstance("//testlib:testlib");
BuildTarget testLibraryTestTarget = BuildTargetFactory.newInstance("//testlib:testlib-xctest");
TargetNode<?, ?> libraryNode =
AppleLibraryBuilder.createBuilder(libraryTarget)
.setSrcs(ImmutableSortedSet.of(SourceWithFlags.of(FakeSourcePath.of("foo/foo.m"))))
.setTests(ImmutableSortedSet.of(libraryTestTarget1, libraryTestTarget2))
.build();
TargetNode<?, ?> libraryTestNode1 =
AppleTestBuilder.createBuilder(libraryTestTarget1)
.setSrcs(ImmutableSortedSet.of(SourceWithFlags.of(FakeSourcePath.of("foo/testfoo1.m"))))
.setDeps(ImmutableSortedSet.of(libraryTarget))
.setInfoPlist(FakeSourcePath.of("Info.plist"))
.build();
TargetNode<?, ?> libraryTestNode2 =
AppleTestBuilder.createBuilder(libraryTestTarget2)
.setSrcs(ImmutableSortedSet.of(SourceWithFlags.of(FakeSourcePath.of("foo/testfoo2.m"))))
.setDeps(ImmutableSortedSet.of(testLibraryTarget))
.setInfoPlist(FakeSourcePath.of("Info.plist"))
.build();
TargetNode<?, ?> testLibraryNode =
AppleLibraryBuilder.createBuilder(testLibraryTarget)
.setSrcs(
ImmutableSortedSet.of(SourceWithFlags.of(FakeSourcePath.of("testlib/testlib.m"))))
.setTests(ImmutableSortedSet.of(testLibraryTestTarget))
.build();
TargetNode<?, ?> testLibraryTestNode =
AppleTestBuilder.createBuilder(testLibraryTestTarget)
.setSrcs(
ImmutableSortedSet.of(
SourceWithFlags.of(FakeSourcePath.of("testlib/testlib-test.m"))))
.setDeps(ImmutableSortedSet.of(testLibraryTarget))
.setInfoPlist(FakeSourcePath.of("Info.plist"))
.build();
ImmutableSet<TargetNode<?, ?>> nodes =
ImmutableSet.of(
libraryNode, libraryTestNode1, libraryTestNode2, testLibraryNode, testLibraryTestNode);
TargetGraph targetGraph = TargetGraphFactory.newInstance(nodes);
// No target depends on the referenced file.
SortedMap<String, TargetNode<?, ?>> matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/bar.m"))),
Optional.empty(),
Optional.empty(),
true,
"BUCK");
assertTrue(matchingBuildRules.isEmpty());
    // Test1 and the library depend on the referenced file.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/testfoo1.m"))),
Optional.empty(),
Optional.empty(),
true,
"BUCK");
assertEquals(ImmutableSet.of("//foo:lib", "//foo:xctest1"), matchingBuildRules.keySet());
// Test1, test2 and the library depend on the referenced file.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("foo/testfoo2.m"))),
Optional.empty(),
Optional.empty(),
true,
"BUCK");
assertEquals(
ImmutableSet.of("//foo:lib", "//foo:xctest1", "//foo:xctest2"),
matchingBuildRules.keySet());
// Library, test1, test2, test library and its test depend on the referenced file.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("testlib/testlib.m"))),
Optional.empty(),
Optional.empty(),
true,
"BUCK");
assertEquals(
ImmutableSet.of(
"//foo:lib",
"//foo:xctest1",
"//foo:xctest2",
"//testlib:testlib",
"//testlib:testlib-xctest"),
matchingBuildRules.keySet());
// Library, test1, test2, test library and its test depend on the referenced file.
matchingBuildRules =
targetsCommand.getMatchingNodes(
targetGraph,
Optional.of(ImmutableSet.of(Paths.get("testlib/testlib-test.m"))),
Optional.empty(),
Optional.empty(),
true,
"BUCK");
assertEquals(
ImmutableSet.of(
"//foo:lib",
"//foo:xctest1",
"//foo:xctest2",
"//testlib:testlib",
"//testlib:testlib-xctest"),
matchingBuildRules.keySet());
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
StreamStatus.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
log "github.com/sirupsen/logrus"
git "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing/object"
httpauth "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/nicklaw5/helix"
)
var VALID_GAMES = []string{"science \u0026 technology", "software and game development", "tryhackme", "hack the box", "just chatting", "talk shows \u0026 podcasts"}
// StreamersRepo struct represents fields to hold various data while updating status.
type StreamersRepo struct {
auth *httpauth.BasicAuth
indexFilePath string
indexMdText string
online bool
repo *git.Repository
repoPath string
streamer string
url string
language string
game string
client *helix.Client
}
// NoChangeNeededError is a struct for a custom error handler
// when no changes are needed to the git repository.
type NoChangeNeededError struct {
err string
}
// Error returns a string for the NoChangeNeededError struct.
func (e *NoChangeNeededError) Error() string {
return e.err
}
// gitPush pushes the repository to github and return and error.
func (s *StreamersRepo) gitPush() error {
err := s.repo.Push(&git.PushOptions{
RemoteName: "origin",
Auth: s.auth,
})
if err != nil {
return err
}
log.Println("remote repo updated.", s.indexFilePath)
return nil
}
// gitCommit makes a commit to the repository and returns an error.
func (s *StreamersRepo) gitCommit() error {
w, err := s.repo.Worktree()
if err != nil {
return err
}
commitMessage := ""
if s.online {
commitMessage = fmt.Sprintf("🟢 %s has gone online! [no ci]", s.streamer)
} else {
commitMessage = fmt.Sprintf("☠️ %s has gone offline! [no ci]", s.streamer)
}
_, err = w.Commit(commitMessage, &git.CommitOptions{
Author: &object.Signature{
Name: "🤖 STATUSS (Seriously Totally Automated Twitch Updating StreamStatus)",
Email: "[email protected]",
When: time.Now(),
},
})
if err != nil {
return err
}
commit, err := s.getHeadCommit()
if err != nil {
return err
}
log.Println(commit)
return nil
}
// gitAdd adds the index file to the repository and returns an error.
func (s *StreamersRepo) gitAdd() error {
w, err := s.repo.Worktree()
if err != nil {
return err
}
_, err = w.Add(strings.Split(s.indexFilePath, "/")[1])
if err != nil {
return err
}
return nil
}
// getHeadCommit gets the commit at HEAD.
func (s *StreamersRepo) getHeadCommit() (string, error) {
// Get repo head.
ref, err := s.repo.Head()
if err != nil {
return "", err
}
commit, err := s.repo.CommitObject(ref.Hash())
if err != nil {
return "", err
}
return commit.String(), nil
}
// getRepo clones a repo to pwd and returns an error.
func (s *StreamersRepo) getRepo() error {
directory := strings.SplitN(s.url, "/", 5)[4]
repo, err := git.PlainClone(directory, false, &git.CloneOptions{
// The intended use of a GitHub personal access token is in replace of your password
// because access tokens can easily be revoked.
// https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/
Auth: s.auth,
URL: s.url,
// We're discarding the stdout out here. If you'd like to see it toggle
// `Progress` to something like os.Stdout.
Progress: ioutil.Discard,
})
if err == nil {
s.repo = repo
return nil
}
// Check if the error is that the repo exists and if it is on disk open it.
errStr := fmt.Sprint(err)
// Otherwise return error
if !strings.Contains(errStr, "exists") {
return err
}
repo, err = git.PlainOpen(s.repoPath)
if err != nil {
return err
}
log.Warn("Doing git pull")
w, err := repo.Worktree()
if err != nil {
return err
}
w.Pull(&git.PullOptions{
Force: true,
ReferenceName: "HEAD",
RemoteName: "origin",
})
s.repo = repo
return nil
}
// writefile writes the given text to the index file and returns any error.
func (s *StreamersRepo) writefile(text string) error {
bytesToWrite := []byte(text)
return ioutil.WriteFile(s.indexFilePath, bytesToWrite, 0644)
}
// updateStreamStatus toggles the streamer's status (online/offline) in the index markdown text.
// It returns a NoChangeNeededError when the streamer's line is already up to date.
func (s *StreamersRepo) updateStreamStatus() error {
streamerFormatted := fmt.Sprintf("`%s`", s.streamer)
indexMdLines := strings.Split(s.indexMdText, "\n")
for i, v := range indexMdLines {
if strings.Contains(v, streamerFormatted) {
otherInfo := strings.Split(v, "|")[2]
newLine := s.generateStreamerLine(otherInfo)
if newLine != v {
indexMdLines[i] = newLine
} else {
err := &NoChangeNeededError{}
err.err = fmt.Sprintf("no change needed for: %s, online: %v", s.streamer, s.online)
return err
}
break
}
}
s.indexMdText = strings.Join(indexMdLines, "\n")
return nil
}
func (s *StreamersRepo) generateStreamerLine(otherInfo string) string {
tw := strings.Split(strings.Split(otherInfo, " ")[0], ")")[0]
yt := strings.Split(otherInfo, " ")[1]
if s.online {
return fmt.Sprintf("%s | `%s` |%s \"%s\") %s| %s",
"🟢",
s.streamer,
tw,
s.game,
yt,
s.language,
)
}
return fmt.Sprintf("%s | `%s` |%s) %s",
" ",
s.streamer,
tw,
yt,
)
}
// readFile reads the index file into s.indexMdText and returns any error.
func (s *StreamersRepo) readFile() error {
markdownText, err := os.ReadFile(s.indexFilePath)
if err != nil {
return err
} else {
s.indexMdText = string(markdownText)
return nil
}
}
// updateMarkdown reads index.md, updates the streamer's status,
// then writes the change back to index.md and returns an error.
func updateMarkdown(repo *StreamersRepo) error {
err := repo.getRepo()
if err != nil {
log.Printf("error during repo clone: %s\n", err)
}
err = repo.readFile()
if err != nil {
log.Printf("error reading file: %+s\n", err)
os.Exit(-1)
}
err = repo.updateStreamStatus()
if err != nil {
if fmt.Sprintf("%T", err) == "*main.NoChangeNeededError" {
return err
}
log.Printf("error updating status: %s\n", err)
}
err = repo.writefile(repo.indexMdText)
if err != nil {
log.Printf("error writing file: %s\n", err)
}
return nil
}
// updateRepo adds and commits the changes to the repository.
func updateRepo(repo *StreamersRepo) {
err := repo.gitAdd()
if err != nil {
log.Printf("error git adding file: error: %s\n", err)
}
err = repo.gitCommit()
if err != nil {
log.Printf("error making commit: %s\n", err)
}
}
// pushRepo pushes the committed changes to GitHub.
func pushRepo(repo *StreamersRepo) {
err := repo.gitPush()
if err != nil {
log.Printf("error pushing repo to GitHub: %s\n", err)
}
}
// eventSubNotification is a struct to hold the eventSub webhook request from Twitch.
type eventSubNotification struct {
Challenge string `json:"challenge"`
Event json.RawMessage `json:"event"`
Subscription helix.EventSubSubscription `json:"subscription"`
}
// eventsubStatus takes and http Request and ResponseWriter to handle the incoming webhook request.
func (s *StreamersRepo) eventsubStatus(w http.ResponseWriter, r *http.Request) {
// Read the request body.
body, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Println(err)
return
}
defer r.Body.Close()
// Verify that the notification came from twitch using the secret.
if !helix.VerifyEventSubNotification(os.Getenv("SS_SECRETKEY"), r.Header, string(body)) {
log.Println("invalid signature on message")
return
} else {
log.Println("verified signature on message")
}
// Read the request into eventSubNotification struct.
var vals eventSubNotification
err = json.NewDecoder(bytes.NewReader(body)).Decode(&vals)
if err != nil {
log.Println(err)
return
}
// If there's a challenge in the request respond with only the challenge to verify the eventsubscription.
if vals.Challenge != "" {
w.Write([]byte(vals.Challenge))
return
}
if vals.Subscription.Type == "stream.offline" {
var offlineEvent helix.EventSubStreamOfflineEvent
_ = json.NewDecoder(bytes.NewReader(vals.Event)).Decode(&offlineEvent)
log.Printf("got offline event for: %s\n", offlineEvent.BroadcasterUserName)
w.WriteHeader(200)
w.Write([]byte("ok"))
s.streamer = offlineEvent.BroadcasterUserName
s.online = false
s.language = ""
s.game = ""
err := updateMarkdown(s)
if err == nil {
updateRepo(s)
pushRepo(s)
} else {
log.Warnf("index.md doesn't need to be changed for %s", s.streamer)
}
} else if vals.Subscription.Type == "stream.online" {
var onlineEvent helix.EventSubStreamOnlineEvent
_ = json.NewDecoder(bytes.NewReader(vals.Event)).Decode(&onlineEvent)
log.Printf("got online event for: %s\n", onlineEvent.BroadcasterUserName)
w.WriteHeader(200)
w.Write([]byte("ok"))
stream, err := s.fetchStreamInfo(onlineEvent.BroadcasterUserID)
if err != nil {
log.Errorf("Error fetching stream info for %s (uid: %s)", onlineEvent.BroadcasterUserName, onlineEvent.BroadcasterUserID)
return
}
s.game = stream.GameName
s.streamer = onlineEvent.BroadcasterUserName
// Show streamer as offline if they're not doing infosec
s.online = contains(VALID_GAMES, s.game)
s.language = strings.ToUpper(stream.Language)
err = updateMarkdown(s)
if err == nil {
updateRepo(s)
pushRepo(s)
} else {
log.Warnf("index.md doesn't need to be changed for %s", s.streamer)
}
} else {
log.Errorf("error: event type %s has not been implemented -- pull requests welcome!", r.Header.Get("Twitch-Eventsub-Subscription-Type"))
}
}
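// fetchStreamInfo queries the Twitch Helix API for the active stream belonging to user_id,
// returning an error when the lookup fails or no stream is live.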
func (s *StreamersRepo) fetchStreamInfo(user_id string) (*helix.Stream, error) {
streams, err := s.client.GetStreams(&helix.StreamsParams{
UserIDs: []string{user_id},
})
if err != nil {
return nil, err
}
if streams.ErrorStatus != 0 {
return nil, fmt.Errorf("error fetching stream info status=%d %s error=%s", streams.ErrorStatus, streams.Error, streams.ErrorMessage)
}
if len(streams.Data.Streams) > 0 {
return &streams.Data.Streams[0], nil
}
return nil, fmt.Errorf("no stream returned for uid: %s", user_id)
}
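// contains reports whether the lower-cased item is present in arr.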
func contains(arr []string, item string) bool {
for _, v := range arr {
if v == strings.ToLower(item) {
return true
}
}
return false
}
// main do the work.
func main() {
// Setup file and repo paths.
	repoUrl := os.Getenv("SS_GH_REPO")
	if repoUrl == "" {
		log.Warn("warning: no SS_GH_REPO specified in environment, defaulting to: https://github.com/infosecstreams/infosecstreams.github.io")
		repoUrl = "https://github.com/infosecstreams/infosecstreams.github.io"
	}
repoPath := strings.Split(repoUrl, "/")[4]
filePath := repoPath + "/index.md"
// Setup auth.
if len(os.Getenv("SS_USERNAME")) == 0 || len(os.Getenv("SS_TOKEN")) == 0 || len(os.Getenv("SS_SECRETKEY")) == 0 {
log.Fatalln("error: no SS_USERNAME and/or SS_TOKEN and/or SS_SECRETKEY specified in environment!")
}
auth := &httpauth.BasicAuth{
Username: os.Getenv("SS_USERNAME"),
Password: os.Getenv("SS_TOKEN"),
}
if len(os.Getenv("TW_CLIENT_ID")) == 0 || len(os.Getenv("TW_CLIENT_SECRET")) == 0 {
log.Fatalln("error: no TW_CLIENT_ID and/or TW_CLIENT_SECRET specified in environment! https://dev.twitch.tv/console/app")
}
client, err := helix.NewClient(&helix.Options{
ClientID: os.Getenv("TW_CLIENT_ID"),
ClientSecret: os.Getenv("TW_CLIENT_SECRET"),
})
if err != nil {
log.Fatalln(err)
return
}
access_token, err := client.RequestAppAccessToken([]string{})
if err != nil {
log.Fatalln(err)
return
}
client.SetAppAccessToken(access_token.Data.AccessToken)
// Create StreamersRepo object
var repo = StreamersRepo{
auth: auth,
indexFilePath: filePath,
repoPath: repoPath,
url: repoUrl,
client: client,
}
port := ":8080"
// Google Cloud Run defaults to 8080. Their platform
// sets the $PORT ENV var if you override it with, e.g.:
// `gcloud run services update <service-name> --port <port>`.
if os.Getenv("PORT") != "" {
port = ":" + os.Getenv("PORT")
} else if os.Getenv("SS_PORT") != "" {
port = ":" + os.Getenv("SS_PORT")
}
// Listen and serve.
log.Printf("server starting on %s\n", port)
http.HandleFunc("/webhook/callbacks", repo.eventsubStatus)
log.Fatal(http.ListenAndServe(port, nil))
}
| [
"\"SS_SECRETKEY\"",
"\"SS_GH_REPO\"",
"\"SS_USERNAME\"",
"\"SS_TOKEN\"",
"\"SS_SECRETKEY\"",
"\"SS_USERNAME\"",
"\"SS_TOKEN\"",
"\"TW_CLIENT_ID\"",
"\"TW_CLIENT_SECRET\"",
"\"TW_CLIENT_ID\"",
"\"TW_CLIENT_SECRET\"",
"\"PORT\"",
"\"PORT\"",
"\"SS_PORT\"",
"\"SS_PORT\""
]
| []
| [
"PORT",
"SS_USERNAME",
"SS_GH_REPO",
"SS_PORT",
"SS_SECRETKEY",
"TW_CLIENT_SECRET",
"TW_CLIENT_ID",
"SS_TOKEN"
]
| [] | ["PORT", "SS_USERNAME", "SS_GH_REPO", "SS_PORT", "SS_SECRETKEY", "TW_CLIENT_SECRET", "TW_CLIENT_ID", "SS_TOKEN"] | go | 8 | 0 | |
func/branch.go | package main
import (
"bufio"
"fmt"
"io/ioutil"
"math"
"os"
"reflect"
"runtime"
"strconv"
"time"
)
func main() {
const filename = "abc.txt"
contents, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Println(err)
} else {
fmt.Printf("%s\n", contents)
}
fmt.Println(grade(0))
fmt.Println(grade(59))
fmt.Println(grade(60))
fmt.Println(grade(80))
fmt.Println(grade(90))
fmt.Println(grade(100))
//fmt.Println(grade(101))
sum := 0
for i := 1; i <= 100; i++ {
sum += i
}
fmt.Printf("%d\n", sum)
fmt.Println(convert2bin(0))
fmt.Println(convert2bin(8))
fmt.Println(convert2bin(13))
fmt.Println(convert2bin(16))
fmt.Println(convert2bin(17))
fmt.Println(convert2bin(65535))
printFileContent("/tmp/abc.txt")
read()
	q, r, _ := div(13, 4)
println(q, r)
if result, err := eval(5, 4, "/"); err != nil {
print(err)
} else {
print(result)
}
fmt.Println(apply(pow, 3, 4))
fmt.Println(apply(func(a int, b int) int {
return int(math.Pow(float64(a), float64(b)))
}, 3, 4))
}
func sum(numbers ...int) int {
s := 0
	for _, n := range numbers {
		s += n
}
return s
}
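// apply logs the name of op (recovered via reflection) and then calls it with a and b.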
func apply(op func(int, int) int, a, b int) int {
p := reflect.ValueOf(op).Pointer()
opName := runtime.FuncForPC(p).Name()
fmt.Printf("calling function %s with parameters(%d, %d)\n", opName, a, b)
return op(a, b)
}
func pow(a, b int) int {
return int(math.Pow(float64(a), float64(b)))
}
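// printFileContent prints the process environment and then echoes the file at filename line by line.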
func printFileContent(filename string) {
environ := os.Environ()
for k, v := range environ {
fmt.Println(k, v)
}
fmt.Println("aaaaa", os.Getenv("USERNAME"))
file, err := os.Open(filename)
if err != nil {
panic(err)
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
fmt.Println(scanner.Text())
}
}
func forever() {
for {
fmt.Println("aaa")
		time.Sleep(time.Second)
}
}
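// convert2bin returns the binary (base-2) string representation of a non-negative integer.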
func convert2bin(n int) string {
result := ""
if n == 0 {
return "0"
}
for ; n > 0; n /= 2 {
lsb := n % 2
result = strconv.Itoa(lsb) + result
}
return result
}
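// grade maps a score in [0, 100] to a letter grade and panics on out-of-range input.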
func grade(score int) string {
g := ""
switch {
case score < 0 || score > 100:
panic(fmt.Sprintf("Invalid score: %d", score))
case score < 60:
g = "F"
case score < 80:
g = "C"
case score < 90:
g = "B"
case score <= 100:
g = "A"
}
return g
}
func read() {
const filename = "/tmp/abc.txt"
if contents, err := ioutil.ReadFile(filename); err != nil {
fmt.Println(err)
} else {
fmt.Printf("%s\n", contents)
}
}
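// eval applies the binary operator op to a and b, returning an error for division by zero
// or an unsupported operator.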
func eval(a int, b int, op string) (int, error) {
switch op {
case "+":
return a + b, nil
case "-":
return a - b, nil
case "*":
return a * b, nil
case "/":
q, _, e := div(a, b)
return q, e
default:
return 0, fmt.Errorf("unsupported optation %s", op)
}
}
func div(a, b int) (q, r int, e error) {
if b == 0 {
return 0, 0, fmt.Errorf("the denominator cannot be zero")
}
return a / b, a % b, nil
}
func div2(a, b int) (q, r int) {
q = a / b
r = a % b
return
}
| [
"\"USERNAME\""
]
| []
| [
"USERNAME"
]
| [] | ["USERNAME"] | go | 1 | 0 | |
tests/alert_test.go | /*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package tests
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/gorilla/websocket"
"github.com/hydrogen18/stoppableListener"
"github.com/skydive-project/skydive/alert"
"github.com/skydive-project/skydive/api/types"
"github.com/skydive-project/skydive/common"
"github.com/skydive-project/skydive/config"
"github.com/skydive-project/skydive/topology/graph"
ws "github.com/skydive-project/skydive/websocket"
)
var alertLock sync.Mutex
func checkMessage(t *testing.T, b []byte, al *types.Alert, nsName string) (bool, error) {
alertLock.Lock()
defer alertLock.Unlock()
var alertMsg alert.Message
if err := common.JSONDecode(bytes.NewReader(b), &alertMsg); err == nil {
if alertMsg.UUID == al.UUID {
var nodes []*graph.Node
switch arr := alertMsg.ReasonData.(type) {
case []interface{}:
for _, obj := range arr {
n := new(graph.Node)
if err := n.Decode(obj); err != nil {
return false, err
}
nodes = append(nodes, n)
}
}
if len(nodes) > 0 {
if name, _ := nodes[0].GetFieldString("Name"); name == nsName {
return true, nil
}
}
}
}
return false, nil
}
func wsClose(ws *websocket.Conn) error {
if err := ws.WriteControl(websocket.CloseMessage, nil, time.Now().Add(3*time.Second)); err != nil {
return err
}
return ws.Close()
}
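// decodeStructMessageJSON unmarshals a raw websocket payload into a ws.StructMessage,
// returning nil if decoding fails.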
func decodeStructMessageJSON(b []byte) *ws.StructMessage {
mJSON := ws.StructMessageJSON{}
if err := json.Unmarshal(b, &mJSON); err != nil {
return nil
}
msg := &ws.StructMessage{
Protocol: ws.JSONProtocol,
Namespace: mJSON.Namespace,
Type: mJSON.Type,
UUID: mJSON.UUID,
Status: mJSON.Status,
JsonObj: mJSON.Obj,
}
return msg
}
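// newClient dials the given endpoint and upgrades the TCP connection to a websocket
// on the /ws/subscriber path, using wss when TLS is enabled.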
func newClient(endpoint string) (*websocket.Conn, error) {
conn, err := net.Dial("tcp", endpoint)
if err != nil {
return nil, err
}
scheme := "ws"
	if config.IsTLSEnabled() {
scheme = "wss"
}
endpoint = fmt.Sprintf("%s://%s/ws/subscriber", scheme, endpoint)
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
wsConn, _, err := websocket.NewClient(conn, u, http.Header{"Origin": {endpoint}}, 1024, 1024)
if err != nil {
return nil, err
}
return wsConn, nil
}
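// connect retries newClient once per second until timeout (in seconds) is reached and
// installs a ping handler that invokes onReady after the first ping from the server.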
func connect(endpoint string, timeout int, onReady func(*websocket.Conn)) (*websocket.Conn, error) {
var ws *websocket.Conn
var err error
t := 0
for {
if t > timeout {
return nil, errors.New("Connection to Agent : timeout reached")
}
ws, err = newClient(endpoint)
if err == nil {
break
}
time.Sleep(1 * time.Second)
t++
}
ready := false
h := func(message string) error {
err := ws.WriteControl(websocket.PongMessage, []byte(message), time.Now().Add(time.Second))
if err != nil {
return err
}
if !ready {
ready = true
if onReady != nil {
onReady(ws)
}
}
return nil
}
ws.SetPingHandler(h)
return ws, nil
}
func TestAlertWebhook(t *testing.T) {
var (
err error
al *types.Alert
sl *stoppableListener.StoppableListener
wg sync.WaitGroup
testPassed atomic.Value
)
testPassed.Store(false)
agent1IP := os.Getenv("AGENT1_IP")
if agent1IP == "" {
agent1IP = "localhost"
}
ListenAndServe := func(addr string, port int) {
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addr, port))
if err != nil {
t.Fatalf("Failed to listen on %s:%d: %s", addr, port, err.Error())
}
sl, err = stoppableListener.New(listener)
if err != nil {
t.Fatalf("Failed to create stoppable listener: %s", err.Error())
}
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
wg.Add(1)
defer wg.Done()
if r.Method == "POST" {
b, _ := ioutil.ReadAll(r.Body)
result, _ := checkMessage(t, b, al, "alert-ns-webhook")
testPassed.Store(result)
}
})
go func() {
http.Serve(sl, nil)
wg.Done()
}()
}
test := &Test{
setupCmds: []Cmd{
{"ip netns add alert-ns-webhook", true},
},
setupFunction: func(c *TestContext) error {
wg.Add(1)
ListenAndServe(agent1IP, 8080)
alertLock.Lock()
defer alertLock.Unlock()
al = types.NewAlert()
al.Expression = "G.V().Has('Name', 'alert-ns-webhook', 'Type', 'netns')"
al.Action = fmt.Sprintf("http://%s:8080/", agent1IP)
if err = c.client.Create("alert", al); err != nil {
return fmt.Errorf("Failed to create alert: %s", err.Error())
}
return nil
},
tearDownCmds: []Cmd{
{"ip netns del alert-ns-webhook", true},
},
tearDownFunction: func(c *TestContext) error {
sl.Close()
wg.Wait()
return c.client.Delete("alert", al.ID())
},
checks: []CheckFunction{func(c *CheckContext) error {
if testPassed.Load() == false {
if err != nil {
return err
}
return errors.New("Webhook was not triggered")
}
return nil
}},
}
RunTest(t, test)
}
func TestAlertScript(t *testing.T) {
if agentTestsOnly {
t.Skip("this test works only when agent and analyzers are on the same host")
}
var (
err error
al *types.Alert
testPassed = false
)
cookie, err := ioutil.TempFile("", "test-alert-script")
if err == nil {
err = os.Remove(cookie.Name())
}
if err != nil {
t.Fatalf(err.Error())
return
}
tmpfile, err := ioutil.TempFile("", "example")
if err == nil {
if _, err = tmpfile.Write([]byte(fmt.Sprintf("#!/bin/sh\ncat > %s", cookie.Name()))); err == nil {
err = os.Chmod(tmpfile.Name(), 0755)
}
}
if err != nil {
t.Fatalf(err.Error())
return
}
tmpfile.Close()
defer os.Remove(tmpfile.Name())
test := &Test{
setupCmds: []Cmd{
{"ip netns add alert-ns-script", true},
},
setupFunction: func(c *TestContext) error {
al = types.NewAlert()
al.Expression = "G.V().Has('Name', 'alert-ns-script', 'Type', 'netns')"
al.Action = "file://" + tmpfile.Name()
if err = c.client.Create("alert", al); err != nil {
return fmt.Errorf("Failed to create alert: %s", err.Error())
}
return nil
},
tearDownCmds: []Cmd{
{"ip netns del alert-ns-script", true},
},
tearDownFunction: func(c *TestContext) error {
return c.client.Delete("alert", al.ID())
},
checks: []CheckFunction{func(c *CheckContext) error {
if _, err := os.Stat(cookie.Name()); err != nil {
return errors.New("No alert was triggered")
}
b, err := ioutil.ReadFile(cookie.Name())
if err != nil {
return errors.New("No alert was triggered")
}
testPassed, err = checkMessage(t, b, al, "alert-ns-script")
if !testPassed {
return fmt.Errorf("Wrong message %+v (error: %+v)", string(b), err)
}
return nil
}},
}
RunTest(t, test)
}
func TestAlertWithTimer(t *testing.T) {
var (
err error
ws *websocket.Conn
al *types.Alert
)
test := &Test{
retries: 1,
setupCmds: []Cmd{
{"ip netns add alert-ns-timer", true},
},
setupFunction: func(c *TestContext) error {
ws, err = connect(config.GetStringSlice("analyzers")[0], 5, nil)
if err != nil {
return err
}
al = types.NewAlert()
al.Expression = "G.V().Has('Name', 'alert-ns-timer', 'Type', 'netns')"
al.Trigger = "duration:+1s"
if err = c.client.Create("alert", al); err != nil {
return fmt.Errorf("Failed to create alert: %s", err.Error())
}
return nil
},
tearDownCmds: []Cmd{
{"ip netns del alert-ns-timer", true},
},
tearDownFunction: func(c *TestContext) error {
wsClose(ws)
return c.client.Delete("alert", al.ID())
},
checks: []CheckFunction{func(c *CheckContext) error {
for {
_, m, err := ws.ReadMessage()
if err != nil {
return err
}
msg := decodeStructMessageJSON(m)
if msg == nil {
t.Fatal("Failed to unmarshal message")
}
if msg.Namespace != "Alert" {
continue
}
testPassed, err := checkMessage(t, []byte(*msg.JsonObj), al, "alert-ns-timer")
if err != nil {
return err
}
if !testPassed {
return fmt.Errorf("Wrong alert message: %+v (error: %+v)", string(*msg.JsonObj), err)
}
break
}
return nil
}},
}
RunTest(t, test)
}
func TestMultipleTriggering(t *testing.T) {
var (
err error
ws *websocket.Conn
al *types.Alert
)
test := &Test{
setupCmds: []Cmd{
{"ip netns add alert-lo-down", true},
},
setupFunction: func(c *TestContext) error {
ws, err = connect(config.GetStringSlice("analyzers")[0], 5, nil)
if err != nil {
return err
}
al = types.NewAlert()
al.Expression = "G.V().Has('Name', 'alert-lo-down', 'Type', 'netns').Out('Name','lo').Values('State')"
if err = c.client.Create("alert", al); err != nil {
return fmt.Errorf("Failed to create alert: %s", err.Error())
}
t.Logf("alert created with UUID : %s", al.UUID)
return nil
},
tearDownCmds: []Cmd{
{"ip netns del alert-lo-down", true},
},
tearDownFunction: func(c *TestContext) error {
wsClose(ws)
return c.client.Delete("alert", al.ID())
},
retries: 1,
checks: []CheckFunction{func(c *CheckContext) error {
alertNumber := 0
cmd := []Cmd{
{"ip netns exec alert-lo-down ip l set lo up", true},
}
downLo := []Cmd{
{"ip netns exec alert-lo-down ip l set lo down", true},
}
for alertNumber < 2 {
_, m, err := ws.ReadMessage()
if err != nil {
return err
}
msg := decodeStructMessageJSON(m)
if msg == nil {
t.Fatal("Failed to unmarshal message")
}
if msg.Namespace != "Alert" {
continue
}
var alertMsg alert.Message
if err := msg.DecodeObj(&alertMsg); err != nil {
t.Fatalf("Failed to unmarshal alert : %s", err.Error())
}
t.Logf("ws msg received with namespace %s and alertMsg UUID %s", msg.Namespace, alertMsg.UUID)
if alertMsg.UUID != al.UUID {
continue
}
alertNumber++
execCmds(t, cmd...)
cmd = downLo
}
return nil
}},
}
RunTest(t, test)
}
| [
"\"AGENT1_IP\""
]
| []
| [
"AGENT1_IP"
]
| [] | ["AGENT1_IP"] | go | 1 | 0 | |
mem/mem_linux_test.go | // +build linux
package mem
import (
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestVirtualMemoryEx(t *testing.T) {
v, err := VirtualMemoryEx()
if err != nil {
t.Error(err)
}
t.Log(v)
}
var virtualMemoryTests = []struct {
mockedRootFS string
stat *VirtualMemoryStat
}{
{"intelcorei5", &VirtualMemoryStat{
Total: 16502300672,
Available: 11495358464,
Used: 3437277184,
UsedPercent: 20.82907863769651,
Free: 8783491072,
Active: 4347392000,
Inactive: 2938834944,
Wired: 0,
Laundry: 0,
Buffers: 212496384,
Cached: 4069036032,
Writeback: 0,
Dirty: 176128,
WritebackTmp: 0,
Shared: 1222402048,
Slab: 253771776,
SReclaimable: 186470400,
SUnreclaim: 67301376,
PageTables: 65241088,
SwapCached: 0,
CommitLimit: 16509730816,
CommittedAS: 12360818688,
HighTotal: 0,
HighFree: 0,
LowTotal: 0,
LowFree: 0,
SwapTotal: 8258580480,
SwapFree: 8258580480,
Mapped: 1172627456,
VMallocTotal: 35184372087808,
VMallocUsed: 0,
VMallocChunk: 0,
HugePagesTotal: 0,
HugePagesFree: 0,
HugePageSize: 2097152},
},
{"issue1002", &VirtualMemoryStat{
Total: 260579328,
Available: 215199744,
Used: 34328576,
UsedPercent: 13.173944481121694,
Free: 124506112,
Active: 108785664,
Inactive: 8581120,
Wired: 0,
Laundry: 0,
Buffers: 4915200,
Cached: 96829440,
Writeback: 0,
Dirty: 0,
WritebackTmp: 0,
Shared: 0,
Slab: 9293824,
SReclaimable: 2764800,
SUnreclaim: 6529024,
PageTables: 405504,
SwapCached: 0,
CommitLimit: 130289664,
CommittedAS: 25567232,
HighTotal: 134217728,
HighFree: 67784704,
LowTotal: 126361600,
LowFree: 56721408,
SwapTotal: 0,
SwapFree: 0,
Mapped: 38793216,
VMallocTotal: 1996488704,
VMallocUsed: 0,
VMallocChunk: 0,
HugePagesTotal: 0,
HugePagesFree: 0,
HugePageSize: 0},
},
}
func TestVirtualMemoryLinux(t *testing.T) {
origProc := os.Getenv("HOST_PROC")
defer os.Setenv("HOST_PROC", origProc)
for _, tt := range virtualMemoryTests {
t.Run(tt.mockedRootFS, func(t *testing.T) {
os.Setenv("HOST_PROC", filepath.Join("testdata/linux/virtualmemory/", tt.mockedRootFS, "proc"))
stat, err := VirtualMemory()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if !reflect.DeepEqual(stat, tt.stat) {
t.Errorf("got: %+v\nwant: %+v", stat, tt.stat)
}
})
}
}
const validFile = `Filename Type Size Used Priority
/dev/dm-2 partition 67022844 490788 -2
/swapfile file 2 1 -3
`
const invalidFile = `INVALID Type Size Used Priority
/dev/dm-2 partition 67022844 490788 -2
/swapfile file 1048572 0 -3
`
func TestParseSwapsFile_ValidFile(t *testing.T) {
assert := assert.New(t)
stats, err := parseSwapsFile(strings.NewReader(validFile))
assert.NoError(err)
assert.Equal(*stats[0], SwapDevice{
Name: "/dev/dm-2",
UsedBytes: 502566912,
FreeBytes: 68128825344,
})
assert.Equal(*stats[1], SwapDevice{
Name: "/swapfile",
UsedBytes: 1024,
FreeBytes: 1024,
})
}
func TestParseSwapsFile_InvalidFile(t *testing.T) {
_, err := parseSwapsFile(strings.NewReader(invalidFile))
assert.Error(t, err)
}
func TestParseSwapsFile_EmptyFile(t *testing.T) {
_, err := parseSwapsFile(strings.NewReader(""))
assert.Error(t, err)
}
| [
"\"HOST_PROC\""
]
| []
| [
"HOST_PROC"
]
| [] | ["HOST_PROC"] | go | 1 | 0 | |
examples/tag-liker-bot/main.go | package main
import (
"log"
"os"
"github.com/klubprojekan/gotakinsta"
)
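// fetchTag fetches the feed for the given hashtag and likes every ranked item in it.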
func fetchTag(insta *goinsta.Instagram, tag string) error {
feedTag, err := insta.Feed.Tags(tag)
if err != nil {
return err
}
for _, item := range feedTag.RankedItems {
err = item.Like()
if err != nil {
log.Printf("error on liking item %s, %v", item.ID, err)
} else {
log.Printf("item %s liked", item.ID)
}
}
return nil
}
func main() {
insta := goinsta.New(
os.Getenv("INSTAGRAM_USERNAME"),
os.Getenv("INSTAGRAM_PASSWORD"),
)
if err := insta.Login(); err != nil {
log.Println(err)
return
}
defer insta.Logout()
for _, tag := range []string{
"golang",
"pizza",
"google",
} {
if err := fetchTag(insta, tag); err != nil {
log.Println(tag, err)
}
}
}
| [
"\"INSTAGRAM_USERNAME\"",
"\"INSTAGRAM_PASSWORD\""
]
| []
| [
"INSTAGRAM_PASSWORD",
"INSTAGRAM_USERNAME"
]
| [] | ["INSTAGRAM_PASSWORD", "INSTAGRAM_USERNAME"] | go | 2 | 0 | |
train.py | """
Trains a Pixel-CNN++ generative model on CIFAR-10 or Tiny ImageNet data.
Uses multiple GPUs, indicated by the flag --nr-gpu
Example usage:
CUDA_VISIBLE_DEVICES=0,1,2,3 python train.py --nr_gpu 4
"""
import os
import sys
import time
import json
import argparse
import numpy as np
import tensorflow as tf
import scipy.misc
import pixel_cnn_pp.nn as nn
import pixel_cnn_pp.plotting as plotting
from pixel_cnn_pp.model import model_spec, model_spec_encoder
import data.cifar10_data as cifar10_data
import data.imagenet_data as imagenet_data
from pixel_cnn_pp.encoder import compute_mutual_information, ComputeLL
# -----------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# data I/O
parser.add_argument('-i', '--data_dir', type=str, default='data', help='Location for the dataset')
parser.add_argument('-o', '--save_dir', type=str, default='elbo', help='Location for parameter checkpoints and samples')
parser.add_argument('-d', '--data_set', type=str, default='cifar', help='Can be either cifar|imagenet')
parser.add_argument('-t', '--save_interval', type=int, default=1, help='Every how many epochs to write checkpoint/samples?')
parser.add_argument('-r', '--load_params', dest='load_params', action='store_true', help='Restore training from previous model checkpoint?')
parser.add_argument('-name', '--name', type=str, default='elbo', help='Name of the network')
# model
parser.add_argument('-q', '--nr_resnet', type=int, default=5, help='Number of residual blocks per stage of the model')
parser.add_argument('-n', '--nr_filters', type=int, default=160, help='Number of filters to use across the model. Higher = larger model.')
parser.add_argument('-m', '--nr_logistic_mix', type=int, default=10, help='Number of logistic components in the mixture. Higher = more flexible model')
parser.add_argument('-z', '--resnet_nonlinearity', type=str, default='concat_elu', help='Which nonlinearity to use in the ResNet layers. One of "concat_elu", "elu", "relu" ')
parser.add_argument('-c', '--class_conditional', dest='class_conditional', action='store_true', help='Condition generative model on labels?')
parser.add_argument('-ae', '--use_autoencoder', dest='use_autoencoder', action='store_true', help='Use autoencoders?')
parser.add_argument('-reg', '--reg_type', type=str, default='elbo', help='Type of regularization to use for autoencoder')
parser.add_argument('-cs', '--chain_step', type=int, default=10, help='Steps to run Markov chain for sampling')
# optimization
parser.add_argument('-l', '--learning_rate', type=float, default=0.001, help='Base learning rate')
parser.add_argument('-e', '--lr_decay', type=float, default=0.999995, help='Learning rate decay, applied every step of the optimization')
parser.add_argument('-b', '--batch_size', type=int, default=12, help='Batch size during training per GPU')
parser.add_argument('-a', '--init_batch_size', type=int, default=80, help='How much data to use for data-dependent initialization.')
parser.add_argument('-p', '--dropout_p', type=float, default=0.5, help='Dropout strength (i.e. 1 - keep_prob). 0 = No dropout, higher = more dropout.')
parser.add_argument('-x', '--max_epochs', type=int, default=5000, help='How many epochs to run in total?')
parser.add_argument('-g', '--nr_gpu', type=int, default=2, help='How many GPUs to distribute the training across?')
parser.add_argument('-gid', '--gpu_id', type=str, default='', help='Which GPUs to use')
# evaluation
parser.add_argument('--polyak_decay', type=float, default=0.9995, help='Exponential decay rate of the sum of previous model iterates during Polyak averaging')
# reproducibility
parser.add_argument('-s', '--seed', type=int, default=1, help='Random seed to use')
args = parser.parse_args()
print('input args:\n', json.dumps(vars(args), indent=4, separators=(',',':'))) # pretty print args
# python train.py --use_autoencoder --save_dir=elbo --name=elbo --reg_type=elbo
# python train.py --use_autoencoder --save_dir=no_reg --name=no_reg --reg_type=no_reg
if args.gpu_id != "":
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
latent_dim = 20
args.latent_dim = latent_dim
# -----------------------------------------------------------------------------
# fix random seed for reproducibility
rng = np.random.RandomState(args.seed)
tf.set_random_seed(args.seed)
# initialize data loaders for train/test splits
if args.data_set == 'imagenet' and args.class_conditional:
raise("We currently don't have labels for the small imagenet data set")
DataLoader = {'cifar':cifar10_data.DataLoader, 'imagenet':imagenet_data.DataLoader}[args.data_set]
train_data = DataLoader(args.data_dir, 'train', args.batch_size * args.nr_gpu, rng=rng, shuffle=True, return_labels=args.class_conditional)
test_data = DataLoader(args.data_dir, 'test', args.batch_size * args.nr_gpu, shuffle=False, return_labels=args.class_conditional)
obs_shape = train_data.get_observation_size() # e.g. a tuple (32,32,3)
assert len(obs_shape) == 3, 'assumed right now'
# data place holders
x_init = tf.placeholder(tf.float32, shape=(args.init_batch_size,) + obs_shape)
xs = [tf.placeholder(tf.float32, shape=(args.batch_size, ) + obs_shape) for i in range(args.nr_gpu)]
encoder_x_init = tf.placeholder(tf.float32, shape=(args.init_batch_size,) + obs_shape)
encoder_x = [tf.placeholder(tf.float32, shape=(args.batch_size, ) + obs_shape) for i in range(args.nr_gpu)]
# if the model is class-conditional we'll set up label placeholders + one-hot encodings 'h' to condition on
if args.class_conditional:
num_labels = train_data.get_num_labels()
y_init = tf.placeholder(tf.int32, shape=(args.init_batch_size,))
h_init = tf.one_hot(y_init, num_labels)
y_sample = np.split(np.mod(np.arange(args.batch_size*args.nr_gpu), num_labels), args.nr_gpu)
h_sample = [tf.one_hot(tf.Variable(y_sample[i], trainable=False), num_labels) for i in range(args.nr_gpu)]
ys = [tf.placeholder(tf.int32, shape=(args.batch_size,)) for i in range(args.nr_gpu)]
hs = [tf.one_hot(ys[i], num_labels) for i in range(args.nr_gpu)]
elif args.use_autoencoder:
# h_init = tf.placeholder(tf.float32, shape=(args.init_batch_size, latent_dim))
h_sample = [tf.placeholder(tf.float32, shape=(args.batch_size, latent_dim)) for i in range(args.nr_gpu)]
else:
h_init = None
h_sample = [None] * args.nr_gpu
hs = h_sample
# create the model
model_opt = { 'nr_resnet': args.nr_resnet, 'nr_filters': args.nr_filters, 'nr_logistic_mix': args.nr_logistic_mix, 'resnet_nonlinearity': args.resnet_nonlinearity }
model = tf.make_template('model', model_spec)
if args.use_autoencoder:
encoder_opt = model_opt.copy()
encoder_opt['reg_type'] = args.reg_type
encoder_opt['latent_dim'] = latent_dim
encoder_model = tf.make_template('encoder', model_spec_encoder)
# run once for data dependent initialization of parameters
if args.use_autoencoder:
encoder = encoder_model(encoder_x_init, init=True, dropout_p=args.dropout_p, **encoder_opt)
gen_par = model(x_init, encoder.pred, init=True, dropout_p=args.dropout_p, **model_opt)
else:
gen_par = model(x_init, h_init, init=True, dropout_p=args.dropout_p, **model_opt)
# keep track of moving average
all_params = tf.trainable_variables()
ema = tf.train.ExponentialMovingAverage(decay=args.polyak_decay)
maintain_averages_op = tf.group(ema.apply(all_params))
# get loss gradients over multiple GPUs
grads = []
loss_gen = []
loss_gen_reg = []
loss_gen_elbo = []
loss_gen_test = []
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
# train
if args.use_autoencoder:
encoder = encoder_model(encoder_x[i], ema=None, dropout_p=args.dropout_p, **encoder_opt)
gen_par = model(xs[i], encoder.pred, ema=None, dropout_p=args.dropout_p, **model_opt)
loss_gen_reg.append(encoder.reg_loss)
loss_gen_elbo.append(encoder.elbo_loss)
else:
gen_par = model(xs[i], hs[i], ema=None, dropout_p=args.dropout_p, **model_opt)
loss_gen.append(nn.discretized_mix_logistic_loss(xs[i], gen_par))
# gradients
if args.use_autoencoder:
total_loss = loss_gen[i] + loss_gen_reg[i]
else:
total_loss = loss_gen[i]
grads.append(tf.gradients(total_loss, all_params))
# test
if args.use_autoencoder:
encoder = encoder_model(encoder_x[i], ema=ema, dropout_p=0., **encoder_opt)
gen_par = model(xs[i], encoder.pred, ema=ema, dropout_p=0., **model_opt)
else:
gen_par = model(xs[i], hs[i], ema=ema, dropout_p=0., **model_opt)
loss_gen_test.append(nn.discretized_mix_logistic_loss(xs[i], gen_par))
# add losses and gradients together and get training updates
tf_lr = tf.placeholder(tf.float32, shape=[])
with tf.device('/gpu:0'):
for i in range(1,args.nr_gpu):
loss_gen[0] += loss_gen[i]
loss_gen_test[0] += loss_gen_test[i]
if args.use_autoencoder:
loss_gen_reg[0] += loss_gen_reg[i]
loss_gen_elbo[0] += loss_gen_elbo[i]
for j in range(len(grads[0])):
grads[0][j] += grads[i][j]
# training op
tf.summary.scalar('ll_loss', loss_gen[0])
if args.use_autoencoder:
tf.summary.scalar('reg', loss_gen_reg[0])
tf.summary.scalar('elbo', loss_gen_elbo[0])
optimizer = tf.group(nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95, mom2=0.9995), maintain_averages_op)
# convert loss to bits/dim
bits_per_dim = loss_gen[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)
bits_per_dim_test = loss_gen_test[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)
tf.summary.scalar('ll_bits_per_dim', bits_per_dim)
# sample from the model
new_x_gen = []
encoder_list = []
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
if args.use_autoencoder:
encoder = encoder_model(encoder_x[i], ema=ema, dropout_p=0, **encoder_opt)
gen_par = model(xs[i], h_sample[i], ema=ema, dropout_p=0, **model_opt)
encoder_list.append(encoder)
else:
gen_par = model(xs[i], h_sample[i], ema=ema, dropout_p=0, **model_opt)
new_x_gen.append(nn.sample_from_discretized_mix_logistic(gen_par, args.nr_logistic_mix))
compute_ll = ComputeLL(latent_dim)
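# Draw a full batch of images pixel by pixel from the unconditional model.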
def sample_from_model(sess):
x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
for yi in range(obs_shape[0]):
for xi in range(obs_shape[1]):
new_x_gen_np = sess.run(new_x_gen, {xs[i]: x_gen[i] for i in range(args.nr_gpu)})
for i in range(args.nr_gpu):
x_gen[i][:,yi,xi,:] = new_x_gen_np[i][:,yi,xi,:]
return np.concatenate(x_gen, axis=0)
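# Draw samples conditioned on latent codes sampled from the standard normal prior.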
def sample_from_decoder_prior(sess):
x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
latent_code = [np.random.normal(size=(args.batch_size, latent_dim)) for i in range(args.nr_gpu)]
for yi in range(obs_shape[0]):
for xi in range(obs_shape[1]):
feed_dict = {xs[i]: x_gen[i] for i in range(args.nr_gpu)}
feed_dict.update({h_sample[i]: latent_code[i] for i in range(args.nr_gpu)})
new_x_gen_np = sess.run(new_x_gen, feed_dict)
for i in range(args.nr_gpu):
x_gen[i][:,yi,xi,:] = new_x_gen_np[i][:,yi,xi,:]
return np.concatenate(x_gen, axis=0)
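# Run a Markov chain that alternates encoding the current images and decoding new samples,
# keeping every intermediate batch in the returned history.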
def sample_from_markov_chain(sess, initial=None):
history = []
if initial is None:
encoder_current = [np.random.uniform(0.0, 1.0, (args.batch_size,) + obs_shape) for i in range(args.nr_gpu)]
else:
encoder_current = np.split(initial, args.nr_gpu)
latent_op = [encoder.pred for encoder in encoder_list]
num_steps = args.chain_step
history.append(np.concatenate(encoder_current, axis=0))
for step in range(num_steps):
start_time = time.time()
feed_dict = {encoder_x[i]: encoder_current[i] for i in range(args.nr_gpu)}
latent_code = sess.run(latent_op, feed_dict)
x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
for yi in range(obs_shape[0]):
for xi in range(obs_shape[1]):
feed_dict = {xs[i]: x_gen[i] for i in range(args.nr_gpu)}
feed_dict.update({h_sample[i]: latent_code[i] for i in range(args.nr_gpu)})
new_x_gen_np = sess.run(new_x_gen, feed_dict)
for i in range(args.nr_gpu):
x_gen[i][:,yi,xi,:] = new_x_gen_np[i][:,yi,xi,:]
history.append(np.concatenate(x_gen, axis=0))
encoder_current = x_gen
print("%d (%fs)" % (step, time.time() - start_time))
sys.stdout.flush()
return history
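# Tile the chain history into a single canvas: one row per sample, one column per chain step.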
def plot_markov_chain(history):
canvas = np.zeros((args.nr_gpu*args.batch_size*obs_shape[0], len(history)*obs_shape[1], obs_shape[2]))
for i in range(args.nr_gpu*args.batch_size):
for j in range(len(history)):
canvas[i*obs_shape[0]:(i+1)*obs_shape[0], j*obs_shape[1]:(j+1)*obs_shape[1], :] = history[j][i]
return canvas
# init & save
initializer = tf.initialize_all_variables()
saver = tf.train.Saver()
all_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(logdir=args.save_dir)
file_logger = open(os.path.join(args.save_dir, 'train_log'), 'w')
# turn numpy inputs into feed_dict for use with tensorflow
def make_feed_dict(data, init=False):
if type(data) is tuple:
x,y = data
else:
x = data
y = None
x = np.cast[np.float32]((x - 127.5) / 127.5) # input to pixelCNN is scaled from uint8 [0,255] to float in range [-1,1]
if init:
feed_dict = {x_init: x}
if args.use_autoencoder:
feed_dict.update({encoder_x_init: x})
if y is not None:
feed_dict.update({y_init: y})
else:
x = np.split(x, args.nr_gpu)
feed_dict = {xs[i]: x[i] for i in range(args.nr_gpu)}
if args.use_autoencoder:
feed_dict.update({encoder_x[i]: x[i] for i in range(args.nr_gpu)})
if y is not None:
y = np.split(y, args.nr_gpu)
feed_dict.update({ys[i]: y[i] for i in range(args.nr_gpu)})
return feed_dict
# //////////// perform training //////////////
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
print('starting training')
test_bpd = []
lr = args.learning_rate
global_step = 0
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9, allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
for epoch in range(args.max_epochs):
# init
if epoch == 0:
feed_dict = make_feed_dict(train_data.next(args.init_batch_size), init=True) # manually retrieve exactly init_batch_size examples
train_data.reset() # rewind the iterator back to 0 to do one full epoch
sess.run(initializer, feed_dict)
print('initializing the model...')
if args.load_params:
ckpt_file = args.save_dir + '/params_' + args.data_set + '.ckpt'
print('restoring parameters from', ckpt_file)
saver.restore(sess, ckpt_file)
# Compute mutual information
file_logger.write("%d " % epoch)
if args.use_autoencoder:
mutual_info = compute_mutual_information(data=train_data, args=args, sess=sess, encoder_list=encoder_list, ll_compute=compute_ll)
train_data.reset()
file_logger.write("%f " % mutual_info)
file_logger.flush()
# generate samples from the model
if args.use_autoencoder and epoch % 20 == 0:
print("Generating MC")
start_time = time.time()
initial = np.random.uniform(0.0, 1.0, (args.batch_size * args.nr_gpu,) + obs_shape)
for mc_step in range(100):
sample_history = sample_from_markov_chain(sess, initial)
initial = sample_history[-1]
sample_plot = plot_markov_chain(sample_history)
scipy.misc.imsave(os.path.join(args.save_dir, '%s_mc%d.png' % (args.data_set, mc_step)), sample_plot)
print("Finished, time elapsed %fs" % (time.time() - start_time))
exit(0)
# generate samples from the model
if epoch % 2 == 0:
print("Generating samples")
start_time = time.time()
if args.use_autoencoder:
sample_x = sample_from_decoder_prior(sess)
else:
sample_x = sample_from_model(sess)
img_tile = plotting.img_tile(sample_x[:int(np.floor(np.sqrt(args.batch_size * args.nr_gpu)) ** 2)],
aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title=args.data_set + ' samples')
plotting.plt.savefig(os.path.join(args.save_dir, '%s_sample%d.png' % (args.data_set, epoch)))
plotting.plt.close('all')
print("Finished, time elapsed %fs" % (time.time() - start_time))
begin = time.time()
# train for one epoch
train_losses = []
batch_c = 10
for d in train_data:
feed_dict = make_feed_dict(d)
# forward/backward/update model on each gpu
lr *= args.lr_decay
feed_dict.update({ tf_lr: lr })
l, _, summaries = sess.run([bits_per_dim, optimizer, all_summary], feed_dict)
train_losses.append(l)
if global_step % 5 == 0:
writer.add_summary(summaries, global_step)
global_step += 1
train_loss_gen = np.mean(train_losses)
# compute likelihood over test data
test_losses = []
for d in test_data:
feed_dict = make_feed_dict(d)
l = sess.run(bits_per_dim_test, feed_dict)
test_losses.append(l)
test_loss_gen = np.mean(test_losses)
test_bpd.append(test_loss_gen)
file_logger.write("%f\n" % test_loss_gen)
# log progress to console
print("Iteration %d, time = %ds, train bits_per_dim = %.4f, test bits_per_dim = %.4f" % (epoch, time.time()-begin, train_loss_gen, test_loss_gen))
sys.stdout.flush()
if epoch % args.save_interval == 0:
# save params
saver.save(sess, args.save_dir + '/params_' + args.data_set + '.ckpt')
np.savez(args.save_dir + '/test_bpd_' + args.data_set + '.npz', test_bpd=np.array(test_bpd))
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
webapp/src/main/java/org/example/pipeline/Database.java | package org.example.pipeline;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.CouchbaseCluster;
import java.util.concurrent.TimeUnit;
/**
* @author arungupta
*/
public class Database {
static CouchbaseCluster cluster;
static Bucket bucket;
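    // Lazily creates the cluster connection, falling back to localhost when DB_URI is not set.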
public static final CouchbaseCluster getCluster() {
if (null == cluster) {
System.out.println(System.getenv());
String host = System.getenv("DB_URI");
if (null == host) {
host = "localhost";
System.out.println("Invalid host, setting to " + host);
}
System.out.println("Using host: " + host);
cluster = CouchbaseCluster.create(host);
}
return cluster;
}
public static Bucket getBucket(String bucketName) {
if (null == bucket) {
bucket = getCluster().openBucket(bucketName, 30, TimeUnit.SECONDS);
}
return bucket;
}
}
| [
"\"DB_URI\""
]
| []
| [
"DB_URI"
]
| [] | ["DB_URI"] | java | 1 | 0 | |
go/src/github.com/hashicorp/terraform/builtin/providers/vcd/resource_vcd_firewall_rules_test.go | package vcd
import (
"fmt"
"log"
"os"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/hmrc/vmware-govcd"
)
func TestAccVcdFirewallRules_basic(t *testing.T) {
var existingRules, fwRules govcd.EdgeGateway
newConfig := createFirewallRulesConfigs(&existingRules)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: newConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckVcdFirewallRulesExists("vcd_firewall_rules.bar", &fwRules),
testAccCheckVcdFirewallRulesAttributes(&fwRules, &existingRules),
),
},
},
})
}
func testAccCheckVcdFirewallRulesExists(n string, gateway *govcd.EdgeGateway) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Record ID is set")
}
conn := testAccProvider.Meta().(*VCDClient)
resp, err := conn.OrgVdc.FindEdgeGateway(rs.Primary.ID)
if err != nil {
return fmt.Errorf("Edge Gateway does not exist.")
}
*gateway = resp
return nil
}
}
func testAccCheckVcdFirewallRulesAttributes(newRules, existingRules *govcd.EdgeGateway) resource.TestCheckFunc {
return func(s *terraform.State) error {
if len(newRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule) != len(existingRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule)+1 {
return fmt.Errorf("New firewall rule not added: %d != %d",
len(newRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule),
len(existingRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule)+1)
}
return nil
}
}
func createFirewallRulesConfigs(existingRules *govcd.EdgeGateway) string {
config := Config{
User: os.Getenv("VCD_USER"),
Password: os.Getenv("VCD_PASSWORD"),
Org: os.Getenv("VCD_ORG"),
Href: os.Getenv("VCD_URL"),
VDC: os.Getenv("VCD_VDC"),
MaxRetryTimeout: 240,
}
conn, err := config.Client()
if err != nil {
return fmt.Sprintf(testAccCheckVcdFirewallRules_add, "", "")
}
edgeGateway, _ := conn.OrgVdc.FindEdgeGateway(os.Getenv("VCD_EDGE_GATWEWAY"))
*existingRules = edgeGateway
log.Printf("[DEBUG] Edge gateway: %#v", edgeGateway)
firewallRules := *edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService
return fmt.Sprintf(testAccCheckVcdFirewallRules_add, os.Getenv("VCD_EDGE_GATEWAY"), firewallRules.DefaultAction)
}
const testAccCheckVcdFirewallRules_add = `
resource "vcd_firewall_rules" "bar" {
edge_gateway = "%s"
default_action = "%s"
rule {
description = "Test rule"
policy = "allow"
protocol = "any"
destination_port = "any"
destination_ip = "any"
source_port = "any"
source_ip = "any"
}
}
`
| [
"\"VCD_USER\"",
"\"VCD_PASSWORD\"",
"\"VCD_ORG\"",
"\"VCD_URL\"",
"\"VCD_VDC\"",
"\"VCD_EDGE_GATWEWAY\"",
"\"VCD_EDGE_GATEWAY\""
]
| []
| [
"VCD_EDGE_GATWEWAY",
"VCD_PASSWORD",
"VCD_ORG",
"VCD_VDC",
"VCD_URL",
"VCD_USER",
"VCD_EDGE_GATEWAY"
]
| [] | ["VCD_EDGE_GATWEWAY", "VCD_PASSWORD", "VCD_ORG", "VCD_VDC", "VCD_URL", "VCD_USER", "VCD_EDGE_GATEWAY"] | go | 7 | 0 | |
src/test/java/uk/nhs/digital/mait/tkwx/tk/handlers/SpineAsynchronousSoapRequestHandlerTest.java | /*
Copyright 2012-13 Simon Farrow <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uk.nhs.digital.mait.tkwx.tk.handlers;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.Properties;
import uk.nhs.digital.mait.tkwx.AbstractHandler;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import uk.nhs.digital.mait.tkwx.http.HttpRequest;
import uk.nhs.digital.mait.tkwx.http.HttpResponse;
import static uk.nhs.digital.mait.tkwx.mesh.MeshDataTest.deleteFolderAndContents;
import static uk.nhs.digital.mait.tkwx.tk.GeneralConstants.*;
import static uk.nhs.digital.mait.tkwx.tk.PropertyNameConstants.SAVEDMESSAGES_PROPERTY;
import uk.nhs.digital.mait.tkwx.tk.boot.HttpTransport;
import uk.nhs.digital.mait.tkwx.tk.boot.SimulatorMode;
import uk.nhs.digital.mait.tkwx.tk.boot.ToolkitSimulator;
import static uk.nhs.digital.mait.tkwx.tk.handlers.SpineAsynchronousWorkerTest.CLIENT_ASID;
/**
*
* @author sifa2
*/
public class SpineAsynchronousSoapRequestHandlerTest extends AbstractHandler {
private static String m;
private SpineAsynchronousSoapRequestHandler instance;
private HttpRequest req;
private HttpResponse resp;
private static File simulatorSavedMessages;
public SpineAsynchronousSoapRequestHandlerTest() {
}
@BeforeClass
public static void setUpClass() throws FileNotFoundException, IOException {
AbstractHandler.setUpClass(System.getenv("TKWROOT") + "/contrib/SPINE_Test_Messages/MTH_Test_Messages/PDS2008A_Example_Input_Msg/QUPA_IN000006UK02_QUPA_IN000011UK02.xml");
m = content.replaceFirst("(?s)^.*?(<SOAP:Envelope)", "$1").replaceFirst("(?s)(</SOAP:Envelope>).*$", "$1");
Properties props = new Properties();
props.load(new FileReader(System.getenv("TKWROOT") + "/config/SPINE_MTH/tkw-x.properties"));
simulatorSavedMessages = new File(props.getProperty(SAVEDMESSAGES_PROPERTY) + "/" + CLIENT_ASID);
if (simulatorSavedMessages.exists()) {
deleteFolderAndContents(simulatorSavedMessages);
}
}
@AfterClass
public static void tearDownClass() throws IOException {
deleteFolderAndContents(simulatorSavedMessages);
AbstractHandler.tearDownClass();
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
ToolkitSimulator t = new ToolkitSimulator(System.getenv("TKWROOT") + "/config/SPINE_MTH/tkw-x.properties");
SimulatorMode m = new SimulatorMode();
m.init(t);
instance = new SpineAsynchronousSoapRequestHandler();
instance.setSavedMessagesDirectory(TEMPFOLDER);
instance.setToolkit(new HttpTransport());
req = new HttpRequest("id");
req.setHeader(SOAP_ACTION_HEADER, "urn:nhs:names:services:pdsquery/QUPA_IN000006UK02");
req.setHeader(CONTENT_LENGTH_HEADER, "" + content.length());
req.setRequestContext("/reliablemessaging/reliablerequest");
req.setRequestType("POST");
req.setInputStream(istream);
resp = new HttpResponse(ostream);
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
}
/**
* Test of extractFromPartyID method, of class
* SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testExtractFromPartyID() throws Exception {
System.out.println("extractFromPartyID");
String expResult = "RHM-801710";
String result = instance.extractFromPartyID(m);
assertEquals(expResult, result);
}
/**
* Test of extractToPartyID method, of class
* SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testExtractToPartyID() throws Exception {
System.out.println("extractToPartyID");
String expResult = "SIAB-C-1";
String result = instance.extractToPartyID(m);
assertEquals(expResult, result);
}
/**
* Test of extractConversationID method, of class
* SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testExtractConversationID() throws Exception {
System.out.println("extractConversationID");
String expResult = "993C8839-8FC0-11E2-A805-AFEB563F31BA";
String result = instance.extractConversationID(m);
assertEquals(expResult, result);
}
/**
* Test of extractCPAID method, of class
* SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testExtractCPAID() throws Exception {
System.out.println("extractCPAID");
String expResult = "S2001924A2012004";
String result = instance.extractCPAID(m);
assertEquals(expResult, result);
}
/**
* Test of extractMessageId method, of class
* SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testExtractMessageId() throws Exception {
System.out.println("extractMessageId");
String expResult = "993C8839-8FC0-11E2-A805-AFEB563F31BA";
String result = instance.extractMessageId(m);
assertEquals(expResult, result);
}
/**
* Test of extractRcvAsid method, of class
* SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testExtractRcvAsid() throws Exception {
System.out.println("extractRcvAsid");
String expResult = "";
String result = instance.extractRcvAsid(m);
assertEquals(expResult, result);
}
/**
* Test of extractSndAsid method, of class
* SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testExtractSndAsid() throws Exception {
System.out.println("extractSndAsid");
String expResult = "";
String result = instance.extractSndAsid(m);
assertEquals(expResult, result);
}
/**
* Test of setToolkit method, of class SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testSetToolkit() throws Exception {
System.out.println("setToolkit");
HttpTransport t = new HttpTransport();
instance.setToolkit(t);
}
/**
* Test of handle method, of class SpineAsynchronousSoapRequestHandler.
*
* @throws java.lang.Exception
*/
@Test
public void testHandle() throws Exception {
System.out.println("handle");
String path = "ignored";
String params = "ignored";
instance.handle(path, params, req, resp);
String respResult = resp.getHttpHeader();
assertTrue(respResult.contains("202 Accepted"));
assertEquals(2, simulatorSavedMessages.list().length);
}
/**
* Test of getAckLoadException method, of class
* SpineAsynchronousSoapRequestHandler.
*/
@Test
public void testGetAckLoadException() {
System.out.println("getAckLoadException");
Exception expResult = null;
Exception result = instance.getAckLoadException();
assertEquals(expResult, result);
}
/**
* Test of getSyncAckTemplate method, of class
* SpineAsynchronousSoapRequestHandler.
*/
@Test
public void testGetSyncAckTemplate() {
System.out.println("getSyncAckTemplate");
String expResult = "SOAP:Envelope";
String result = instance.getSyncAckTemplate();
assertTrue(result.contains(expResult));
}
/**
* Test of getAsyncWrapper method, of class
* SpineAsynchronousSoapRequestHandler.
*/
@Test
public void testGetAsyncWrapper() {
System.out.println("getAsyncWrapper");
String expResult = "RelatesTo";
String result = instance.getAsyncWrapper();
assertTrue(result.contains(expResult));
}
/**
* Test of getTimestampOffset method, of class
* SpineAsynchronousSoapRequestHandler.
*/
@Test
public void testGetTimestampOffset() {
System.out.println("getTimestampOffset");
int expResult = 0;
int result = instance.getTimestampOffset();
assertEquals(expResult, result);
}
}
| [
"\"TKWROOT\"",
"\"TKWROOT\"",
"\"TKWROOT\""
]
| []
| [
"TKWROOT"
]
| [] | ["TKWROOT"] | java | 1 | 0 | |
cmd/kube-apiserver/app/server.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
package app
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/go-openapi/spec"
"github.com/golang/glog"
"github.com/pborman/uuid"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/openapi"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/admission"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/filters"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/informers"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
generatedopenapi "k8s.io/kubernetes/pkg/generated/openapi"
"k8s.io/kubernetes/pkg/kubeapiserver"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/master/tunneler"
"k8s.io/kubernetes/pkg/registry/cachesize"
"k8s.io/kubernetes/pkg/version"
)
// NewAPIServerCommand creates a *cobra.Command object with default parameters
func NewAPIServerCommand() *cobra.Command {
s := options.NewServerRunOptions()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-apiserver",
Long: `The Kubernetes API server validates and configures data
for the api objects which include pods, services, replicationcontrollers, and
others. The API Server services REST operations and provides the frontend to the
cluster's shared state through which all other components interact.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// Run runs the specified APIServer. This should never exit.
func Run(s *options.ServerRunOptions) error {
// set defaults
if err := s.GenericServerRunOptions.DefaultAdvertiseAddress(s.SecureServing, s.InsecureServing); err != nil {
return err
}
serviceIPRange, apiServerServiceIP, err := master.DefaultServiceIPRange(s.ServiceClusterIPRange)
if err != nil {
return fmt.Errorf("error determining service IP ranges: %v", err)
}
if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts(s.GenericServerRunOptions.AdvertiseAddress.String(), apiServerServiceIP); err != nil {
return fmt.Errorf("error creating self-signed certificates: %v", err)
}
if err := s.CloudProvider.DefaultExternalHost(s.GenericServerRunOptions); err != nil {
return fmt.Errorf("error setting the external host value: %v", err)
}
s.Authentication.ApplyAuthorization(s.Authorization)
// validate options
if errs := s.Validate(); len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
// create config from options
genericConfig := genericapiserver.NewConfig().
WithSerializer(api.Codecs)
if err := s.GenericServerRunOptions.ApplyTo(genericConfig); err != nil {
return err
}
if err := s.InsecureServing.ApplyTo(genericConfig); err != nil {
return err
}
if err := s.SecureServing.ApplyTo(genericConfig); err != nil {
return err
}
if err := s.Authentication.ApplyTo(genericConfig); err != nil {
return err
}
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: s.AllowPrivileged,
// TODO(vmarmol): Implement support for HostNetworkSources.
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{},
HostPIDSources: []string{},
HostIPCSources: []string{},
},
PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
})
// Setup nodeTunneler if needed
var nodeTunneler tunneler.Tunneler
var proxyDialerFn utilnet.DialFunc
if len(s.SSHUser) > 0 {
// Get ssh key distribution func, if supported
var installSSHKey tunneler.InstallSSHKey
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider.CloudProvider, s.CloudProvider.CloudConfigFile)
if err != nil {
return fmt.Errorf("cloud provider could not be initialized: %v", err)
}
if cloud != nil {
if instances, supported := cloud.Instances(); supported {
installSSHKey = instances.AddSSHKeyToAllInstances
}
}
if s.KubeletConfig.Port == 0 {
return fmt.Errorf("must enable kubelet port if proxy ssh-tunneling is specified")
}
if s.KubeletConfig.ReadOnlyPort == 0 {
return fmt.Errorf("must enable kubelet readonly port if proxy ssh-tunneling is specified")
}
// Set up the nodeTunneler
// TODO(cjcullen): If we want this to handle per-kubelet ports or other
// kubelet listen-addresses, we need to plumb through options.
healthCheckPath := &url.URL{
Scheme: "http",
Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.ReadOnlyPort), 10)),
Path: "healthz",
}
nodeTunneler = tunneler.New(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSHKey)
// Use the nodeTunneler's dialer to connect to the kubelet
s.KubeletConfig.Dial = nodeTunneler.Dial
// Use the nodeTunneler's dialer when proxying to pods, services, and nodes
proxyDialerFn = nodeTunneler.Dial
}
// Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}
if s.Etcd.StorageConfig.DeserializationCacheSize == 0 {
// When size of cache is not explicitly set, estimate its size based on
// target memory usage.
glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
// This heuristic tries to infer the maximum number of nodes in the
// cluster from the available memory capacity and sets cache sizes
// based on that value.
// Our documentation officially recommends 120GB machines for
// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
// capacity per node.
// TODO: We may consider deciding that some percentage of memory will
// be used for the deserialization cache and divide it by the max object
// size to compute its size. We may even go further and measure
// collective sizes of the objects in the cache.
clusterSize := s.GenericServerRunOptions.TargetRAMMB / 60
s.Etcd.StorageConfig.DeserializationCacheSize = 25 * clusterSize
if s.Etcd.StorageConfig.DeserializationCacheSize < 1000 {
s.Etcd.StorageConfig.DeserializationCacheSize = 1000
}
}
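// Worked example of the heuristic above (illustrative, not part of the
// original source): with TargetRAMMB set to 6000 the code infers a cluster of
// 6000/60 = 100 nodes and a deserialization cache of 25*100 = 2500 entries;
// anything below the 1000-entry floor is rounded up to 1000.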
storageGroupsToEncodingVersion, err := s.StorageSerialization.StorageGroupsToEncodingVersion()
if err != nil {
return fmt.Errorf("error generating storage version map: %s", err)
}
storageFactory, err := kubeapiserver.BuildDefaultStorageFactory(
s.Etcd.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs,
genericapiserver.NewDefaultResourceEncodingConfig(api.Registry), storageGroupsToEncodingVersion,
// FIXME: this GroupVersionResource override should be configurable
[]schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v2alpha1")},
master.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig)
if err != nil {
return fmt.Errorf("error in initializing storage factory: %s", err)
}
for _, override := range s.Etcd.EtcdServersOverrides {
tokens := strings.Split(override, "#")
if len(tokens) != 2 {
glog.Errorf("invalid value of etcd server overrides: %s", override)
continue
}
apiresource := strings.Split(tokens[0], "/")
if len(apiresource) != 2 {
glog.Errorf("invalid resource definition: %s", tokens[0])
continue
}
group := apiresource[0]
resource := apiresource[1]
groupResource := schema.GroupResource{Group: group, Resource: resource}
servers := strings.Split(tokens[1], ";")
storageFactory.SetEtcdLocation(groupResource, servers)
}
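// Illustrative example of the override syntax parsed above (not part of the
// original source): a value of "/events#http://127.0.0.1:4002" maps the
// "events" resource of the core (empty) API group to a dedicated etcd server,
// and several servers may be listed after the "#" separated by ";".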
// Default to the private server key for service account token signing
if len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != "" {
if kubeauthenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) {
s.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile}
} else {
glog.Warning("No TLS key provided, service account token authentication disabled")
}
}
authenticatorConfig := s.Authentication.ToAuthenticationConfig()
if s.Authentication.ServiceAccounts.Lookup {
// If we need to look up service accounts and tokens,
// go directly to etcd to avoid recursive auth insanity
storageConfig, err := storageFactory.NewConfig(api.Resource("serviceaccounts"))
if err != nil {
return fmt.Errorf("unable to get serviceaccounts storage: %v", err)
}
authenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource("serviceaccounts")), storageFactory.ResourcePrefix(api.Resource("secrets")))
}
apiAuthenticator, securityDefinitions, err := authenticatorConfig.New()
if err != nil {
return fmt.Errorf("invalid Authentication Config: %v", err)
}
privilegedLoopbackToken := uuid.NewRandom().String()
selfClientConfig, err := genericapiserver.NewSelfClientConfig(genericConfig.SecureServingInfo, genericConfig.InsecureServingInfo, privilegedLoopbackToken)
if err != nil {
return fmt.Errorf("failed to create clientset: %v", err)
}
client, err := internalclientset.NewForConfig(selfClientConfig)
if err != nil {
kubeAPIVersions := os.Getenv("KUBE_API_VERSIONS")
if len(kubeAPIVersions) == 0 {
return fmt.Errorf("failed to create clientset: %v", err)
}
// KUBE_API_VERSIONS is used in test-update-storage-objects.sh, disabling a number of API
// groups. This leads to a nil client above and undefined behaviour further down.
// TODO: get rid of KUBE_API_VERSIONS or define sane behaviour if set
glog.Errorf("Failed to create clientset with KUBE_API_VERSIONS=%q. KUBE_API_VERSIONS is only for testing. Things will break.", kubeAPIVersions)
}
sharedInformers := informers.NewSharedInformerFactory(nil, client, 10*time.Minute)
authorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers)
apiAuthorizer, err := authorizationConfig.New()
if err != nil {
return fmt.Errorf("invalid Authorization Config: %v", err)
}
admissionControlPluginNames := strings.Split(s.GenericServerRunOptions.AdmissionControl, ",")
pluginInitializer := kubeadmission.NewPluginInitializer(client, sharedInformers, apiAuthorizer)
admissionConfigProvider, err := admission.ReadAdmissionConfiguration(admissionControlPluginNames, s.GenericServerRunOptions.AdmissionControlConfigFile)
if err != nil {
return fmt.Errorf("failed to read plugin config: %v", err)
}
admissionController, err := admission.NewFromPlugins(admissionControlPluginNames, admissionConfigProvider, pluginInitializer)
if err != nil {
return fmt.Errorf("failed to initialize plugins: %v", err)
}
proxyTransport := utilnet.SetTransportDefaults(&http.Transport{
Dial: proxyDialerFn,
TLSClientConfig: proxyTLSClientConfig,
})
kubeVersion := version.Get()
genericConfig.Version = &kubeVersion
genericConfig.LoopbackClientConfig = selfClientConfig
genericConfig.Authenticator = apiAuthenticator
genericConfig.Authorizer = apiAuthorizer
genericConfig.AdmissionControl = admissionController
genericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, api.Scheme)
genericConfig.OpenAPIConfig.PostProcessSpec = postProcessOpenAPISpecForBackwardCompatibility
genericConfig.OpenAPIConfig.SecurityDefinitions = securityDefinitions
genericConfig.OpenAPIConfig.Info.Title = "Kubernetes"
genericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()
genericConfig.EnableMetrics = true
genericConfig.LongRunningFunc = filters.BasicLongRunningRequestCheck(
sets.NewString("watch", "proxy"),
sets.NewString("attach", "exec", "proxy", "log", "portforward"),
)
config := &master.Config{
GenericConfig: genericConfig,
APIResourceConfigSource: storageFactory.APIResourceConfigSource,
StorageFactory: storageFactory,
EnableWatchCache: s.GenericServerRunOptions.EnableWatchCache,
EnableCoreControllers: true,
DeleteCollectionWorkers: s.GenericServerRunOptions.DeleteCollectionWorkers,
EventTTL: s.EventTTL,
KubeletClientConfig: s.KubeletConfig,
EnableUISupport: true,
EnableLogsSupport: true,
ProxyTransport: proxyTransport,
Tunneler: nodeTunneler,
ServiceIPRange: serviceIPRange,
APIServerServiceIP: apiServerServiceIP,
APIServerServicePort: 443,
ServiceNodePortRange: s.ServiceNodePortRange,
KubernetesServiceNodePort: s.KubernetesServiceNodePort,
MasterCount: s.MasterCount,
}
if s.GenericServerRunOptions.EnableWatchCache {
glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB)
cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes)
}
m, err := config.Complete().New()
if err != nil {
return err
}
sharedInformers.Start(wait.NeverStop)
m.GenericAPIServer.PrepareRun().Run(wait.NeverStop)
return nil
}
// PostProcessSpec adds removed definitions for backward compatibility
func postProcessOpenAPISpecForBackwardCompatibility(s *spec.Swagger) (*spec.Swagger, error) {
compatibilityMap := map[string]string{
"v1beta1.DeploymentStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentStatus",
"v1beta1.ReplicaSetList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetList",
"v1beta1.Eviction": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.Eviction",
"v1beta1.StatefulSetList": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSetList",
"v1beta1.RoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleBinding",
"v1beta1.PodSecurityPolicyList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.PodSecurityPolicyList",
"v1.NodeSpec": "k8s.io/kubernetes/pkg/api/v1.NodeSpec",
"v1.FlockerVolumeSource": "k8s.io/kubernetes/pkg/api/v1.FlockerVolumeSource",
"v1.ContainerState": "k8s.io/kubernetes/pkg/api/v1.ContainerState",
"v1beta1.ClusterRole": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRole",
"v1beta1.StorageClass": "k8s.io/kubernetes/pkg/apis/storage/v1beta1.StorageClass",
"v1.FlexVolumeSource": "k8s.io/kubernetes/pkg/api/v1.FlexVolumeSource",
"v1.SecretKeySelector": "k8s.io/kubernetes/pkg/api/v1.SecretKeySelector",
"v1.DeleteOptions": "k8s.io/kubernetes/pkg/api/v1.DeleteOptions",
"v1.PodStatus": "k8s.io/kubernetes/pkg/api/v1.PodStatus",
"v1.NodeStatus": "k8s.io/kubernetes/pkg/api/v1.NodeStatus",
"v1.ServiceSpec": "k8s.io/kubernetes/pkg/api/v1.ServiceSpec",
"v1.AttachedVolume": "k8s.io/kubernetes/pkg/api/v1.AttachedVolume",
"v1.PersistentVolume": "k8s.io/kubernetes/pkg/api/v1.PersistentVolume",
"v1.LimitRangeList": "k8s.io/kubernetes/pkg/api/v1.LimitRangeList",
"v1alpha1.Role": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.Role",
"v1.Affinity": "k8s.io/kubernetes/pkg/api/v1.Affinity",
"v1beta1.PodDisruptionBudget": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudget",
"v1alpha1.RoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleBindingList",
"v1.PodAffinity": "k8s.io/kubernetes/pkg/api/v1.PodAffinity",
"v1beta1.SELinuxStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.SELinuxStrategyOptions",
"v1.ResourceQuotaList": "k8s.io/kubernetes/pkg/api/v1.ResourceQuotaList",
"v1.PodList": "k8s.io/kubernetes/pkg/api/v1.PodList",
"v1.EnvVarSource": "k8s.io/kubernetes/pkg/api/v1.EnvVarSource",
"v1beta1.TokenReviewStatus": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.TokenReviewStatus",
"v1.PersistentVolumeClaimList": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimList",
"v1beta1.RoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleList",
"v1.ListMeta": "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta",
"v1.ObjectMeta": "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta",
"v1.APIGroupList": "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList",
"v2alpha1.Job": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.Job",
"v1.EnvFromSource": "k8s.io/kubernetes/pkg/api/v1.EnvFromSource",
"v1beta1.IngressStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressStatus",
"v1.Service": "k8s.io/kubernetes/pkg/api/v1.Service",
"v1beta1.DaemonSetStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSetStatus",
"v1alpha1.Subject": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.Subject",
"v1.HorizontalPodAutoscaler": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscaler",
"v1.StatusCause": "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause",
"v1.NodeSelectorRequirement": "k8s.io/kubernetes/pkg/api/v1.NodeSelectorRequirement",
"v1beta1.NetworkPolicyIngressRule": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyIngressRule",
"v1beta1.ThirdPartyResource": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ThirdPartyResource",
"v1beta1.PodSecurityPolicy": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.PodSecurityPolicy",
"v1beta1.StatefulSet": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSet",
"v1.LabelSelector": "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector",
"v1.ScaleSpec": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.ScaleSpec",
"v1.DownwardAPIVolumeFile": "k8s.io/kubernetes/pkg/api/v1.DownwardAPIVolumeFile",
"v1beta1.HorizontalPodAutoscaler": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscaler",
"v1.AWSElasticBlockStoreVolumeSource": "k8s.io/kubernetes/pkg/api/v1.AWSElasticBlockStoreVolumeSource",
"v1.ComponentStatus": "k8s.io/kubernetes/pkg/api/v1.ComponentStatus",
"v2alpha1.JobSpec": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobSpec",
"v1.ContainerImage": "k8s.io/kubernetes/pkg/api/v1.ContainerImage",
"v1.ReplicationControllerStatus": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerStatus",
"v1.ResourceQuota": "k8s.io/kubernetes/pkg/api/v1.ResourceQuota",
"v1beta1.NetworkPolicyList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyList",
"v1beta1.NonResourceAttributes": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.NonResourceAttributes",
"v1.JobCondition": "k8s.io/kubernetes/pkg/apis/batch/v1.JobCondition",
"v1.LabelSelectorRequirement": "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement",
"v1beta1.Deployment": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.Deployment",
"v1.LoadBalancerIngress": "k8s.io/kubernetes/pkg/api/v1.LoadBalancerIngress",
"v1.SecretList": "k8s.io/kubernetes/pkg/api/v1.SecretList",
"v1beta1.ReplicaSetSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetSpec",
"v1beta1.RoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleBindingList",
"v1.ServicePort": "k8s.io/kubernetes/pkg/api/v1.ServicePort",
"v1.Namespace": "k8s.io/kubernetes/pkg/api/v1.Namespace",
"v1beta1.NetworkPolicyPeer": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyPeer",
"v1.ReplicationControllerList": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerList",
"v1beta1.ReplicaSetCondition": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetCondition",
"v1.ReplicationControllerCondition": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerCondition",
"v1.DaemonEndpoint": "k8s.io/kubernetes/pkg/api/v1.DaemonEndpoint",
"v1beta1.NetworkPolicyPort": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyPort",
"v1.NodeSystemInfo": "k8s.io/kubernetes/pkg/api/v1.NodeSystemInfo",
"v1.LimitRangeItem": "k8s.io/kubernetes/pkg/api/v1.LimitRangeItem",
"v1.ConfigMapVolumeSource": "k8s.io/kubernetes/pkg/api/v1.ConfigMapVolumeSource",
"v1beta1.ClusterRoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRoleList",
"v1beta1.ResourceAttributes": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.ResourceAttributes",
"v1.Pod": "k8s.io/kubernetes/pkg/api/v1.Pod",
"v1.FCVolumeSource": "k8s.io/kubernetes/pkg/api/v1.FCVolumeSource",
"v1beta1.SubresourceReference": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.SubresourceReference",
"v1.ResourceQuotaStatus": "k8s.io/kubernetes/pkg/api/v1.ResourceQuotaStatus",
"v1alpha1.RoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleBinding",
"v1.PodCondition": "k8s.io/kubernetes/pkg/api/v1.PodCondition",
"v1.GroupVersionForDiscovery": "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery",
"v1.NamespaceStatus": "k8s.io/kubernetes/pkg/api/v1.NamespaceStatus",
"v1.Job": "k8s.io/kubernetes/pkg/apis/batch/v1.Job",
"v1.PersistentVolumeClaimVolumeSource": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimVolumeSource",
"v1.Handler": "k8s.io/kubernetes/pkg/api/v1.Handler",
"v1.ComponentStatusList": "k8s.io/kubernetes/pkg/api/v1.ComponentStatusList",
"v1.ServerAddressByClientCIDR": "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR",
"v1.PodAntiAffinity": "k8s.io/kubernetes/pkg/api/v1.PodAntiAffinity",
"v1.ISCSIVolumeSource": "k8s.io/kubernetes/pkg/api/v1.ISCSIVolumeSource",
"v1.ContainerStateRunning": "k8s.io/kubernetes/pkg/api/v1.ContainerStateRunning",
"v1.WeightedPodAffinityTerm": "k8s.io/kubernetes/pkg/api/v1.WeightedPodAffinityTerm",
"v1beta1.HostPortRange": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HostPortRange",
"v1.HorizontalPodAutoscalerSpec": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscalerSpec",
"v1.HorizontalPodAutoscalerList": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscalerList",
"v1beta1.RoleRef": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleRef",
"v1.Probe": "k8s.io/kubernetes/pkg/api/v1.Probe",
"v1beta1.IngressTLS": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressTLS",
"v1beta1.ThirdPartyResourceList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ThirdPartyResourceList",
"v1beta1.DaemonSet": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSet",
"v1.APIGroup": "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup",
"v1beta1.Subject": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.Subject",
"v1beta1.DeploymentList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentList",
"v1.NodeAffinity": "k8s.io/kubernetes/pkg/api/v1.NodeAffinity",
"v1beta1.RollingUpdateDeployment": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.RollingUpdateDeployment",
"v1beta1.APIVersion": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.APIVersion",
"v1alpha1.CertificateSigningRequest": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequest",
"v1.CinderVolumeSource": "k8s.io/kubernetes/pkg/api/v1.CinderVolumeSource",
"v1.NamespaceSpec": "k8s.io/kubernetes/pkg/api/v1.NamespaceSpec",
"v1beta1.PodDisruptionBudgetSpec": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudgetSpec",
"v1.Patch": "k8s.io/apimachinery/pkg/apis/meta/v1.Patch",
"v1beta1.ClusterRoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRoleBinding",
"v1beta1.HorizontalPodAutoscalerSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscalerSpec",
"v1.PersistentVolumeClaimSpec": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimSpec",
"v1.Secret": "k8s.io/kubernetes/pkg/api/v1.Secret",
"v1.NodeCondition": "k8s.io/kubernetes/pkg/api/v1.NodeCondition",
"v1.LocalObjectReference": "k8s.io/kubernetes/pkg/api/v1.LocalObjectReference",
"runtime.RawExtension": "k8s.io/apimachinery/pkg/runtime.RawExtension",
"v1.PreferredSchedulingTerm": "k8s.io/kubernetes/pkg/api/v1.PreferredSchedulingTerm",
"v1.RBDVolumeSource": "k8s.io/kubernetes/pkg/api/v1.RBDVolumeSource",
"v1.KeyToPath": "k8s.io/kubernetes/pkg/api/v1.KeyToPath",
"v1.ScaleStatus": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.ScaleStatus",
"v1alpha1.PolicyRule": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.PolicyRule",
"v1.EndpointPort": "k8s.io/kubernetes/pkg/api/v1.EndpointPort",
"v1beta1.IngressList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressList",
"v1.EndpointAddress": "k8s.io/kubernetes/pkg/api/v1.EndpointAddress",
"v1.NodeSelector": "k8s.io/kubernetes/pkg/api/v1.NodeSelector",
"v1beta1.StorageClassList": "k8s.io/kubernetes/pkg/apis/storage/v1beta1.StorageClassList",
"v1.ServiceList": "k8s.io/kubernetes/pkg/api/v1.ServiceList",
"v2alpha1.CronJobSpec": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJobSpec",
"v1.ContainerStateTerminated": "k8s.io/kubernetes/pkg/api/v1.ContainerStateTerminated",
"v1beta1.TokenReview": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.TokenReview",
"v1beta1.IngressBackend": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressBackend",
"v1.Time": "k8s.io/apimachinery/pkg/apis/meta/v1.Time",
"v1beta1.IngressSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressSpec",
"v2alpha1.JobTemplateSpec": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobTemplateSpec",
"v1.LimitRange": "k8s.io/kubernetes/pkg/api/v1.LimitRange",
"v1beta1.UserInfo": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.UserInfo",
"v1.ResourceQuotaSpec": "k8s.io/kubernetes/pkg/api/v1.ResourceQuotaSpec",
"v1.ContainerPort": "k8s.io/kubernetes/pkg/api/v1.ContainerPort",
"v1beta1.HTTPIngressRuleValue": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HTTPIngressRuleValue",
"v1.AzureFileVolumeSource": "k8s.io/kubernetes/pkg/api/v1.AzureFileVolumeSource",
"v1beta1.NetworkPolicySpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicySpec",
"v1.PodTemplateSpec": "k8s.io/kubernetes/pkg/api/v1.PodTemplateSpec",
"v1.SecretVolumeSource": "k8s.io/kubernetes/pkg/api/v1.SecretVolumeSource",
"v1.PodSpec": "k8s.io/kubernetes/pkg/api/v1.PodSpec",
"v1.CephFSVolumeSource": "k8s.io/kubernetes/pkg/api/v1.CephFSVolumeSource",
"v1beta1.CPUTargetUtilization": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.CPUTargetUtilization",
"v1.Volume": "k8s.io/kubernetes/pkg/api/v1.Volume",
"v1beta1.Ingress": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.Ingress",
"v1beta1.HorizontalPodAutoscalerList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscalerList",
"v1.PersistentVolumeStatus": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeStatus",
"v1beta1.IDRange": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IDRange",
"v2alpha1.JobCondition": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobCondition",
"v1beta1.IngressRule": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressRule",
"v1alpha1.RoleRef": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleRef",
"v1.PodAffinityTerm": "k8s.io/kubernetes/pkg/api/v1.PodAffinityTerm",
"v1.ObjectReference": "k8s.io/kubernetes/pkg/api/v1.ObjectReference",
"v1.ServiceStatus": "k8s.io/kubernetes/pkg/api/v1.ServiceStatus",
"v1.APIResource": "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource",
"v1beta1.Scale": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.Scale",
"v1.AzureDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.AzureDiskVolumeSource",
"v1beta1.SubjectAccessReviewStatus": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SubjectAccessReviewStatus",
"v1.ConfigMap": "k8s.io/kubernetes/pkg/api/v1.ConfigMap",
"v1.CrossVersionObjectReference": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.CrossVersionObjectReference",
"v1.APIVersions": "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions",
"v1alpha1.ClusterRoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRoleList",
"v1.Node": "k8s.io/kubernetes/pkg/api/v1.Node",
"resource.Quantity": "k8s.io/kubernetes/pkg/api/resource.Quantity",
"v1.Event": "k8s.io/kubernetes/pkg/api/v1.Event",
"v1.JobStatus": "k8s.io/kubernetes/pkg/apis/batch/v1.JobStatus",
"v1.PersistentVolumeSpec": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeSpec",
"v1beta1.SubjectAccessReviewSpec": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SubjectAccessReviewSpec",
"v1.ResourceFieldSelector": "k8s.io/kubernetes/pkg/api/v1.ResourceFieldSelector",
"v1.EndpointSubset": "k8s.io/kubernetes/pkg/api/v1.EndpointSubset",
"v1alpha1.CertificateSigningRequestSpec": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestSpec",
"v1.HostPathVolumeSource": "k8s.io/kubernetes/pkg/api/v1.HostPathVolumeSource",
"v1.LoadBalancerStatus": "k8s.io/kubernetes/pkg/api/v1.LoadBalancerStatus",
"v1beta1.HTTPIngressPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HTTPIngressPath",
"v1beta1.Role": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.Role",
"v1beta1.DeploymentStrategy": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentStrategy",
"v1beta1.RunAsUserStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.RunAsUserStrategyOptions",
"v1beta1.DeploymentSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentSpec",
"v1.ExecAction": "k8s.io/kubernetes/pkg/api/v1.ExecAction",
"v1beta1.PodSecurityPolicySpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.PodSecurityPolicySpec",
"v1.HorizontalPodAutoscalerStatus": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscalerStatus",
"v1.PersistentVolumeList": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeList",
"v1alpha1.ClusterRole": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRole",
"v1.JobSpec": "k8s.io/kubernetes/pkg/apis/batch/v1.JobSpec",
"v1beta1.DaemonSetSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSetSpec",
"v2alpha1.CronJobList": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJobList",
"v1.Endpoints": "k8s.io/kubernetes/pkg/api/v1.Endpoints",
"v1.SELinuxOptions": "k8s.io/kubernetes/pkg/api/v1.SELinuxOptions",
"v1beta1.SelfSubjectAccessReviewSpec": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SelfSubjectAccessReviewSpec",
"v1beta1.ScaleStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ScaleStatus",
"v1.NodeSelectorTerm": "k8s.io/kubernetes/pkg/api/v1.NodeSelectorTerm",
"v1alpha1.CertificateSigningRequestStatus": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestStatus",
"v1.StatusDetails": "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails",
"v2alpha1.JobStatus": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobStatus",
"v1beta1.DeploymentRollback": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentRollback",
"v1.GlusterfsVolumeSource": "k8s.io/kubernetes/pkg/api/v1.GlusterfsVolumeSource",
"v1.ServiceAccountList": "k8s.io/kubernetes/pkg/api/v1.ServiceAccountList",
"v1.JobList": "k8s.io/kubernetes/pkg/apis/batch/v1.JobList",
"v1.EventList": "k8s.io/kubernetes/pkg/api/v1.EventList",
"v1.ContainerStateWaiting": "k8s.io/kubernetes/pkg/api/v1.ContainerStateWaiting",
"v1.APIResourceList": "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList",
"v1.ContainerStatus": "k8s.io/kubernetes/pkg/api/v1.ContainerStatus",
"v2alpha1.JobList": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobList",
"v1.ConfigMapKeySelector": "k8s.io/kubernetes/pkg/api/v1.ConfigMapKeySelector",
"v1.PhotonPersistentDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.PhotonPersistentDiskVolumeSource",
"v1.PodTemplateList": "k8s.io/kubernetes/pkg/api/v1.PodTemplateList",
"v1.PersistentVolumeClaimStatus": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimStatus",
"v1.ServiceAccount": "k8s.io/kubernetes/pkg/api/v1.ServiceAccount",
"v1alpha1.CertificateSigningRequestList": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestList",
"v1beta1.SupplementalGroupsStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.SupplementalGroupsStrategyOptions",
"v1.HTTPHeader": "k8s.io/kubernetes/pkg/api/v1.HTTPHeader",
"version.Info": "k8s.io/apimachinery/pkg/version.Info",
"v1.EventSource": "k8s.io/kubernetes/pkg/api/v1.EventSource",
"v1alpha1.ClusterRoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRoleBindingList",
"v1.OwnerReference": "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference",
"v1beta1.ClusterRoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRoleBindingList",
"v1beta1.ScaleSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ScaleSpec",
"v1.GitRepoVolumeSource": "k8s.io/kubernetes/pkg/api/v1.GitRepoVolumeSource",
"v1beta1.NetworkPolicy": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicy",
"v1.ConfigMapEnvSource": "k8s.io/kubernetes/pkg/api/v1.ConfigMapEnvSource",
"v1.PodTemplate": "k8s.io/kubernetes/pkg/api/v1.PodTemplate",
"v1beta1.DeploymentCondition": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentCondition",
"v1beta1.PodDisruptionBudgetStatus": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudgetStatus",
"v1.EnvVar": "k8s.io/kubernetes/pkg/api/v1.EnvVar",
"v1.LimitRangeSpec": "k8s.io/kubernetes/pkg/api/v1.LimitRangeSpec",
"v1.DownwardAPIVolumeSource": "k8s.io/kubernetes/pkg/api/v1.DownwardAPIVolumeSource",
"v1.NodeDaemonEndpoints": "k8s.io/kubernetes/pkg/api/v1.NodeDaemonEndpoints",
"v1.ComponentCondition": "k8s.io/kubernetes/pkg/api/v1.ComponentCondition",
"v1alpha1.CertificateSigningRequestCondition": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestCondition",
"v1.SecurityContext": "k8s.io/kubernetes/pkg/api/v1.SecurityContext",
"v1beta1.LocalSubjectAccessReview": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.LocalSubjectAccessReview",
"v1beta1.StatefulSetSpec": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSetSpec",
"v1.NodeAddress": "k8s.io/kubernetes/pkg/api/v1.NodeAddress",
"v1.QuobyteVolumeSource": "k8s.io/kubernetes/pkg/api/v1.QuobyteVolumeSource",
"v1.Capabilities": "k8s.io/kubernetes/pkg/api/v1.Capabilities",
"v1.GCEPersistentDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.GCEPersistentDiskVolumeSource",
"v1beta1.ReplicaSet": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSet",
"v1beta1.HorizontalPodAutoscalerStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscalerStatus",
"v1beta1.PolicyRule": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.PolicyRule",
"v1.ConfigMapList": "k8s.io/kubernetes/pkg/api/v1.ConfigMapList",
"v1.Lifecycle": "k8s.io/kubernetes/pkg/api/v1.Lifecycle",
"v1beta1.SelfSubjectAccessReview": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SelfSubjectAccessReview",
"v2alpha1.CronJob": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJob",
"v2alpha1.CronJobStatus": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJobStatus",
"v1beta1.SubjectAccessReview": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SubjectAccessReview",
"v1.Preconditions": "k8s.io/kubernetes/pkg/api/v1.Preconditions",
"v1beta1.DaemonSetList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSetList",
"v1.PersistentVolumeClaim": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaim",
"v1.Scale": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.Scale",
"v1beta1.StatefulSetStatus": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSetStatus",
"v1.NFSVolumeSource": "k8s.io/kubernetes/pkg/api/v1.NFSVolumeSource",
"v1.ObjectFieldSelector": "k8s.io/kubernetes/pkg/api/v1.ObjectFieldSelector",
"v1.ResourceRequirements": "k8s.io/kubernetes/pkg/api/v1.ResourceRequirements",
"v1.WatchEvent": "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent",
"v1.ReplicationControllerSpec": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerSpec",
"v1.HTTPGetAction": "k8s.io/kubernetes/pkg/api/v1.HTTPGetAction",
"v1beta1.RollbackConfig": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.RollbackConfig",
"v1beta1.TokenReviewSpec": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.TokenReviewSpec",
"v1.PodSecurityContext": "k8s.io/kubernetes/pkg/api/v1.PodSecurityContext",
"v1beta1.PodDisruptionBudgetList": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudgetList",
"v1.VolumeMount": "k8s.io/kubernetes/pkg/api/v1.VolumeMount",
"v1.ReplicationController": "k8s.io/kubernetes/pkg/api/v1.ReplicationController",
"v1.NamespaceList": "k8s.io/kubernetes/pkg/api/v1.NamespaceList",
"v1alpha1.ClusterRoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRoleBinding",
"v1.TCPSocketAction": "k8s.io/kubernetes/pkg/api/v1.TCPSocketAction",
"v1.Binding": "k8s.io/kubernetes/pkg/api/v1.Binding",
"v1beta1.ReplicaSetStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetStatus",
"intstr.IntOrString": "k8s.io/kubernetes/pkg/util/intstr.IntOrString",
"v1.EndpointsList": "k8s.io/kubernetes/pkg/api/v1.EndpointsList",
"v1.Container": "k8s.io/kubernetes/pkg/api/v1.Container",
"v1alpha1.RoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleList",
"v1.VsphereVirtualDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.VsphereVirtualDiskVolumeSource",
"v1.NodeList": "k8s.io/kubernetes/pkg/api/v1.NodeList",
"v1.EmptyDirVolumeSource": "k8s.io/kubernetes/pkg/api/v1.EmptyDirVolumeSource",
"v1beta1.FSGroupStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.FSGroupStrategyOptions",
"v1.Status": "k8s.io/apimachinery/pkg/apis/meta/v1.Status",
}
for k, v := range compatibilityMap {
if _, found := s.Definitions[v]; !found {
continue
}
s.Definitions[k] = spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: spec.MustCreateRef("#/definitions/" + openapi.EscapeJsonPointer(v)),
Description: fmt.Sprintf("Deprecated. Please use %s instead.", v),
},
}
}
return s, nil
}
| [
"\"KUBE_API_VERSIONS\""
]
| []
| [
"KUBE_API_VERSIONS"
]
| [] | ["KUBE_API_VERSIONS"] | go | 1 | 0 | |
alveo/neptune/service.py | import os
class Service(object):
"""
The base class for all services. All services inherit from this class.
"""
def __init__(self, prefix, artifacts, graph):
self._artifacts = artifacts
self._prefix = prefix
self._proc = None
self._graph = graph
def start(self, args):
# os.environ["XDNN_VERBOSE"] = "1"
# os.environ["XBLAS_EMIT_PROFILING_INFO"] = "1"
self._graph.serve(args, background=True)
def stop(self):
self._graph.stop()
| []
| []
| [
"XBLAS_EMIT_PROFILING_INFO",
"XDNN_VERBOSE"
]
| [] | ["XBLAS_EMIT_PROFILING_INFO", "XDNN_VERBOSE"] | python | 2 | 0 | |
src/cmd/cgo/main.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Cgo; see gmp.go for an overview.
// TODO(rsc):
// Emit correct line number annotations.
// Make gc understand the annotations.
package main
import (
"crypto/md5"
"flag"
"fmt"
"go/ast"
"go/printer"
"go/token"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"sort"
"strings"
)
// A Package collects information about the package we're going to write.
type Package struct {
PackageName string // name of package
PackagePath string
PtrSize int64
IntSize int64
GccOptions []string
GccIsClang bool
CgoFlags map[string][]string // #cgo flags (CFLAGS, LDFLAGS)
Written map[string]bool
Name map[string]*Name // accumulated Name from Files
ExpFunc []*ExpFunc // accumulated ExpFunc from Files
Decl []ast.Decl
GoFiles []string // list of Go files
GccFiles []string // list of gcc output files
Preamble string // collected preamble for _cgo_export.h
CgoChecks []string // see unsafeCheckPointerName
}
// A File collects information about a single Go input file.
type File struct {
AST *ast.File // parsed AST
Comments []*ast.CommentGroup // comments from file
Package string // Package name
Preamble string // C preamble (doc comment on import "C")
Ref []*Ref // all references to C.xxx in AST
Calls []*ast.CallExpr // all calls to C.xxx in AST
ExpFunc []*ExpFunc // exported functions for this file
Name map[string]*Name // map from Go name to Name
}
func nameKeys(m map[string]*Name) []string {
var ks []string
for k := range m {
ks = append(ks, k)
}
sort.Strings(ks)
return ks
}
// A Ref refers to an expression of the form C.xxx in the AST.
type Ref struct {
Name *Name
Expr *ast.Expr
Context string // "type", "expr", "call", or "call2"
}
func (r *Ref) Pos() token.Pos {
return (*r.Expr).Pos()
}
// A Name collects information about C.xxx.
type Name struct {
Go string // name used in Go referring to package C
Mangle string // name used in generated Go
C string // name used in C
Define string // #define expansion
Kind string // "const", "type", "var", "fpvar", "func", "not-type"
Type *Type // the type of xxx
FuncType *FuncType
AddError bool
Const string // constant definition
}
// IsVar reports whether Kind is either "var" or "fpvar"
func (n *Name) IsVar() bool {
return n.Kind == "var" || n.Kind == "fpvar"
}
// A ExpFunc is an exported function, callable from C.
// Such functions are identified in the Go input file
// by doc comments containing the line //export ExpName
type ExpFunc struct {
Func *ast.FuncDecl
ExpName string // name to use from C
Doc string
}
// A TypeRepr contains the string representation of a type.
type TypeRepr struct {
Repr string
FormatArgs []interface{}
}
// A Type collects information about a type in both the C and Go worlds.
type Type struct {
Size int64
Align int64
C *TypeRepr
Go ast.Expr
EnumValues map[string]int64
Typedef string
}
// A FuncType collects information about a function type in both the C and Go worlds.
type FuncType struct {
Params []*Type
Result *Type
Go *ast.FuncType
}
func usage() {
fmt.Fprint(os.Stderr, "usage: cgo -- [compiler options] file.go ...\n")
flag.PrintDefaults()
os.Exit(2)
}
var ptrSizeMap = map[string]int64{
"386": 4,
"amd64": 8,
"arm": 4,
"arm64": 8,
"mips64": 8,
"mips64le": 8,
"ppc64": 8,
"ppc64le": 8,
"s390": 4,
"s390x": 8,
}
var intSizeMap = map[string]int64{
"386": 4,
"amd64": 8,
"arm": 4,
"arm64": 8,
"mips64": 8,
"mips64le": 8,
"ppc64": 8,
"ppc64le": 8,
"s390": 4,
"s390x": 8,
}
var cPrefix string
var fset = token.NewFileSet()
var dynobj = flag.String("dynimport", "", "if non-empty, print dynamic import data for that file")
var dynout = flag.String("dynout", "", "write -dynimport output to this file")
var dynpackage = flag.String("dynpackage", "main", "set Go package for -dynimport output")
var dynlinker = flag.Bool("dynlinker", false, "record dynamic linker information in -dynimport mode")
// This flag is for bootstrapping a new Go implementation,
// to generate Go types that match the data layout and
// constant values used in the host's C libraries and system calls.
var godefs = flag.Bool("godefs", false, "for bootstrap: write Go definitions for C file to standard output")
var objDir = flag.String("objdir", "", "object directory")
var importPath = flag.String("importpath", "", "import path of package being built (for comments in generated files)")
var exportHeader = flag.String("exportheader", "", "where to write export header if any exported functions")
var gccgo = flag.Bool("gccgo", false, "generate files for use with gccgo")
var gccgoprefix = flag.String("gccgoprefix", "", "-fgo-prefix option used with gccgo")
var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo")
var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code")
var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code")
var goarch, goos string
func main() {
flag.Usage = usage
flag.Parse()
if *dynobj != "" {
// cgo -dynimport is essentially a separate helper command
// built into the cgo binary. It scans a gcc-produced executable
// and dumps information about the imported symbols and the
// imported libraries. The 'go build' rules for cgo prepare an
// appropriate executable and then use its import information
// instead of needing to make the linkers duplicate all the
// specialized knowledge gcc has about where to look for imported
// symbols and which ones to use.
dynimport(*dynobj)
return
}
if *godefs {
// Generating definitions pulled from header files,
// to be checked into Go repositories.
// Line numbers are just noise.
conf.Mode &^= printer.SourcePos
}
args := flag.Args()
if len(args) < 1 {
usage()
}
// Find first arg that looks like a go file and assume everything before
// that are options to pass to gcc.
var i int
for i = len(args); i > 0; i-- {
if !strings.HasSuffix(args[i-1], ".go") {
break
}
}
if i == len(args) {
usage()
}
goFiles := args[i:]
p := newPackage(args[:i])
// Record CGO_LDFLAGS from the environment for external linking.
if ldflags := os.Getenv("CGO_LDFLAGS"); ldflags != "" {
args, err := splitQuoted(ldflags)
if err != nil {
fatalf("bad CGO_LDFLAGS: %q (%s)", ldflags, err)
}
p.addToFlag("LDFLAGS", args)
}
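// Illustrative example of the handling above (not part of the original
// source): with CGO_LDFLAGS="-L/opt/foo/lib -lfoo", splitQuoted yields
// ["-L/opt/foo/lib", "-lfoo"], which are appended to the package's LDFLAGS
// much like flags from a #cgo LDFLAGS directive.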
// Need a unique prefix for the global C symbols that
// we use to coordinate between gcc and ourselves.
// We already put _cgo_ at the beginning, so the main
// concern is other cgo wrappers for the same functions.
// Use the beginning of the md5 of the input to disambiguate.
h := md5.New()
for _, input := range goFiles {
f, err := os.Open(input)
if err != nil {
fatalf("%s", err)
}
io.Copy(h, f)
f.Close()
}
cPrefix = fmt.Sprintf("_%x", h.Sum(nil)[0:6])
fs := make([]*File, len(goFiles))
for i, input := range goFiles {
f := new(File)
f.ReadGo(input)
f.DiscardCgoDirectives()
fs[i] = f
}
if *objDir == "" {
// make sure that _obj directory exists, so that we can write
// all the output files there.
os.Mkdir("_obj", 0777)
*objDir = "_obj"
}
*objDir += string(filepath.Separator)
for i, input := range goFiles {
f := fs[i]
p.Translate(f)
for _, cref := range f.Ref {
switch cref.Context {
case "call", "call2":
if cref.Name.Kind != "type" {
break
}
*cref.Expr = cref.Name.Type.Go
}
}
if nerrors > 0 {
os.Exit(2)
}
p.PackagePath = f.Package
p.Record(f)
if *godefs {
os.Stdout.WriteString(p.godefs(f, input))
} else {
p.writeOutput(f, input)
}
}
if !*godefs {
p.writeDefs()
}
if nerrors > 0 {
os.Exit(2)
}
}
// newPackage returns a new Package that will invoke
// gcc with the additional arguments specified in args.
func newPackage(args []string) *Package {
goarch = runtime.GOARCH
if s := os.Getenv("GOARCH"); s != "" {
goarch = s
}
goos = runtime.GOOS
if s := os.Getenv("GOOS"); s != "" {
goos = s
}
ptrSize := ptrSizeMap[goarch]
if ptrSize == 0 {
fatalf("unknown ptrSize for $GOARCH %q", goarch)
}
intSize := intSizeMap[goarch]
if intSize == 0 {
fatalf("unknown intSize for $GOARCH %q", goarch)
}
// Reset locale variables so gcc emits English errors [sic].
os.Setenv("LANG", "en_US.UTF-8")
os.Setenv("LC_ALL", "C")
p := &Package{
PtrSize: ptrSize,
IntSize: intSize,
CgoFlags: make(map[string][]string),
Written: make(map[string]bool),
}
p.addToFlag("CFLAGS", args)
return p
}
// Record what needs to be recorded about f.
func (p *Package) Record(f *File) {
if p.PackageName == "" {
p.PackageName = f.Package
} else if p.PackageName != f.Package {
error_(token.NoPos, "inconsistent package names: %s, %s", p.PackageName, f.Package)
}
if p.Name == nil {
p.Name = f.Name
} else {
for k, v := range f.Name {
if p.Name[k] == nil {
p.Name[k] = v
} else if !reflect.DeepEqual(p.Name[k], v) {
error_(token.NoPos, "inconsistent definitions for C.%s", fixGo(k))
}
}
}
if f.ExpFunc != nil {
p.ExpFunc = append(p.ExpFunc, f.ExpFunc...)
p.Preamble += "\n" + f.Preamble
}
p.Decl = append(p.Decl, f.AST.Decls...)
}
| [
"\"CGO_LDFLAGS\"",
"\"GOARCH\"",
"\"GOOS\""
]
| []
| [
"CGO_LDFLAGS",
"GOARCH",
"GOOS"
]
| [] | ["CGO_LDFLAGS", "GOARCH", "GOOS"] | go | 3 | 0 | |
pkg/vars/stream.go | package vars
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"regexp"
"strings"
)
var re = regexp.MustCompile(`(?m)\((.*)\)`)
type Stream struct {
}
func (st Stream) Fill(holders []string) map[string][]string {
vars := make(map[string][]string)
for _, tag := range holders {
if strings.HasPrefix(tag, "file.contents(") {
vars[tag] = append(vars[tag], st.getOutput(st.getFileContents(tag)))
} else if strings.HasPrefix(tag, "http.contents(") {
vars[tag] = append(vars[tag], st.getOutput(st.getHttpContents(tag)))
}
}
return vars
}
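// Illustrative example (not part of the original source; the path is made up):
// given holders = []string{"file.contents(./testdata/body.json)"}, Fill reads
// that file via getFileContents and returns a map with the original tag as the
// key and the file contents (or an "ERROR: ..." string) as the single value.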
func (st Stream) getOutput(o []byte, err error) string {
if err != nil {
log.Printf("Impossible read mock stream: %s", err)
return fmt.Sprintf("ERROR: %s", err.Error())
}
return string(o)
}
func (st Stream) getFileContents(tag string) ([]byte, error) {
path := st.getInputParam(tag)
return ioutil.ReadFile(path)
}
func (st Stream) getHttpContents(tag string) ([]byte, error) {
url := st.getInputParam(tag)
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
content, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return content, nil
}
func (st Stream) getInputParam(param string) string {
match := re.FindStringSubmatch(param)
if len(match) > 1 {
return match[1]
}
return ""
}
| []
| []
| []
| [] | [] | go | null | null | null |
python/mxnet/gluon/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Parallelization utility optimizer."""
__all__ = ['split_data', 'split_and_load', 'clip_global_norm',
'check_sha1', 'download']
import os
import hashlib
import warnings
import collections
import weakref
try:
import requests
except ImportError:
class requests_failed_to_import(object):
pass
requests = requests_failed_to_import
import numpy as np
from .. import ndarray
import gc
def split_data(data, num_slice, batch_axis=0, even_split=True):
"""Splits an NDArray into `num_slice` slices along `batch_axis`.
Usually used for data parallelism where each slices is sent
to one device (i.e. GPU).
Parameters
----------
data : NDArray
A batch of data.
num_slice : int
Number of desired slices.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
If `True`, an error will be raised when `num_slice` does not evenly
divide `data.shape[batch_axis]`.
Returns
-------
list of NDArray
Return value is a list even if `num_slice` is 1.
"""
size = data.shape[batch_axis]
if size < num_slice:
raise ValueError(
"Too many slices for data with shape %s. Arguments are " \
"num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis))
if even_split and size % num_slice != 0:
raise ValueError(
"data with shape %s cannot be evenly split into %d slices along axis %d. " \
"Use a batch size that's multiple of %d or set even_split=False to allow " \
"uneven partitioning of data."%(
str(data.shape), num_slice, batch_axis, num_slice))
step = size // num_slice
if batch_axis == 0:
slices = [data[i*step:(i+1)*step] if i < num_slice - 1 else data[i*step:size]
for i in range(num_slice)]
elif even_split:
slices = ndarray.split(data, num_outputs=num_slice, axis=batch_axis)
else:
slices = [ndarray.slice_axis(data, batch_axis, i*step, (i+1)*step)
if i < num_slice - 1 else
ndarray.slice_axis(data, batch_axis, i*step, size)
for i in range(num_slice)]
return slices
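# Illustrative usage of split_data (not part of the original source; assumes
# `import mxnet as mx`): a batch of 10 rows split into 3 slices with
# even_split=False yields slices of 3, 3 and 4 rows along axis 0.
#   x = mx.nd.arange(20).reshape((10, 2))
#   a, b, c = split_data(x, 3, even_split=False)  # shapes (3, 2), (3, 2), (4, 2)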
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
"""
if not isinstance(data, ndarray.NDArray):
data = ndarray.array(data, ctx=ctx_list[0])
if len(ctx_list) == 1:
return [data.as_in_context(ctx_list[0])]
slices = split_data(data, len(ctx_list), batch_axis, even_split)
return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)]
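# Illustrative usage of split_and_load (not part of the original source;
# assumes `import mxnet as mx` and two available GPUs): each half of the batch
# is placed on one device.
#   data = mx.nd.ones((8, 3, 32, 32))
#   parts = split_and_load(data, [mx.gpu(0), mx.gpu(1)])  # two (4, 3, 32, 32) arrays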
class PiplineLoader(object):
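# Keeps a pool of pre-allocated per-context NDArrays: the first `pool_size`
# calls to split_and_load allocate and remember the device slices, later calls
# copy new batches into those buffers in round-robin order, and reset()
# releases the pool so the device memory can be reclaimed.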
def __init__(self, pool_size):
self._pool_size = pool_size
self._pool = []
self._pool_counter = 0
def split_and_load(self, data, ctx_list, batch_axis=0, even_split=True):
if not isinstance(data, ndarray.NDArray):
data = ndarray.array(data, ctx=ctx_list[0])
if self._pool_counter < self._pool_size:
if len(ctx_list) == 1:
data_list = [data.as_in_context(ctx_list[0])]
else:
slices = split_data(data, len(ctx_list), batch_axis, even_split)
data_list = [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)]
self._pool.append(data_list)
else:
data_list = self._pool[self._pool_counter % self._pool_size]
if len(ctx_list) == 1:
data.copyto(data_list[0])
else:
slices = split_data(data, len(data_list), batch_axis, even_split)
for i, d in zip(slices, data_list):
i.copyto(d)
self._pool_counter = self._pool_counter + 1
if self._pool_counter == self._pool_size * 100:
self._pool_counter = self._pool_size
return data_list
def reset(self):
for data_list in self._pool:
for d in data_list:
del d
del data_list
self._pool = []
self._pool_counter = 0
gc.collect()
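# A rough usage sketch for the pooled loader above (`batches` and the GPU contexts are
# illustrative; the pool reuses the first `pool_size` sets of device buffers instead of
# allocating new ones on every iteration):
#     loader = PiplineLoader(pool_size=2)
#     ctxs = [mx.gpu(0), mx.gpu(1)]
#     for batch in batches:
#         data_list = loader.split_and_load(batch, ctxs)
#         ...  # forward/backward on each slice
#     loader.reset()  # drop the pooled buffers and trigger garbage collection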
def clip_global_norm(arrays, max_norm):
"""Rescales NDArrays so that the sum of their 2-norm is smaller than `max_norm`.
"""
def _norm(array):
if array.stype == 'default':
x = array.reshape((-1,))
return ndarray.dot(x, x)
return array.norm().square()
assert len(arrays) > 0
ctx = arrays[0].context
total_norm = ndarray.add_n(*[_norm(arr).as_in_context(ctx) for arr in arrays])
total_norm = ndarray.sqrt(total_norm).asscalar()
if not np.isfinite(total_norm):
warnings.warn(UserWarning('nan or inf is detected. Clipping results will be undefined.'),
stacklevel=2)
scale = max_norm / (total_norm + 1e-8)
if scale < 1.0:
for arr in arrays:
arr *= scale
return total_norm
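# A minimal usage sketch for clip_global_norm, assuming a single-context Gluon network
# named `net` whose gradients were just computed (names are illustrative only):
#     grads = [p.grad() for p in net.collect_params().values() if p.grad_req != 'null']
#     total = clip_global_norm(grads, max_norm=1.0)   # rescales the gradients in place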
def _indent(s_, numSpaces):
"""Indent string
"""
s = s_.split('\n')
if len(s) == 1:
return s_
first = s.pop(0)
s = [first] + [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
return s
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
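# A minimal usage sketch for check_sha1 (the file name and hash are placeholders, not real values):
#     if not check_sha1('model.params', 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4'):
#         raise ValueError('downloaded file is corrupted or incomplete')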
def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
"""Download an given URL
Parameters
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
        The number of times to attempt the download in case of failure or non-200 return codes
verify_ssl : bool, default True
Verify SSL certificates.
Returns
-------
str
The file path of the downloaded file.
"""
if path is None:
fname = url.split('/')[-1]
# Empty filenames are invalid
assert fname, 'Can\'t construct file-name from this URL. ' \
'Please set the `path` option manually.'
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
assert retries >= 0, "Number of retries should be at least 0"
if not verify_ssl:
warnings.warn(
'Unverified HTTPS request is being made (verify_ssl=False). '
'Adding certificate verification is strongly advised.')
if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
while retries+1 > 0:
            # Disable pylint warning about catching too broad an Exception
# pylint: disable=W0703
try:
print('Downloading %s from %s...'%(fname, url))
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError("Failed downloading url %s"%url)
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if sha1_hash and not check_sha1(fname, sha1_hash):
raise UserWarning('File {} is downloaded but the content hash does not match.'\
' The repo may be outdated or download may be incomplete. '\
'If the "repo_url" is overridden, consider switching to '\
'the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
else:
print("download failed, retrying, {} attempt{} left"
.format(retries, 's' if retries > 1 else ''))
return fname
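# A minimal usage sketch for download (the URL is illustrative; sha1_hash is optional and omitted here):
#     fname = download('https://example.com/data/train.rec', path='./data/')
#     print('saved to', fname)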
def _get_repo_url():
"""Return the base URL for Gluon dataset and model repository."""
default_repo = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'
repo_url = os.environ.get('MXNET_GLUON_REPO', default_repo)
if repo_url[-1] != '/':
repo_url = repo_url+'/'
return repo_url
def _get_repo_file_url(namespace, filename):
"""Return the URL for hosted file in Gluon repository.
Parameters
----------
namespace : str
Namespace of the file.
filename : str
Name of the file
"""
return '{base_url}{namespace}/{filename}'.format(base_url=_get_repo_url(),
namespace=namespace,
filename=filename)
def _brief_print_list(lst, limit=7):
"""Print at most `limit` elements of list."""
lst = list(lst)
if len(lst) > limit:
return _brief_print_list(lst[:limit//2], limit) + ', ..., ' + \
_brief_print_list(lst[-limit//2:], limit)
return ', '.join(["'%s'"%str(i) for i in lst])
class HookHandle(object):
"""A handle that can attach/detach a hook."""
def __init__(self):
self._hooks_dict_ref = None
self._id = None
def attach(self, hooks_dict, hook):
assert not self._hooks_dict_ref, 'The same handle cannot be attached twice.'
self._id = id(hook)
hooks_dict[self._id] = hook
self._hooks_dict_ref = weakref.ref(hooks_dict)
def detach(self):
hooks_dict = self._hooks_dict_ref()
if hooks_dict is not None and self._id in hooks_dict:
del hooks_dict[self._id]
def __getstate__(self):
return (self._hooks_dict_ref(), self._id)
def __setstate__(self, state):
if state[0] is None:
self._hooks_dict_ref = weakref.ref(collections.OrderedDict())
else:
self._hooks_dict_ref = weakref.ref(state[0])
self._id = state[1]
def __enter__(self):
return self
def __exit__(self, ptype, value, trace):
self.detach()
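# A minimal usage sketch for HookHandle (`my_hook` is an illustrative callable, not defined here):
#     hooks = collections.OrderedDict()
#     handle = HookHandle()
#     handle.attach(hooks, my_hook)   # registers my_hook keyed by id(my_hook)
#     with handle:
#         pass                        # detach() is called automatically on exit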
| []
| []
| [
"MXNET_GLUON_REPO"
]
| [] | ["MXNET_GLUON_REPO"] | python | 1 | 0 | |
setup.py | import os
import platform
import shlex
import shutil
import subprocess
from setuptools import find_packages, setup
from setuptools.command.egg_info import egg_info
from sqllineage import NAME, STATIC_FOLDRE, VERSION
with open("README.md", "r") as f:
long_description = f.read()
class EggInfoWithJS(egg_info):
"""
    egg_info is a hook for
    1) building a source distribution (python setup.py sdist)
    2) building a wheel distribution (python setup.py bdist_wheel)
    3) installing from source code (python setup.py install) or pip install from GitHub
    In this step, the frontend code is built to match the MANIFEST.in list so that the static files are later copied to
    site-packages correctly as package_data. When installing from a pre-built distribution, no build step is needed at
    install time.
"""
def run(self) -> None:
static_path = os.path.join(NAME, STATIC_FOLDRE)
if os.path.exists(static_path) or "READTHEDOCS" in os.environ:
pass
else:
js_path = "sqllineagejs"
use_shell = True if platform.system() == "Windows" else False
subprocess.check_call(
shlex.split("npm install"), cwd=js_path, shell=use_shell
)
subprocess.check_call(
shlex.split("npm run build"), cwd=js_path, shell=use_shell
)
shutil.move(os.path.join(js_path, STATIC_FOLDRE), static_path)
super().run()
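# A rough sketch of when the hook above fires (standard setuptools/pip invocations, shown for
# illustration only; the JS build is skipped when the prebuilt static folder already exists or
# when running on Read the Docs):
#     python setup.py sdist
#     python setup.py bdist_wheel
#     pip install .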
setup(
name=NAME,
version=VERSION,
author="Reata",
author_email="[email protected]",
description="SQL Lineage Analysis Tool powered by Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/reata/sqllineage",
packages=find_packages(exclude=("tests",)),
package_data={"": [f"{STATIC_FOLDRE}/*", f"{STATIC_FOLDRE}/**/**/*"]},
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
],
python_requires=">=3.6",
install_requires=["sqlparse>=0.3.0", "networkx>=2.4", "flask", "flask_cors"],
entry_points={"console_scripts": ["sqllineage = sqllineage.cli:main"]},
extras_require={
"ci": [
"bandit",
"black",
"flake8",
"flake8-blind-except",
"flake8-builtins",
"flake8-import-order",
"flake8-logging-format",
"mypy",
"pytest",
"pytest-cov",
"tox",
"twine",
"wheel",
],
"docs": ["Sphinx>=3.2.0", "sphinx_rtd_theme>=0.5.0"],
},
cmdclass={"egg_info": EggInfoWithJS},
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
examples/pwr_run/checkpointing/socket_short/max_par/job14.py | """
#Trains a DenseNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_param/' + job_name + '*'
total_epochs = 7
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_param/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
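# A rough usage sketch (arguments follow the argparse definitions above; host/GPU values are illustrative):
#     python3 job14.py --tc testcase --gpu_num 0 --node scheduler-host            # fresh run
#     kill -15 <pid>   # triggers terminateProcess, which saves a checkpoint .h5 and exits
#     python3 job14.py --tc testcase --gpu_num 0 --node scheduler-host --resume   # resume from checkpoint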
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
# send signal 'jobxx param xxxxx'
message = job_name + ' param ' + str(trainable_count)
send_signal.send(args.node, 10002, message)
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
jd_blueCoin.py | #!/bin/env python3
# -*- coding: utf-8 -*-
'''
Project name: JD-Script / jd_blueCoin
Author: Curtin
Function: DongDong Supermarket (东东超市) product redemption
Date: 2021/4/17 11:22 AM
update: 2021/11/08 20:30
TG discussion: https://t.me/topstyle996
TG channel: https://t.me/TopStyle2021
Suggested cron: 59 23 * * * python3 jd_blueCoin.py
new Env('东东超市商品兑换');
'''
################ [Parameters] ######################
# ck: cookies are read first from the [JDCookies.txt] file, then from the ENV variable JD_COOKIE='ck1&ck2', and only then from cookies=ck inside this script
# ENV setting: export JD_COOKIE='cookie1&cookie2'
cookies = ''
# [Fill in the product you want to redeem] ENV setting: export coinToBeans='京豆包'
coinToBeans = ''
# Concurrent multi-account mode, disabled by default. Enable via ENV: export blueCoin_Cc=True
blueCoin_Cc = False
# Number of clicks, i.e. how many exchange-button clicks are fired at the same time; adjust as appropriate.
dd_thread = 5
###############################################
import time, datetime, os, sys, random
import requests, re, json
from urllib.parse import quote, unquote
import threading
requests.packages.urllib3.disable_warnings()
try:
from jd_cookie import getJDCookie
getCk = getJDCookie()
except:
print("请先下载依赖脚本,\n下载链接:https://raw.githubusercontent.com/zqy0412/JD-Script/main/jd_tool_dl.py")
sys.exit(3)
pwd = os.path.dirname(os.path.abspath(__file__)) + os.sep
# timestamp = int(round(time.time() * 1000))
script_name = '东东超市商品兑换'
title = ''
prizeId = ''
blueCost = ''
inStock = ''
UserAgent = ''
periodId = ''
# Latest end time for the exchange attempt
endtime='00:00:10.00000000'
today = datetime.datetime.now().strftime('%Y-%m-%d')
unstartTime = datetime.datetime.now().strftime('%Y-%m-%d 23:55:00.00000000')
tomorrow = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
starttime = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d 00:00:00.00000000')
def printT(s):
print("[{0}]: {1}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), s))
sys.stdout.flush()
def getEnvs(label):
try:
if label == 'True' or label == 'yes' or label == 'true' or label == 'Yes':
return True
elif label == 'False' or label == 'no' or label == 'false' or label == 'No':
return False
except:
pass
try:
if '.' in label:
return float(label)
elif '&' in label:
return label.split('&')
elif '@' in label:
return label.split('@')
else:
return int(label)
except:
return label
if "coinToBeans" in os.environ:
if len(os.environ["coinToBeans"]) > 1:
coinToBeans = os.environ["coinToBeans"]
printT(f"已获取并使用Env环境 coinToBeans:{coinToBeans}")
if "blueCoin_Cc" in os.environ:
if len(os.environ["blueCoin_Cc"]) > 1:
blueCoin_Cc = getEnvs(os.environ["blueCoin_Cc"])
printT(f"已获取并使用Env环境 blueCoin_Cc:{blueCoin_Cc}")
if "dd_thread" in os.environ:
if len(os.environ["dd_thread"]) > 1:
dd_thread = getEnvs(os.environ["dd_thread"])
printT(f"已获取并使用Env环境 dd_thread:{dd_thread}")
class TaskThread(threading.Thread):
"""
    Thread class for handling task-related work.
"""
def __init__(self, func, args=()):
super(TaskThread, self).__init__()
        self.func = func  # the task function to execute
        self.args = args  # the arguments to pass to it
def run(self):
        # Calling start() on the thread instance runs run(); the concrete async task is defined here
        # printT("start func {}".format(self.func.__name__)) # print the task name via __name__
        self.result = self.func(*self.args)  # store the task's return value in self.result
def get_result(self):
        # This method returns the result of the task function; it does not have to be named get_result
try:
return self.result
except Exception as ex:
printT(ex)
return "ERROR"
def userAgent():
"""
    Randomly generate a UA string.
:return: jdapp;iPhone;9.4.8;14.3;xxxx;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1
"""
if not UserAgent:
uuid = ''.join(random.sample('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40))
addressid = ''.join(random.sample('1234567898647', 10))
iosVer = ''.join(
random.sample(["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1))
iosV = iosVer.replace('.', '_')
iPhone = ''.join(random.sample(["8", "9", "10", "11", "12", "13"], 1))
ADID = ''.join(random.sample('0987654321ABCDEF', 8)) + '-' + ''.join(
random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(
random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(random.sample('0987654321ABCDEF', 12))
return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/{ADID};supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone{iPhone},1;addressid/{addressid};supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS {iosV} like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1'
else:
return UserAgent
## Fetch the notification service
class msg(object):
def __init__(self, m=''):
self.str_msg = m
self.message()
def message(self):
global msg_info
printT(self.str_msg)
try:
msg_info = "{}\n{}".format(msg_info, self.str_msg)
except:
msg_info = "{}".format(self.str_msg)
sys.stdout.flush()
def getsendNotify(self, a=0):
if a == 0:
a += 1
try:
url = 'https://gitee.com/curtinlv/Public/raw/master/sendNotify.py'
response = requests.get(url)
if 'curtinlv' in response.text:
with open('sendNotify.py', "w+", encoding="utf-8") as f:
f.write(response.text)
else:
if a < 5:
a += 1
return self.getsendNotify(a)
else:
pass
except:
if a < 5:
a += 1
return self.getsendNotify(a)
else:
pass
def main(self):
global send
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(cur_path)
if os.path.exists(cur_path + "/sendNotify.py"):
try:
from sendNotify import send
except:
self.getsendNotify()
try:
from sendNotify import send
except:
printT("加载通知服务失败~")
else:
self.getsendNotify()
try:
from sendNotify import send
except:
printT("加载通知服务失败~")
###################
msg().main()
def setHeaders(cookie):
headers = {
'Origin': 'https://jdsupermarket.jd.com',
'Cookie': cookie,
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'Referer': 'https://jdsupermarket.jd.com/game/?tt={}'.format(int(round(time.time() * 1000))-314),
'Host': 'api.m.jd.com',
'User-Agent': userAgent(),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-cn'
}
return headers
# Query the DongDong Supermarket blue-coin balance
def getBlueCoinInfo(headers):
try:
url='https://api.m.jd.com/api?appid=jdsupermarket&functionId=smtg_newHome&clientVersion=8.0.0&client=m&body=%7B%22channel%22:%2218%22%7D&t={0}'.format(int(round(time.time() * 1000)))
respon = requests.get(url=url, verify=False, headers=headers)
result = respon.json()
if result['data']['bizCode'] == 0:
totalBlue = result['data']['result']['totalBlue']
shopName = result['data']['result']['shopName']
return totalBlue, shopName
else:
totalBlue = 0
shopName = result['data']['bizMsg']
return totalBlue, shopName
except Exception as e:
printT(e)
# Query all users' blue coins and levels
def getAllUserInfo(userName):
id_num = 1
for ck in cookies:
headers = setHeaders(ck)
try:
totalBlue,shopName = getBlueCoinInfo(headers)
url = 'https://api.m.jd.com/api?appid=jdsupermarket&functionId=smtg_receiveCoin&clientVersion=8.0.0&client=m&body=%7B%22type%22:4,%22channel%22:%2218%22%7D&t={0}'.format(int(round(time.time() * 1000)))
respon = requests.get(url=url, verify=False, headers=headers)
result = respon.json()
level = result['data']['result']['level']
printT("【用户{4}:{5}】: {0} {3}\n【等级】: {1}\n【蓝币】: {2}万\n------------------".format(shopName, level, totalBlue / 10000,totalBlue, id_num,userName))
except Exception as e:
# printT(e)
printT(f"账号{id_num}【{userName}】异常请检查ck是否正常~")
id_num += 1
# Query the products
def smtg_queryPrize(headers, coinToBeans):
url = 'https://api.m.jd.com/api?appid=jdsupermarket&functionId=smt_queryPrizeAreas&clientVersion=8.0.0&client=m&body=%7B%22channel%22:%2218%22%7D&t={}'.format(int(round(time.time() * 1000)))
try:
respone = requests.get(url=url, verify=False, headers=headers)
result = respone.json()
allAreas = result['data']['result']['areas']
for alist in allAreas:
for x in alist['prizes']:
if coinToBeans in x['name']:
areaId = alist['areaId']
periodId = alist['periodId']
if alist['areaId'] != 6:
skuId = x['skuId']
else:
skuId = 0
title = x['name']
prizeId = x['prizeId']
blueCost = x['cost']
status = x['status']
return title, prizeId, blueCost, status, skuId, areaId, periodId
# printT("请检查设置的兑换商品名称是否正确?")
# return 0, 0, 0, 0, 0
except Exception as e:
printT(e)
# Check whether the configured product exists; if it does, return the product title, prizeId, blue-coin price, and stock status
def isCoinToBeans(coinToBeans,headers):
if coinToBeans.strip() != '':
try:
title, prizeId, blueCost, status, skuId, areaId, periodId = smtg_queryPrize(headers,coinToBeans)
return title, prizeId, blueCost, status, skuId, areaId, periodId
except Exception as e:
printT(e)
pass
else:
printT("1.请检查设置的兑换商品名称是否正确?")
exit(0)
# Rush the exchange
def smtg_obtainPrize(prizeId, areaId, periodId, headers, username):
body = {
"connectId": prizeId,
"areaId": areaId,
"periodId": periodId,
"informationParam": {
"eid": "",
"referUrl": -1,
"shshshfp": "",
"openId": -1,
"isRvc": 0,
"fp": -1,
"shshshfpa": "",
"shshshfpb": "",
"userAgent": -1
},
"channel": "18"
}
timestamp = int(round(time.time() * 1000))
url = f'https://api.m.jd.com/api?appid=jdsupermarket&functionId=smt_exchangePrize&clientVersion=8.0.0&client=m&body={quote(json.dumps(body))}&t={timestamp}'
try:
respon = requests.post(url=url, verify=False, headers=headers)
result = respon.json()
printT(result)
success = result['data']['success']
bizMsg = result['data']['bizMsg']
if success:
printT(f"【{username}】{bizMsg}...恭喜兑换成功!")
return 0
else:
printT(f"【{username}】{bizMsg}")
return 999
except Exception as e:
printT(e)
return 999
def issmtg_obtainPrize(ck, user_num, prizeId, areaId, periodId, title):
try:
userName = userNameList[cookiesList.index(ck)]
t_num = range(dd_thread)
threads = []
for t in t_num:
thread = TaskThread(smtg_obtainPrize, args=(prizeId, areaId, periodId, setHeaders(ck), userName))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
result = thread.get_result()
if result == 0:
msg(f"账号{user_num}:{userName} 成功兑换【{title}】")
return 0
nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f8')
if nowtime > qgendtime:
title, prizeId, blueCost, status, skuId, areaId, periodId = isCoinToBeans(coinToBeans, setHeaders(ck))
if status == 2:
printT("{1}, 你好呀~【{0}】 当前没货了......".format(title, userName))
return 2
else:
return 0
else:
return 0
except Exception as e:
printT(e)
return 1
def checkUser(cookies,): # returns the list of cks that meet the exchange conditions
global title, prizeId, blueCost, status, skuId, areaId, periodId
cookieList=[]
user_num=1
a = 0
for i in cookies:
headers = setHeaders(i)
userName = userNameList[cookiesList.index(i)]
try:
totalBlue, shopName = getBlueCoinInfo(headers)
if totalBlue != 0:
if a == 0:
a = 1
title, prizeId, blueCost, status, skuId, areaId, periodId = isCoinToBeans(coinToBeans,headers)
totalBlueW = totalBlue / 10000
if user_num == 1:
printT("您已设置兑换的商品:【{0}】 需要{1}w蓝币".format(title, blueCost / 10000))
printT("********** 首先检测您是否有钱呀 ********** ")
if totalBlue > blueCost:
cookieList.append(i)
printT(f"账号{user_num}:【{userName}】蓝币:{totalBlueW}万...yes")
else:
printT(f"账号{user_num}:【{userName}】蓝币:{totalBlueW}万...no")
except Exception as e:
printT(f"账号{user_num}:【{userName}】,该用户异常,查不到商品关键词【{coinToBeans}】")
user_num += 1
if len(cookieList) >0:
printT("共有{0}个账号符合兑换条件".format(len(cookieList)))
return cookieList
else:
printT("共有{0}个账号符合兑换条件...已退出,请继续加油赚够钱再来~".format(len(cookieList)))
exit(0)
#Start
def start():
try:
global cookiesList, userNameList, cookies, qgendtime
printT("{} Start".format(script_name))
cookiesList, userNameList = getCk.iscookie()
cookies = checkUser(cookiesList)
qgendtime = '{} {}'.format(tomorrow, endtime)
if blueCoin_Cc:
msg("并发模式:多账号")
else:
msg("并发模式:单账号")
printT(f"开始抢兑时间[{starttime}]")
a = 0
while True:
nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f8')
if nowtime > starttime:
if blueCoin_Cc:
ttt = []
user_num = 1
for ck in cookies:
thread = TaskThread(issmtg_obtainPrize, args=(ck, user_num, prizeId, areaId, periodId, title))
ttt.append(thread)
thread.start()
user_num += 1
for thread in ttt:
thread.join()
result = thread.get_result()
if result == 2:
break
if result == 2:
break
else:
user_num = 1
for ck in cookies:
response = issmtg_obtainPrize(ck, user_num, prizeId, areaId, periodId, title)
user_num += 1
if response == 2:
break
if response == 2:
break
elif nowtime > qgendtime:
break
elif nowtime < unstartTime:
printT("Sorry,还没到时间。")
printT("【皮卡丘】建议cron: 59 23 * * * python3 jd_blueCoin.py")
break
else:
if a == 0:
a = 1
printT(f"正在等待,请勿终止退出...")
except Exception as e:
printT(e)
if __name__ == '__main__':
start()
try:
if '成功兑换' in msg_info:
send(script_name, msg_info)
except:
pass | []
| []
| [
"dd_thread",
"coinToBeans",
"blueCoin_Cc"
]
| [] | ["dd_thread", "coinToBeans", "blueCoin_Cc"] | python | 3 | 0 | |
core/src/main/java/com/linecorp/armeria/common/Flags.java | /*
* Copyright 2017 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.common;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.function.IntPredicate;
import java.util.function.LongPredicate;
import java.util.function.Predicate;
import javax.annotation.Nullable;
import javax.net.ssl.SSLEngine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.CaffeineSpec;
import com.google.common.base.Ascii;
import com.google.common.base.CharMatcher;
import com.google.common.base.Splitter;
import com.linecorp.armeria.client.ClientFactoryBuilder;
import com.linecorp.armeria.client.retry.Backoff;
import com.linecorp.armeria.client.retry.RetryingHttpClient;
import com.linecorp.armeria.client.retry.RetryingRpcClient;
import com.linecorp.armeria.common.util.Exceptions;
import com.linecorp.armeria.server.PathMappingContext;
import com.linecorp.armeria.server.ServerBuilder;
import com.linecorp.armeria.server.ServiceConfig;
import com.linecorp.armeria.server.annotation.ExceptionHandler;
import com.linecorp.armeria.server.annotation.ExceptionVerbosity;
import io.netty.channel.epoll.Epoll;
import io.netty.handler.codec.http2.Http2CodecUtil;
import io.netty.handler.ssl.OpenSsl;
/**
* The system properties that affect Armeria's runtime behavior.
*/
public final class Flags {
private static final Logger logger = LoggerFactory.getLogger(Flags.class);
private static final Splitter CSV_SPLITTER = Splitter.on(',').trimResults().omitEmptyStrings();
private static final String PREFIX = "com.linecorp.armeria.";
private static final int NUM_CPU_CORES = Runtime.getRuntime().availableProcessors();
private static final boolean VERBOSE_EXCEPTIONS = getBoolean("verboseExceptions", false);
private static final boolean VERBOSE_RESPONSES = getBoolean("verboseResponses", false);
private static final boolean HAS_WSLENV = System.getenv("WSLENV") != null;
private static final boolean USE_EPOLL = getBoolean("useEpoll", isEpollAvailable(),
value -> isEpollAvailable() || !value);
private static final boolean USE_OPENSSL = getBoolean("useOpenSsl", OpenSsl.isAvailable(),
value -> OpenSsl.isAvailable() || !value);
private static final int DEFAULT_MAX_NUM_CONNECTIONS = Integer.MAX_VALUE;
private static final int MAX_NUM_CONNECTIONS =
getInt("maxNumConnections", DEFAULT_MAX_NUM_CONNECTIONS, value -> value > 0);
private static final int DEFAULT_NUM_COMMON_WORKERS = NUM_CPU_CORES * 2;
private static final int NUM_COMMON_WORKERS =
getInt("numCommonWorkers", DEFAULT_NUM_COMMON_WORKERS, value -> value > 0);
private static final int DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS = 200; // from Tomcat default maxThreads
private static final int NUM_COMMON_BLOCKING_TASK_THREADS =
getInt("numCommonBlockingTaskThreads",
DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS,
value -> value > 0);
private static final long DEFAULT_DEFAULT_MAX_REQUEST_LENGTH = 10 * 1024 * 1024; // 10 MiB
private static final long DEFAULT_MAX_REQUEST_LENGTH =
getLong("defaultMaxRequestLength",
DEFAULT_DEFAULT_MAX_REQUEST_LENGTH,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH = 10 * 1024 * 1024; // 10 MiB
private static final long DEFAULT_MAX_RESPONSE_LENGTH =
getLong("defaultMaxResponseLength",
DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS = 10 * 1000; // 10 seconds
private static final long DEFAULT_REQUEST_TIMEOUT_MILLIS =
getLong("defaultRequestTimeoutMillis",
DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS,
value -> value >= 0);
// Use slightly greater value than the default request timeout so that clients have a higher chance of
// getting proper 503 Service Unavailable response when server-side timeout occurs.
private static final long DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS = 15 * 1000; // 15 seconds
private static final long DEFAULT_RESPONSE_TIMEOUT_MILLIS =
getLong("defaultResponseTimeoutMillis",
DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS = 3200; // 3.2 seconds
private static final long DEFAULT_CONNECT_TIMEOUT_MILLIS =
getLong("defaultConnectTimeoutMillis",
DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS,
value -> value > 0);
// Use slightly greater value than the client-side default so that clients close the connection more often.
private static final long DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS = 15000; // 15 seconds
private static final long DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS =
getLong("defaultServerIdleTimeoutMillis",
DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS,
value -> value >= 0);
private static final long DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS = 10000; // 10 seconds
private static final long DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS =
getLong("defaultClientIdleTimeoutMillis",
DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS,
value -> value >= 0);
private static final int DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE = 1024 * 1024; // 1MiB
private static final int DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE =
getInt("defaultHttp2InitialConnectionWindowSize",
DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE,
value -> value > 0);
private static final int DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE = 1024 * 1024; // 1MiB
private static final int DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE =
getInt("defaultHttp2InitialStreamWindowSize",
DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE,
value -> value > 0);
private static final int DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE = 16384; // From HTTP/2 specification
private static final int DEFAULT_HTTP2_MAX_FRAME_SIZE =
getInt("defaultHttp2MaxFrameSize",
DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE,
value -> value >= Http2CodecUtil.MAX_FRAME_SIZE_LOWER_BOUND &&
value <= Http2CodecUtil.MAX_FRAME_SIZE_UPPER_BOUND);
// Can't use 0xFFFFFFFFL because some implementations use a signed 32-bit integer to store HTTP/2 SETTINGS
// parameter values, thus anything greater than 0x7FFFFFFF will break them or make them unhappy.
private static final long DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION = Integer.MAX_VALUE;
private static final long DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION =
getLong("defaultHttp2MaxStreamsPerConnection",
DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION,
value -> value > 0 && value <= 0xFFFFFFFFL);
// from Netty default maxHeaderSize
private static final long DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE = 8192;
private static final long DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE =
getLong("defaultHttp2MaxHeaderListSize",
DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE,
value -> value > 0 && value <= 0xFFFFFFFFL);
private static final int DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH = 4096; // from Netty
private static final int DEFAULT_MAX_HTTP1_INITIAL_LINE_LENGTH =
getInt("defaultHttp1MaxInitialLineLength",
DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH,
value -> value >= 0);
private static final int DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE = 8192; // from Netty
private static final int DEFAULT_MAX_HTTP1_HEADER_SIZE =
getInt("defaultHttp1MaxHeaderSize",
DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE,
value -> value >= 0);
private static final int DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE = 8192; // from Netty
private static final int DEFAULT_HTTP1_MAX_CHUNK_SIZE =
getInt("defaultHttp1MaxChunkSize",
DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE,
value -> value >= 0);
private static final boolean DEFAULT_USE_HTTP2_PREFACE = getBoolean("defaultUseHttp2Preface", true);
private static final boolean DEFAULT_USE_HTTP1_PIPELINING = getBoolean("defaultUseHttp1Pipelining", false);
private static final String DEFAULT_DEFAULT_BACKOFF_SPEC =
"exponential=200:10000,jitter=0.2";
private static final String DEFAULT_BACKOFF_SPEC =
getNormalized("defaultBackoffSpec", DEFAULT_DEFAULT_BACKOFF_SPEC, value -> {
try {
Backoff.of(value);
return true;
} catch (Exception e) {
// Invalid backoff specification
return false;
}
});
private static final int DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS = 10;
private static final int DEFAULT_MAX_TOTAL_ATTEMPTS =
getInt("defaultMaxTotalAttempts",
DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS,
value -> value > 0);
private static final String DEFAULT_ROUTE_CACHE_SPEC = "maximumSize=4096";
private static final Optional<String> ROUTE_CACHE_SPEC =
caffeineSpec("routeCache", DEFAULT_ROUTE_CACHE_SPEC);
private static final String DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC = "maximumSize=256";
private static final Optional<String> COMPOSITE_SERVICE_CACHE_SPEC =
caffeineSpec("compositeServiceCache", DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC);
private static final String DEFAULT_PARSED_PATH_CACHE_SPEC = "maximumSize=4096";
private static final Optional<String> PARSED_PATH_CACHE_SPEC =
caffeineSpec("parsedPathCache", DEFAULT_PARSED_PATH_CACHE_SPEC);
private static final String DEFAULT_HEADER_VALUE_CACHE_SPEC = "maximumSize=4096";
private static final Optional<String> HEADER_VALUE_CACHE_SPEC =
caffeineSpec("headerValueCache", DEFAULT_HEADER_VALUE_CACHE_SPEC);
private static final String DEFAULT_CACHED_HEADERS =
":authority,:scheme,:method,accept-encoding,content-type";
private static final List<String> CACHED_HEADERS =
CSV_SPLITTER.splitToList(getNormalized(
"cachedHeaders", DEFAULT_CACHED_HEADERS, CharMatcher.ascii()::matchesAllOf));
private static final String DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY = "unhandled";
private static final ExceptionVerbosity ANNOTATED_SERVICE_EXCEPTION_VERBOSITY =
exceptionLoggingMode("annotatedServiceExceptionVerbosity",
DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY);
static {
if (!isEpollAvailable()) {
final Throwable cause = Epoll.unavailabilityCause();
if (cause != null) {
logger.info("/dev/epoll not available: {}", Exceptions.peel(cause).toString());
} else {
if (HAS_WSLENV) {
logger.info("/dev/epoll not available: WSL not supported");
} else {
logger.info("/dev/epoll not available: ?");
}
}
} else if (USE_EPOLL) {
logger.info("Using /dev/epoll");
}
if (!OpenSsl.isAvailable()) {
final Throwable cause = Exceptions.peel(OpenSsl.unavailabilityCause());
logger.info("OpenSSL not available: {}", cause.toString());
} else if (USE_OPENSSL) {
logger.info("Using OpenSSL: {}, 0x{}",
OpenSsl.versionString(),
Long.toHexString(OpenSsl.version() & 0xFFFFFFFFL));
}
}
private static boolean isEpollAvailable() {
        // Netty epoll transport does not work with WSL (Windows Subsystem for Linux) yet.
// TODO(trustin): Re-enable on WSL if https://github.com/Microsoft/WSL/issues/1982 is resolved.
return Epoll.isAvailable() && !HAS_WSLENV;
}
/**
* Returns whether the verbose exception mode is enabled. When enabled, the exceptions frequently thrown by
* Armeria will have full stack trace. When disabled, such exceptions will have empty stack trace to
* eliminate the cost of capturing the stack trace.
*
* <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.verboseExceptions=true}
* JVM option to enable it.
*/
public static boolean verboseExceptions() {
return VERBOSE_EXCEPTIONS;
}
/**
* Returns whether the verbose response mode is enabled. When enabled, the server responses will contain
* the exception type and its full stack trace, which may be useful for debugging while potentially
* insecure. When disabled, the server responses will not expose such server-side details to the client.
*
* <p>This flag is disabled by default. Specify the {@code -Dcom.linecorp.armeria.verboseResponses=true}
* JVM option to enable it.
*/
public static boolean verboseResponses() {
return VERBOSE_RESPONSES;
}
/**
* Returns whether the JNI-based {@code /dev/epoll} socket I/O is enabled. When enabled on Linux, Armeria
* uses {@code /dev/epoll} directly for socket I/O. When disabled, {@code java.nio} socket API is used
* instead.
*
* <p>This flag is enabled by default for supported platforms. Specify the
* {@code -Dcom.linecorp.armeria.useEpoll=false} JVM option to disable it.
*/
public static boolean useEpoll() {
return USE_EPOLL;
}
/**
* Returns whether the JNI-based TLS support with OpenSSL is enabled. When enabled, Armeria uses OpenSSL
* for processing TLS connections. When disabled, the current JVM's default {@link SSLEngine} is used
* instead.
*
* <p>This flag is enabled by default for supported platforms. Specify the
* {@code -Dcom.linecorp.armeria.useOpenSsl=false} JVM option to disable it.
*/
public static boolean useOpenSsl() {
return USE_OPENSSL;
}
/**
* Returns the default server-side maximum number of connections.
*
* <p>The default value of this flag is {@value #DEFAULT_MAX_NUM_CONNECTIONS}. Specify the
* {@code -Dcom.linecorp.armeria.maxNumConnections=<integer>} JVM option to override
* the default value.
*/
public static int maxNumConnections() {
return MAX_NUM_CONNECTIONS;
}
/**
* Returns the default number of {@linkplain CommonPools#workerGroup() common worker group} threads.
* Note that this value has effect only if a user did not specify a worker group.
*
* <p>The default value of this flag is {@code 2 * <numCpuCores>}. Specify the
* {@code -Dcom.linecorp.armeria.numCommonWorkers=<integer>} JVM option to override the default value.
*/
public static int numCommonWorkers() {
return NUM_COMMON_WORKERS;
}
/**
* Returns the default number of {@linkplain CommonPools#blockingTaskExecutor() blocking task executor}
* threads. Note that this value has effect only if a user did not specify a blocking task executor.
*
* <p>The default value of this flag is {@value #DEFAULT_NUM_COMMON_BLOCKING_TASK_THREADS}. Specify the
* {@code -Dcom.linecorp.armeria.numCommonBlockingTaskThreads=<integer>} JVM option to override
* the default value.
*/
public static int numCommonBlockingTaskThreads() {
return NUM_COMMON_BLOCKING_TASK_THREADS;
}
/**
* Returns the default server-side maximum length of a request. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_REQUEST_LENGTH}. Specify the
* {@code -Dcom.linecorp.armeria.defaultMaxRequestLength=<long>} to override the default value.
* {@code 0} disables the length limit.
*/
public static long defaultMaxRequestLength() {
return DEFAULT_MAX_REQUEST_LENGTH;
}
/**
* Returns the default client-side maximum length of a response. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_RESPONSE_LENGTH}. Specify the
* {@code -Dcom.linecorp.armeria.defaultMaxResponseLength=<long>} to override the default value.
* {@code 0} disables the length limit.
*/
public static long defaultMaxResponseLength() {
return DEFAULT_MAX_RESPONSE_LENGTH;
}
/**
* Returns the default server-side timeout of a request in milliseconds. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_REQUEST_TIMEOUT_MILLIS}.
* Specify the {@code -Dcom.linecorp.armeria.defaultRequestTimeoutMillis=<long>} to override
* the default value. {@code 0} disables the timeout.
*/
public static long defaultRequestTimeoutMillis() {
return DEFAULT_REQUEST_TIMEOUT_MILLIS;
}
/**
* Returns the default client-side timeout of a response in milliseconds. Note that this value has effect
* only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_RESPONSE_TIMEOUT_MILLIS}.
* Specify the {@code -Dcom.linecorp.armeria.defaultResponseTimeoutMillis=<long>} to override
* the default value. {@code 0} disables the timeout.
*/
public static long defaultResponseTimeoutMillis() {
return DEFAULT_RESPONSE_TIMEOUT_MILLIS;
}
/**
* Returns the default client-side timeout of a socket connection attempt in milliseconds.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_CONNECT_TIMEOUT_MILLIS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultConnectTimeoutMillis=<integer>} JVM option to override
* the default value.
*/
public static long defaultConnectTimeoutMillis() {
return DEFAULT_CONNECT_TIMEOUT_MILLIS;
}
/**
* Returns the default server-side idle timeout of a connection for keep-alive in milliseconds.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultServerIdleTimeoutMillis=<integer>} JVM option to override
* the default value.
*/
public static long defaultServerIdleTimeoutMillis() {
return DEFAULT_SERVER_IDLE_TIMEOUT_MILLIS;
}
/**
* Returns the default client-side idle timeout of a connection for keep-alive in milliseconds.
* Note that this value has effect only if a user did not specify it.
*
     * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultClientIdleTimeoutMillis=<integer>} JVM option to override
* the default value.
*/
public static long defaultClientIdleTimeoutMillis() {
return DEFAULT_CLIENT_IDLE_TIMEOUT_MILLIS;
}
/**
* Returns the default maximum length of an HTTP/1 response initial line.
* Note that this value has effect only if a user did not specify it.
*
     * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_INITIAL_LINE_LENGTH}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxInitialLineLength=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp1MaxInitialLineLength() {
return DEFAULT_MAX_HTTP1_INITIAL_LINE_LENGTH;
}
/**
* Returns the default maximum length of all headers in an HTTP/1 response.
* Note that this value has effect only if a user did not specify it.
*
     * <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_HEADER_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxHeaderSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp1MaxHeaderSize() {
return DEFAULT_MAX_HTTP1_HEADER_SIZE;
}
/**
* Returns the default maximum length of each chunk in an HTTP/1 response content.
* The content or a chunk longer than this value will be split into smaller chunks
* so that their lengths never exceed it.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP1_MAX_CHUNK_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp1MaxChunkSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp1MaxChunkSize() {
return DEFAULT_HTTP1_MAX_CHUNK_SIZE;
}
/**
* Returns the default value of the {@link ClientFactoryBuilder#useHttp2Preface(boolean)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>This flag is enabled by default. Specify the
* {@code -Dcom.linecorp.armeria.defaultUseHttp2Preface=false} JVM option to disable it.
*/
public static boolean defaultUseHttp2Preface() {
return DEFAULT_USE_HTTP2_PREFACE;
}
/**
* Returns the default value of the {@link ClientFactoryBuilder#useHttp1Pipelining(boolean)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>This flag is disabled by default. Specify the
* {@code -Dcom.linecorp.armeria.defaultUseHttp1Pipelining=true} JVM option to enable it.
*/
public static boolean defaultUseHttp1Pipelining() {
return DEFAULT_USE_HTTP1_PIPELINING;
}
/**
* Returns the default value of the {@link ServerBuilder#http2InitialConnectionWindowSize(int)} and
* {@link ClientFactoryBuilder#http2InitialConnectionWindowSize(int)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2InitialConnectionWindowSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp2InitialConnectionWindowSize() {
return DEFAULT_HTTP2_INITIAL_CONNECTION_WINDOW_SIZE;
}
/**
* Returns the default value of the {@link ServerBuilder#http2InitialStreamWindowSize(int)} and
* {@link ClientFactoryBuilder#http2InitialStreamWindowSize(int)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2InitialStreamWindowSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp2InitialStreamWindowSize() {
return DEFAULT_HTTP2_INITIAL_STREAM_WINDOW_SIZE;
}
/**
* Returns the default value of the {@link ServerBuilder#http2MaxFrameSize(int)} and
* {@link ClientFactoryBuilder#http2MaxFrameSize(int)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_FRAME_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxFrameSize=<integer>} JVM option
* to override the default value.
*/
public static int defaultHttp2MaxFrameSize() {
return DEFAULT_HTTP2_MAX_FRAME_SIZE;
}
/**
* Returns the default value of the {@link ServerBuilder#http2MaxStreamsPerConnection(long)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxStreamsPerConnection=<integer>} JVM option
* to override the default value.
*/
public static long defaultHttp2MaxStreamsPerConnection() {
return DEFAULT_HTTP2_MAX_STREAMS_PER_CONNECTION;
}
/**
* Returns the default value of the {@link ServerBuilder#http2MaxHeaderListSize(long)} and
* {@link ClientFactoryBuilder#http2MaxHeaderListSize(long)} option.
* Note that this value has effect only if a user did not specify it.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE}.
* Specify the {@code -Dcom.linecorp.armeria.defaultHttp2MaxHeaderListSize=<integer>} JVM option
* to override the default value.
*/
public static long defaultHttp2MaxHeaderListSize() {
return DEFAULT_HTTP2_MAX_HEADER_LIST_SIZE;
}
/**
* Returns the default value of the {@code backoffSpec} parameter when instantiating a {@link Backoff}
* using {@link Backoff#of(String)}. Note that this value has effect only if a user did not specify the
* {@code defaultBackoffSpec} in the constructor call.
*
* <p>The default value of this flag is {@value DEFAULT_DEFAULT_BACKOFF_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.defaultBackoffSpec=<spec>} JVM option to override the default value.
*/
public static String defaultBackoffSpec() {
return DEFAULT_BACKOFF_SPEC;
}
/**
* Returns the default maximum number of total attempts. Note that this value has effect only if a user
* did not specify it when creating a {@link RetryingHttpClient} or a {@link RetryingRpcClient}.
*
* <p>The default value of this flag is {@value #DEFAULT_DEFAULT_MAX_TOTAL_ATTEMPTS}. Specify the
* {@code -Dcom.linecorp.armeria.defaultMaxTotalAttempts=<integer>} JVM option to
* override the default value.
*/
public static int defaultMaxTotalAttempts() {
return DEFAULT_MAX_TOTAL_ATTEMPTS;
}
/**
* Returns the value of the {@code routeCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link Caffeine#from(String)} for routing a request. The {@link Cache}
* would hold the mappings of {@link PathMappingContext} and the designated {@link ServiceConfig}
* for a request to improve server performance.
*
* <p>The default value of this flag is {@value DEFAULT_ROUTE_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.routeCache=<spec>} JVM option to override the default value.
* Also, specify {@code -Dcom.linecorp.armeria.routeCache=off} JVM option to disable it.
*/
public static Optional<String> routeCacheSpec() {
return ROUTE_CACHE_SPEC;
}
/**
* Returns the value of the {@code parsedPathCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link Caffeine#from(String)} mapping raw HTTP paths to parsed pair of
* path and query, after validation.
*
* <p>The default value of this flag is {@value DEFAULT_PARSED_PATH_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.parsedPathCache=<spec>} JVM option to override the default value.
* Also, specify {@code -Dcom.linecorp.armeria.parsedPathCache=off} JVM option to disable it.
*/
public static Optional<String> parsedPathCacheSpec() {
return PARSED_PATH_CACHE_SPEC;
}
/**
* Returns the value of the {@code headerValueCache} parameter. It would be used to create a Caffeine
* {@link Cache} instance using {@link Caffeine#from(String)} mapping raw HTTP ascii header values to
* {@link String}.
*
* <p>The default value of this flag is {@value DEFAULT_HEADER_VALUE_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.headerValueCache=<spec>} JVM option to override the default value.
* Also, specify {@code -Dcom.linecorp.armeria.headerValueCache=off} JVM option to disable it.
*/
public static Optional<String> headerValueCacheSpec() {
return HEADER_VALUE_CACHE_SPEC;
}
/**
* Returns the value of the {@code cachedHeaders} parameter which contains a comma-separated list of
* headers whose values are cached using {@code headerValueCache}.
*
* <p>The default value of this flag is {@value DEFAULT_CACHED_HEADERS}. Specify the
* {@code -Dcom.linecorp.armeria.cachedHeaders=<csv>} JVM option to override the default value.
*/
public static List<String> cachedHeaders() {
return CACHED_HEADERS;
}
/**
* Returns the value of the {@code compositeServiceCache} parameter. It would be used to create a
* Caffeine {@link Cache} instance using {@link Caffeine#from(String)} for routing a request.
* The {@link Cache} would hold the mappings of {@link PathMappingContext} and the designated
* {@link ServiceConfig} for a request to improve server performance.
*
* <p>The default value of this flag is {@value DEFAULT_COMPOSITE_SERVICE_CACHE_SPEC}. Specify the
* {@code -Dcom.linecorp.armeria.compositeServiceCache=<spec>} JVM option to override the default value.
* Also, specify {@code -Dcom.linecorp.armeria.compositeServiceCache=off} JVM option to disable it.
*/
public static Optional<String> compositeServiceCacheSpec() {
return COMPOSITE_SERVICE_CACHE_SPEC;
}
/**
* Returns the verbosity of exceptions logged by annotated HTTP services. The value of this property
* is one of the following:
* <ul>
* <li>{@link ExceptionVerbosity#ALL} - logging all exceptions raised from annotated HTTP services</li>
* <li>{@link ExceptionVerbosity#UNHANDLED} - logging exceptions which are not handled by
* {@link ExceptionHandler}s provided by a user and are not well-known exceptions
* <li>{@link ExceptionVerbosity#NONE} - no logging exceptions</li>
* </ul>
* A log message would be written at {@code WARN} level.
*
* <p>The default value of this flag is {@value DEFAULT_ANNOTATED_SERVICE_EXCEPTION_VERBOSITY}.
* Specify the
* {@code -Dcom.linecorp.armeria.annotatedServiceExceptionVerbosity=<all|unhandled|none>} JVM option
* to override the default value.
*
* @see ExceptionVerbosity
*/
public static ExceptionVerbosity annotatedServiceExceptionVerbosity() {
return ANNOTATED_SERVICE_EXCEPTION_VERBOSITY;
}
private static Optional<String> caffeineSpec(String name, String defaultValue) {
final String spec = get(name, defaultValue, value -> {
try {
if (!"off".equals(value)) {
CaffeineSpec.parse(value);
}
return true;
} catch (Exception e) {
return false;
}
});
return "off".equals(spec) ? Optional.empty()
: Optional.of(spec);
}
private static ExceptionVerbosity exceptionLoggingMode(String name, String defaultValue) {
final String mode = getNormalized(name, defaultValue,
value -> Arrays.stream(ExceptionVerbosity.values())
.anyMatch(v -> v.name().equalsIgnoreCase(value)));
return ExceptionVerbosity.valueOf(mode.toUpperCase());
}
private static boolean getBoolean(String name, boolean defaultValue) {
return getBoolean(name, defaultValue, value -> true);
}
private static boolean getBoolean(String name, boolean defaultValue, Predicate<Boolean> validator) {
return "true".equals(getNormalized(name, String.valueOf(defaultValue), value -> {
if ("true".equals(value)) {
return validator.test(true);
}
if ("false".equals(value)) {
return validator.test(false);
}
return false;
}));
}
private static int getInt(String name, int defaultValue, IntPredicate validator) {
return Integer.parseInt(getNormalized(name, String.valueOf(defaultValue), value -> {
try {
return validator.test(Integer.parseInt(value));
} catch (Exception e) {
// null or non-integer
return false;
}
}));
}
private static long getLong(String name, long defaultValue, LongPredicate validator) {
return Long.parseLong(getNormalized(name, String.valueOf(defaultValue), value -> {
try {
return validator.test(Long.parseLong(value));
} catch (Exception e) {
// null or non-integer
return false;
}
}));
}
private static String get(String name, String defaultValue, Predicate<String> validator) {
final String fullName = PREFIX + name;
final String value = System.getProperty(fullName);
if (value == null) {
logger.info("{}: {} (default)", fullName, defaultValue);
return defaultValue;
}
if (validator.test(value)) {
logger.info("{}: {}", fullName, value);
return value;
}
logger.info("{}: {} (default instead of: {})", fullName, defaultValue, value);
return defaultValue;
}
private static String getNormalized(String name, String defaultValue, Predicate<String> validator) {
final String fullName = PREFIX + name;
final String value = getLowerCased(fullName);
if (value == null) {
logger.info("{}: {} (default)", fullName, defaultValue);
return defaultValue;
}
if (validator.test(value)) {
logger.info("{}: {}", fullName, value);
return value;
}
logger.info("{}: {} (default instead of: {})", fullName, defaultValue, value);
return defaultValue;
}
@Nullable
private static String getLowerCased(String fullName) {
String value = System.getProperty(fullName);
if (value != null) {
value = Ascii.toLowerCase(value);
}
return value;
}
private Flags() {}
}
| [
"\"WSLENV\""
]
| []
| [
"WSLENV"
]
| [] | ["WSLENV"] | java | 1 | 0 | |
setup.py | import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
if platform.system() == "Windows":
cmake_version = LooseVersion(
re.search(r"version\s*([\d.]+)", out.decode()).group(1)
)
if cmake_version < "3.9.0":
raise RuntimeError("CMake >= 3.9.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
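        # Configure and build this extension with CMake, emitting the compiled
        # library into the affine_transform package directory computed below.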
extdir = (
os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
+ "/affine_transform"
).replace("\\", "/")
cmake_args = [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
"-DPYTHON_EXECUTABLE=" + sys.executable,
]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
if platform.system() == "Windows":
cmake_args += [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
]
if sys.maxsize > 2 ** 32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", "-j2"]
env = os.environ.copy()
# env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
# self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env
)
subprocess.check_call(
["cmake", "--build", "."] + build_args, cwd=self.build_temp
)
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
from subprocess import check_output, CalledProcessError
try:
# If in git repository, get git label
v = (
check_output(
["git", "describe", "--always", "--dirty", "--tags"], cwd=this_directory
)
.decode("utf-8")
.strip()
)
if not "." in v:
v = "0.0.0"
with open(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"affine_transform",
"version.txt",
),
"w",
encoding="utf-8",
) as f:
f.write(v)
except CalledProcessError:
# Otherwise get version from version.txt (sdist for example)
with open(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"affine_transform",
"version.txt",
),
encoding="utf-8",
) as f:
v = f.read()
setup(
name="affine_transform",
author="NOhs, TobelRunner",
version=v,
url="https://github.com/NOhs/affine_transform_nd",
description="Easy to use multi-core affine transformations",
python_requires='>=3.6',
long_description=long_description,
license="MIT",
ext_modules=[CMakeExtension("affine_transform")],
package_data={"affine_transform": ["version.txt"]},
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: C++",
"License :: OSI Approved :: MIT License",
],
packages=find_packages(),
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
install_requires=["numpy"],
setup_requires=["pytest-runner"],
tests_require=["numpy", "mgen", "pytest"],
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/yolo_detector/work.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import time
from celery import Celery
from celery.signals import worker_process_init
from celery.concurrency import asynpool
from yolo import Yolo
from face_filter import FaceFilterClass
from scipy import misc
# deeepeye
asynpool.PROC_ALIVE_TIMEOUT = 60.0 # set this long enough
redis_host = os.getenv("REDIS_HOST", default="localhost")
redis_port = os.getenv("REDIS_PORT", default="6379")
ENABLE_STATIC_OBJECT_FILTER = json.loads(os.getenv("ENABLE_STATIC_OBJECT_FILTER", default="false").lower())
deepeye = Celery('upload_api-v2',
broker='redis://guest@'+redis_host+':'+redis_port+'/0',
backend='redis://guest@'+redis_host+':'+redis_port+'/0')
@worker_process_init.connect()
def setup(sender=None, **kwargs):
global yolo
yolo = Yolo()
global face_filter
face_filter = FaceFilterClass()
#warm up
yolo.crop_persons("test3.png")
@deepeye.task
def detect(image_path, trackerid, ts, cameraId):
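    """Celery task: run YOLO person detection on the image at `image_path`.
    When static-object filtering is enabled, the frame is first resized and run
    through motion detection so static frames can be recorded separately."""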
if ENABLE_STATIC_OBJECT_FILTER:
img = misc.imread(image_path)
result, resized_img = face_filter.resize_image(img, 480)
if result is not None:
print("Resize image error!")
return
star_time = time.time()
result, rects, min_value, max_value = face_filter.motion_detect(cameraId, resized_img)
end_time = time.time()
print('Performance: motion_detect is {}S'.format(end_time-star_time))
face_filter.save_static_image(cameraId, result, image_path, min_value, max_value)
result = yolo.detect(image_path, trackerid, ts, cameraId, face_filter)
else:
result = yolo.detect(image_path, trackerid, ts, cameraId, face_filter=None)
return result
deepeye.conf.task_routes = {
'upload_api-v2.detect': {'queue': 'detect'}
}
if __name__ == '__main__':
deepeye.start()
| []
| []
| [
"REDIS_PORT",
"REDIS_HOST",
"ENABLE_STATIC_OBJECT_FILTER"
]
| [] | ["REDIS_PORT", "REDIS_HOST", "ENABLE_STATIC_OBJECT_FILTER"] | python | 3 | 0 | |
api_test.go | package larkslim_test
import (
"bytes"
"encoding/json"
"image"
"image/color"
"image/png"
"io"
"math/rand"
"os"
"testing"
"time"
"github.com/caiguanhao/larkslim"
)
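// TestAPI exercises the live Lark API and expects the LARK_APP_ID and
// LARK_APP_SECRET environment variables to be set.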
func TestAPI(t *testing.T) {
appId := os.Getenv("LARK_APP_ID")
appSecret := os.Getenv("LARK_APP_SECRET")
l := larkslim.API{
AppId: appId,
AppSecret: appSecret,
}
_, err := l.GetAccessToken()
if err != nil {
t.Fatal(err)
}
t.Log("GetAccessToken() passed")
chats, err := l.ListAllChats()
if err != nil {
t.Fatal(err)
}
t.Log("ListAllChats() passed")
if len(chats) > 0 {
chat, err := l.GetChatInfo(chats[0].ChatId)
if err != nil {
t.Fatal(err)
}
t.Log("GetChatInfo() passed")
if len(chat.Members) > 0 {
user := chat.Members[0].OpenId
users := []string{user}
_, err := l.GetUserInfo(users)
if err != nil {
t.Fatal(err)
}
err = l.AddUsersToChat(chat.ChatId, users)
if err != nil {
t.Fatal(err)
}
t.Log("AddUsersToChat() passed")
key, err := l.UploadMessageImage(randomImage())
if err != nil {
t.Fatal(err)
}
t.Log("UploadMessageImage() passed")
err = l.SendImageMessage(user, key)
if err != nil {
t.Fatal(err)
}
t.Log("SendImageMessage() passed")
}
}
}
func ExamplePost() {
post := larkslim.Post{
"zh_cn": larkslim.PostOfLocale{
Title: "post",
Content: larkslim.PostLines{
{
{
Tag: "text",
Text: "Name: ",
},
{
Tag: "a",
Text: "Hello",
Href: "https://www.google.com",
},
},
},
},
}
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", "\t")
enc.Encode(post)
// Output:
// {
// "zh_cn": {
// "title": "post",
// "content": [
// [
// {
// "tag": "text",
// "text": "Name: "
// },
// {
// "tag": "a",
// "text": "Hello",
// "href": "https://www.google.com"
// }
// ]
// ]
// }
// }
}
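// randomImage returns a small PNG with red pixels scattered at random
// positions, used as a throwaway payload for UploadMessageImage in the test.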
func randomImage() io.Reader {
rand.Seed(time.Now().Unix())
n := rand.Perm(200)
img := image.NewRGBA(image.Rect(0, 0, 200, 200))
for i := 0; i < len(n)/2; i++ {
img.Set(n[2*i], n[2*i+1], color.RGBA{255, 0, 0, 255})
}
var w bytes.Buffer
err := png.Encode(&w, img)
if err != nil {
panic(err)
}
return &w
}
| [
"\"LARK_APP_ID\"",
"\"LARK_APP_SECRET\""
]
| []
| [
"LARK_APP_ID",
"LARK_APP_SECRET"
]
| [] | ["LARK_APP_ID", "LARK_APP_SECRET"] | go | 2 | 0 | |
hydrus/conf.py | """
Global variables are loaded or set here:
DEBUG
PORT
API_NAME
DB_URL
APIDOC_OBJ
HYDRUS_SERVER_URL
FOUND_DOC
"""
import os
import json
import logging
import yaml
from os.path import abspath, dirname
from pathlib import Path
from importlib.machinery import SourceFileLoader
# NOTE: the YAML branch of load_apidoc() also needs a `parse` helper from the
# Hydra/OpenAPI doc parser used by the project; import it here if required.
logger = logging.getLogger(__file__)
try:
DEBUG = bool(os.environ['DEBUG'])
except KeyError:
DEBUG = False
# load form environment (as many globals as possible shall be in
# environment configuration)
PORT = int(os.environ['PORT']) if 'PORT' in dict(os.environ).keys() else 8080
API_NAME = os.environ['API_NAME'] if 'API_NAME' in dict(os.environ).keys() else 'api'
DB_URL = os.environ['DB_URL'] if 'DB_URL' in dict(os.environ).keys() else 'sqlite:///database.db'
def get_apidoc_path():
"""
Get the path of the apidoc.
:return - Tuple (path, boolean). path denotes path of the apidoc.
If apidoc is not present at specified path then it falls back at sample apidoc.
boolean is true if the apidoc is present at the specified path.
boolean is false if sample apidoc is being used.
"""
cwd_path = Path(dirname(dirname(abspath(__file__))))
try:
apidoc_env = os.environ['APIDOC_REL_PATH']
apidoc_path = cwd_path / Path(apidoc_env)
found_doc = True
except KeyError:
found_doc = False
apidoc_path = cwd_path / 'hydrus' / 'samples' / 'hydra_doc_sample.py'
return (apidoc_path, found_doc)
def load_apidoc(path):
"""
Parses docs of .jsonld, .py, .yaml format and loads apidoc from the given path.
:param path - Path for the apidoc to be loaded
:return - apidoc
:Raises:
FileNotFoundError: If the wrong path of hydradoc is specified.
BaseException: If hydradoc is specified in wrong format.
"""
path = str(path)
try:
apidoc_format = path.split(".")[-1]
if apidoc_format == 'jsonld':
with open(path, 'r') as f:
api_doc = json.load(f)
elif apidoc_format == 'py':
api_doc = SourceFileLoader(
'doc', path).load_module().doc
elif apidoc_format == 'yaml':
with open(path, 'r') as stream:
api_doc = parse(yaml.load(stream))
else:
            raise ValueError(
                "Error - hydradoc format not supported. "
                "The supported formats are .py, .jsonld and .yaml")
logger.info(f'APIDOC path loaded from: {path}')
return api_doc
except FileNotFoundError:
logger.critical(f'No Hydra ApiDoc file to load has been found'
f' at {path}. Cannot set APIDOC_OBJ')
raise
except BaseException:
logger.critical("Problem parsing specified hydradoc file")
raise
(path, FOUND_DOC) = get_apidoc_path()
APIDOC_OBJ = load_apidoc(path)
HYDRUS_SERVER_URL = f'http://localhost:{PORT}/'
| []
| []
| [
"PORT",
"DEBUG",
"API_NAME",
"DB_URL",
"APIDOC_REL_PATH"
]
| [] | ["PORT", "DEBUG", "API_NAME", "DB_URL", "APIDOC_REL_PATH"] | python | 5 | 0 | |
mucp/transport/memory/memory_test.go | package memory
import (
"os"
"testing"
"github.com/micro/network/mucp/transport"
)
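// TestMemoryTransport exercises a full listen/accept/dial round-trip with a
// simple ping/pong exchange over the in-memory transport.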
func TestMemoryTransport(t *testing.T) {
tr := NewTransport()
// bind / listen
l, err := tr.Listen("127.0.0.1:8080")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l.Close()
// accept
go func() {
if err := l.Accept(func(sock transport.Socket) {
for {
var m transport.Message
if err := sock.Recv(&m); err != nil {
return
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Server Received %s", string(m.Body))
}
if err := sock.Send(&transport.Message{
Body: []byte(`pong`),
}); err != nil {
return
}
}
}); err != nil {
t.Fatalf("Unexpected error accepting %v", err)
}
}()
// dial
c, err := tr.Dial("127.0.0.1:8080")
if err != nil {
t.Fatalf("Unexpected error dialing %v", err)
}
defer c.Close()
// send <=> receive
for i := 0; i < 3; i++ {
if err := c.Send(&transport.Message{
Body: []byte(`ping`),
}); err != nil {
return
}
var m transport.Message
if err := c.Recv(&m); err != nil {
return
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Client Received %s", string(m.Body))
}
}
}
func TestListener(t *testing.T) {
tr := NewTransport()
// bind / listen on random port
l, err := tr.Listen(":0")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l.Close()
// try again
l2, err := tr.Listen(":0")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l2.Close()
// now make sure it still fails
l3, err := tr.Listen(":8080")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l3.Close()
if _, err := tr.Listen(":8080"); err == nil {
t.Fatal("Expected error binding to :8080 got nil")
}
}
| [
"\"IN_TRAVIS_CI\"",
"\"IN_TRAVIS_CI\""
]
| []
| [
"IN_TRAVIS_CI"
]
| [] | ["IN_TRAVIS_CI"] | go | 1 | 0 | |
SecondTest.py | from csv import DictReader
from datetime import datetime
import logging
from pprint import pprint
import os
import sys
from time import sleep
import click
import requests
from rich.console import Console
from rich.logging import RichHandler
from rich.prompt import Confirm, Prompt
from rich.progress import Progress
from rich.status import Status
from MapItFastLib.Projects import Project
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command("AgTerraImport", context_settings=CONTEXT_SETTINGS, help=f"CSV Importer for AgTerra")
@click.option("--csv", "csv_file", type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True,
allow_dash=True), required=True)
@click.option("--project-name", type=click.STRING, required=True)
@click.option("--username", default=lambda: os.environ.get("AGTERRAUSER", ""))
@click.option("--password", hide_input=True, default=lambda: os.environ.get("AGTERRAPASS", ""), show_default=False)
@click.pass_context
def main(ctx, csv_file, project_name, username, password):
"""
    First pass at the API. This is a bare-bones first version; it will be upgraded to a full class and library later on
- Input:
- - Path to CSV
- - Project Name or ID
- - Folder? This is a maybe
- - Username
- - Password
- Read points from CSV
- - LAT, LONG, NAME
- Append points to project, one at a time
"""
# List of projects
proj_obj_list = list()
# If the given project is found, set to True
proj_title_found = False
# Create the console
console = Console()
debug = False
show_off_mode = False
FORMAT = "%(message)s"
if debug:
# noinspection PyArgumentList
logging.basicConfig(level="DEBUG", format=FORMAT, datefmt="[%X]",
handlers=[RichHandler(console=console, rich_tracebacks=True)])
else:
logging.basicConfig(level="WARNING", format=FORMAT, datefmt="[%X]",
handlers=[RichHandler(console=console, rich_tracebacks=True)])
# csv_dict = get_csv_coordinates(csv_file)
with requests.Session() as sess:
sess.auth = (username, password)
with Status("[magenta]Connecting", console=console, spinner='arrow3') as status:
status.update(f"Requesting Project List From Server")
if show_off_mode:
sleep(0.7)
projects_list = get_projects(sess=sess)
# Turn each project from JSON into an object we can actually use
for raw_proj_dict in projects_list:
proj_obj_list.append(Project(raw_data=raw_proj_dict))
# Look for project by title
for proj in proj_obj_list:
if proj.Title == project_name:
# Project found, we can move on
# Save the object for later
project_by_name = proj
console.log(f"Project {project_name} has been found")
proj_title_found = True
# If project not found, print error and exit
if not proj_title_found:
console.log(f"Project '{project_name}' wasn't found as an option")
for proj in proj_obj_list:
console.log(f"{proj.Title}")
sys.exit(1)
status.update(f"Opening CSV File")
if show_off_mode:
sleep(2)
with open(csv_file, 'r') as f:
csv_dict = DictReader(f)
for row_dict in csv_dict:
if "Title" in row_dict:
# If the title column exists, append it to the request
# console.log(f"Found Title!")
title = row_dict.get("Title")
else:
title = ""
if "Latitude" in row_dict:
# Make sure Latitude exists before we try to call against it
lat = row_dict.get("Latitude")
else:
console.log(f"Latitude doesn't exist in the CSV, check the file and try again")
if "Longitude" in row_dict:
# Make sure Latitude exists before we try to call against it
long = row_dict.get("Longitude")
else:
console.log(f"Longitude doesn't exist in the CSV, check the file and try again")
if "Description" in row_dict:
description = row_dict.get("Description")
else:
description = ""
# console.log(f"Lat: {lat}\nLong: {long}")
# ToDo: Check if the project already has the points in question
status.update(f"Adding Lat: {lat} and Long: {long}")
post_coordinates(sess=sess, console=console, status=status, lat=lat, long=long,
projectID=project_by_name.ProjectId, title=title, description=description)
if show_off_mode:
sleep(1)
def post_coordinates(sess: requests.Session, console: Console, status: Status, lat: float, long: float, projectID: int, title: str, description: str,
elevation: int = 0, iconid: int = 3):
"""
Function to post the variables to the API
"""
base_url = f"https://mapitfast.agterra.com/api/Points"
post_data_dict = {"ProjectID": projectID,
"IconId": iconid,
"Title": title,
"Description": description,
"ItemTime": datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
"Latitude": lat,
"Longitude": long,
"Elevation": elevation}
resp = sess.post(base_url, data=post_data_dict)
if resp.status_code == 201:
# console.log(f"Added coordinates to project")
pass # console.log(f"It worked!")
else:
console.log(f"{resp.json()}")
console.log(f"Something went wrong, check error code {resp.status_code}")
def get_csv_coordinates(path: str):
"""
Quick function to validate the lat,long,title columns of the CSV
Also return the CSV to the user
"""
csv_dict = dict()
with open(path, 'r') as f:
csv_dict_tmp = DictReader(f)
for i in csv_dict_tmp:
csv_dict.update(i)
# ToDo: Error trap for the correct column names
# if "Longitude" not in csv_dict:
# logging.error(f"CSV Error! Longitude column not found!")
# sys.exit(1)
# if "Latitude" not in csv_dict:
# logging.error(f"CSV Error! Latitude column not found!")
# sys.exit(1)
# if "Title" not in csv_dict:
# logging.warning(f"CSV Warning! Title not found! ")
return csv_dict
def get_projects(sess: requests.Session, url: str = f"https://mapitfast.agterra.com/api/Projects"):
"""
Helper function to get projects list
"""
# Get the folder list
raw_resp = get_url(sess=sess, url=url)
json_data = raw_resp.json()
return json_data
def get_url(sess: requests.Session, url: str):
"""
Basic function to make getting various URLs easier
"""
return sess.get(url=url)
if __name__ == "__main__":
main() | []
| []
| [
"AGTERRAUSER",
"AGTERRAPASS"
]
| [] | ["AGTERRAUSER", "AGTERRAPASS"] | python | 2 | 0 | |
src/main/java/com/rpg2014/wrappers/EncryptionWrapper.java | package com.rpg2014.wrappers;
import com.amazonaws.encryptionsdk.AwsCrypto;
import com.amazonaws.encryptionsdk.CryptoResult;
import com.amazonaws.encryptionsdk.kms.KmsMasterKey;
import com.amazonaws.encryptionsdk.kms.KmsMasterKeyProvider;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.rpg2014.model.EncryptionResult;
import com.rpg2014.model.journal.JournalEntry;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.core.SdkBytes;
import javax.ws.rs.ForbiddenException;
import javax.ws.rs.InternalServerErrorException;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@Slf4j
public class EncryptionWrapper {
private static final String KEY_ARN = System.getenv("key_arn");
private static final String USERNAME = "username";
@Getter
private static EncryptionWrapper ourInstance = new EncryptionWrapper();
private final AwsCrypto crypto;
private final KmsMasterKeyProvider keyProvider;
private final ObjectMapper jsonObjectMapper;
private EncryptionWrapper() {
this.crypto = new AwsCrypto();
this.keyProvider = KmsMasterKeyProvider.builder().withKeysForEncryption(KEY_ARN).build();
ObjectMapper mapper = new ObjectMapper();
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
mapper.setSerializationInclusion(JsonInclude.Include.ALWAYS);
mapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
mapper.registerModule(new JavaTimeModule());
mapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
this.jsonObjectMapper = mapper;
}
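    /**
     * Encrypts the journal entries under the configured KMS master key, binding the username into
     * the encryption context so that {@link #decryptBytesToJournalList} can verify ownership.
     */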
public EncryptionResult encryptJournalEntries(List<JournalEntry> journalEntryList, final String username) {
final Map<String, String> context = Collections.singletonMap(USERNAME, username);
try {
String listString = jsonObjectMapper.writeValueAsString(journalEntryList);
CryptoResult<byte[], KmsMasterKey> result = crypto.encryptData(keyProvider, listString.getBytes(), context);
return EncryptionResult.builder().encryptedKey(KEY_ARN).encryptedBytes(result.getResult()).build();
} catch (JsonProcessingException e) {
e.printStackTrace();
throw new InternalServerErrorException("unable to marshall journal entry list into json.");
} catch (Exception e) {
log.error(e.getMessage());
throw new InternalServerErrorException(e);
}
}
public List<JournalEntry> decryptBytesToJournalList(SdkBytes bytes, String username) {
try {
CryptoResult<byte[], KmsMasterKey> result = crypto.decryptData(keyProvider, bytes.asByteArray());
if (!result.getEncryptionContext().get(USERNAME).equals(username)) {
throw new ForbiddenException("Username from request, " + username + "; does not equal username from entries, " + result.getEncryptionContext().get(USERNAME));
}
return jsonObjectMapper.readValue(new String(result.getResult()), new TypeReference<List<JournalEntry>>() {
});
} catch (IOException e) {
e.printStackTrace();
log.error(e.getMessage());
throw new InternalServerErrorException("Unable to decrypt/ unmarshal the entries for user" + username);
} catch (Exception e) {
log.error(e.getMessage());
throw new InternalServerErrorException(e);
}
}
}
| [
"\"key_arn\""
]
| []
| [
"key_arn"
]
| [] | ["key_arn"] | java | 1 | 0 | |
python/snippets/stt_streaming_recognize_vad_customization.py | #!/usr/bin/env python3
import sys
sys.path.append("..")
from tinkoff.cloud.stt.v1 import stt_pb2_grpc, stt_pb2
from auth import authorization_metadata
import grpc
import os
import wave
endpoint = os.environ.get("VOICEKIT_ENDPOINT") or "api.tinkoff.ai:443"
api_key = os.environ["VOICEKIT_API_KEY"]
secret_key = os.environ["VOICEKIT_SECRET_KEY"]
def build_first_request(sample_rate_hertz, num_channels):
request = stt_pb2.StreamingRecognizeRequest()
request.streaming_config.config.encoding = stt_pb2.AudioEncoding.LINEAR16
request.streaming_config.config.sample_rate_hertz = sample_rate_hertz
request.streaming_config.config.num_channels = num_channels
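    # VAD tuning: bound the length of each recognized utterance and control how
    # long (and how confidently) a silence must last before a phrase is cut.
    # The values below are illustrative; adjust them for your audio.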
request.streaming_config.config.vad_config.min_speech_duration = 1.0
request.streaming_config.config.vad_config.max_speech_duration = 30.0
request.streaming_config.config.vad_config.silence_duration_threshold = 3.0
request.streaming_config.config.vad_config.silence_prob_threshold = 0.2
return request
def generate_requests():
try:
with wave.open("../../audio/sample_3.wav") as f:
yield build_first_request(f.getframerate(), f.getnchannels())
frame_samples = f.getframerate()//10 # Send 100ms at a time
for data in iter(lambda:f.readframes(frame_samples), b''):
request = stt_pb2.StreamingRecognizeRequest()
request.audio_content = data
yield request
except Exception as e:
print("Got exception in generate_requests", e)
raise
def print_streaming_recognition_responses(responses):
for response in responses:
for result in response.results:
print("Channel", result.recognition_result.channel)
print("Phrase start:", result.recognition_result.start_time.ToTimedelta())
print("Phrase end: ", result.recognition_result.end_time.ToTimedelta())
for alternative in result.recognition_result.alternatives:
print('"' + alternative.transcript + '"')
print("------------------")
stub = stt_pb2_grpc.SpeechToTextStub(grpc.secure_channel(endpoint, grpc.ssl_channel_credentials()))
metadata = authorization_metadata(api_key, secret_key, "tinkoff.cloud.stt")
responses = stub.StreamingRecognize(generate_requests(), metadata=metadata)
print_streaming_recognition_responses(responses)
| []
| []
| [
"VOICEKIT_ENDPOINT",
"VOICEKIT_API_KEY",
"VOICEKIT_SECRET_KEY"
]
| [] | ["VOICEKIT_ENDPOINT", "VOICEKIT_API_KEY", "VOICEKIT_SECRET_KEY"] | python | 3 | 0 | |
cmd/grifter.go | package cmd
import (
"html/template"
"os"
"path"
"path/filepath"
"strings"
"sync"
"github.com/pkg/errors"
)
const exePath = ".grifter/main.go"
var once = &sync.Once{}
type grifter struct {
GriftsPackagePath string
CommandName string
Verbose bool
GriftsAbsolutePath string
}
func hasGriftDir(path string) bool {
stat, err := os.Stat(filepath.Join(path, "grifts"))
if err != nil {
if os.IsNotExist(err) {
return false
}
return false
}
if !stat.IsDir() {
return false
}
return true
}
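// newGrifter locates the "grifts" package relative to the working directory,
// handling both GOPATH-based layouts and projects outside of GOPATH.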
func newGrifter(name string) (*grifter, error) {
g := &grifter{
CommandName: name,
}
currentPath, err := os.Getwd()
if err != nil {
return g, errors.WithStack(err)
}
if strings.HasPrefix(currentPath, os.Getenv("GOPATH")) {
for !strings.HasSuffix(currentPath, "/src") && currentPath != "/" {
if hasGriftDir(currentPath) {
break
}
currentPath = filepath.Dir(currentPath)
}
p := strings.SplitN(currentPath, filepath.FromSlash("/src/"), 2)
if len(p) == 1 {
return g, errors.Errorf("There is no directory named 'grifts'. Run '%s init' or switch to the appropriate directory", name)
}
g.GriftsAbsolutePath = filepath.ToSlash(filepath.Join(currentPath, "grifts"))
g.GriftsPackagePath = filepath.ToSlash(filepath.Join(p[1], "grifts"))
} else {
		// is outside of GOPATH, don't loop to parent
if !hasGriftDir(currentPath) {
return g, errors.Errorf("There is no directory named 'grifts'. Run '%s init' or switch to the appropriate directory", name)
}
g.GriftsAbsolutePath = filepath.ToSlash(filepath.Join(currentPath, "grifts"))
g.GriftsPackagePath = filepath.ToSlash(filepath.Join(path.Base(currentPath), "grifts"))
}
return g, nil
}
func (g *grifter) Setup() error {
t, err := template.New("main").Parse(mainTmpl)
if err != nil {
return errors.WithStack(err)
}
err = os.MkdirAll(filepath.Dir(exePath), 0755)
if err != nil {
return errors.WithStack(err)
}
f, err := os.Create(exePath)
if err != nil {
return errors.WithStack(err)
}
err = t.Execute(f, g)
if err != nil {
return errors.WithStack(err)
}
return nil
}
func (g *grifter) TearDown() error {
return os.RemoveAll(filepath.Dir(exePath))
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
pkg/acquisition/modules/journalctl/journalctl_test.go | package journalctlacquisition
import (
"os"
"os/exec"
"testing"
"time"
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"gopkg.in/tomb.v2"
)
func TestBadConfiguration(t *testing.T) {
tests := []struct {
config string
expectedErr string
}{
{
config: `foobar: asd.log`,
expectedErr: "line 1: field foobar not found in type journalctlacquisition.JournalCtlConfiguration",
},
{
config: `
mode: tail
source: journalctl`,
expectedErr: "journalctl_filter is required",
},
{
config: `
mode: cat
source: journalctl
journalctl_filter:
- _UID=42`,
expectedErr: "",
},
}
subLogger := log.WithFields(log.Fields{
"type": "journalctl",
})
for _, test := range tests {
f := JournalCtlSource{}
err := f.Configure([]byte(test.config), subLogger)
if test.expectedErr != "" && err == nil {
t.Fatalf("Expected err %s but got nil !", test.expectedErr)
}
if test.expectedErr != "" {
assert.Contains(t, err.Error(), test.expectedErr)
}
}
}
func TestConfigureDSN(t *testing.T) {
tests := []struct {
dsn string
expectedErr string
}{
{
dsn: "asd://",
expectedErr: "invalid DSN asd:// for journalctl source, must start with journalctl://",
},
{
dsn: "journalctl://",
expectedErr: "empty journalctl:// DSN",
},
{
dsn: "journalctl://foobar=42",
expectedErr: "unsupported key foobar in journalctl DSN",
},
{
dsn: "journalctl://filters=%ZZ",
expectedErr: "could not parse journalctl DSN : invalid URL escape \"%ZZ\"",
},
{
dsn: "journalctl://filters=_UID=42?log_level=warn",
expectedErr: "",
},
{
dsn: "journalctl://filters=_UID=1000&log_level=foobar",
expectedErr: "unknown level foobar: not a valid logrus Level:",
},
{
dsn: "journalctl://filters=_UID=1000&log_level=warn&since=yesterday",
expectedErr: "",
},
}
subLogger := log.WithFields(log.Fields{
"type": "journalctl",
})
for _, test := range tests {
f := JournalCtlSource{}
err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger)
if test.expectedErr != "" {
assert.Contains(t, err.Error(), test.expectedErr)
} else {
assert.Equal(t, err, nil)
}
}
}
func TestOneShot(t *testing.T) {
tests := []struct {
config string
expectedErr string
expectedOutput string
expectedLines int
logLevel log.Level
}{
{
config: `
source: journalctl
mode: cat
journalctl_filter:
- "-_UID=42"`,
expectedErr: "",
expectedOutput: "journalctl: invalid option",
logLevel: log.WarnLevel,
expectedLines: 0,
},
{
config: `
source: journalctl
mode: cat
journalctl_filter:
- _SYSTEMD_UNIT=ssh.service`,
expectedErr: "",
expectedOutput: "",
logLevel: log.WarnLevel,
expectedLines: 14,
},
}
for _, ts := range tests {
var logger *log.Logger
var subLogger *log.Entry
var hook *test.Hook
if ts.expectedOutput != "" {
logger, hook = test.NewNullLogger()
logger.SetLevel(ts.logLevel)
subLogger = logger.WithFields(log.Fields{
"type": "journalctl",
})
} else {
subLogger = log.WithFields(log.Fields{
"type": "journalctl",
})
}
tomb := tomb.Tomb{}
out := make(chan types.Event)
j := JournalCtlSource{}
err := j.Configure([]byte(ts.config), subLogger)
if err != nil {
t.Fatalf("Unexpected error : %s", err)
}
actualLines := 0
if ts.expectedLines != 0 {
go func() {
READLOOP:
for {
select {
case <-out:
actualLines++
case <-time.After(1 * time.Second):
break READLOOP
}
}
}()
}
err = j.OneShotAcquisition(out, &tomb)
if ts.expectedErr == "" && err != nil {
t.Fatalf("Unexpected error : %s", err)
} else if ts.expectedErr != "" && err != nil {
assert.Contains(t, err.Error(), ts.expectedErr)
continue
} else if ts.expectedErr != "" && err == nil {
t.Fatalf("Expected error %s, but got nothing !", ts.expectedErr)
}
if ts.expectedLines != 0 {
assert.Equal(t, ts.expectedLines, actualLines)
}
if ts.expectedOutput != "" {
if hook.LastEntry() == nil {
t.Fatalf("Expected log output '%s' but got nothing !", ts.expectedOutput)
}
assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput)
hook.Reset()
}
}
}
func TestStreaming(t *testing.T) {
tests := []struct {
config string
expectedErr string
expectedOutput string
expectedLines int
logLevel log.Level
}{
{
config: `
source: journalctl
mode: cat
journalctl_filter:
- _SYSTEMD_UNIT=ssh.service`,
expectedErr: "",
expectedOutput: "",
logLevel: log.WarnLevel,
expectedLines: 14,
},
}
for _, ts := range tests {
var logger *log.Logger
var subLogger *log.Entry
var hook *test.Hook
if ts.expectedOutput != "" {
logger, hook = test.NewNullLogger()
logger.SetLevel(ts.logLevel)
subLogger = logger.WithFields(log.Fields{
"type": "journalctl",
})
} else {
subLogger = log.WithFields(log.Fields{
"type": "journalctl",
})
}
tomb := tomb.Tomb{}
out := make(chan types.Event)
j := JournalCtlSource{}
err := j.Configure([]byte(ts.config), subLogger)
if err != nil {
t.Fatalf("Unexpected error : %s", err)
}
actualLines := 0
if ts.expectedLines != 0 {
go func() {
READLOOP:
for {
select {
case <-out:
actualLines++
case <-time.After(1 * time.Second):
break READLOOP
}
}
}()
}
err = j.StreamingAcquisition(out, &tomb)
if ts.expectedErr == "" && err != nil {
t.Fatalf("Unexpected error : %s", err)
} else if ts.expectedErr != "" && err != nil {
assert.Contains(t, err.Error(), ts.expectedErr)
continue
} else if ts.expectedErr != "" && err == nil {
t.Fatalf("Expected error %s, but got nothing !", ts.expectedErr)
}
if ts.expectedLines != 0 {
time.Sleep(1 * time.Second)
assert.Equal(t, ts.expectedLines, actualLines)
}
tomb.Kill(nil)
tomb.Wait()
output, _ := exec.Command("pgrep", "-x", "journalctl").CombinedOutput()
if string(output) != "" {
t.Fatalf("Found a journalctl process after killing the tomb !")
}
if ts.expectedOutput != "" {
if hook.LastEntry() == nil {
t.Fatalf("Expected log output '%s' but got nothing !", ts.expectedOutput)
}
assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput)
hook.Reset()
}
}
}
func TestMain(m *testing.M) {
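	// Unless USE_SYSTEM_JOURNALCTL is set, prepend ./test_files to PATH so the
	// tests use the journalctl replacement in ./test_files instead of the
	// system binary.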
if os.Getenv("USE_SYSTEM_JOURNALCTL") == "" {
os.Setenv("PATH", "./test_files"+":"+os.Getenv("PATH"))
}
os.Exit(m.Run())
}
| [
"\"USE_SYSTEM_JOURNALCTL\"",
"\"PATH\""
]
| []
| [
"PATH",
"USE_SYSTEM_JOURNALCTL"
]
| [] | ["PATH", "USE_SYSTEM_JOURNALCTL"] | go | 2 | 0 | |
yolov3_tiny_deer_detection/evaluate_mAP.py |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from yolov3.dataset import Dataset
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, detect_image, image_preprocess, postprocess_boxes, nms, read_class_names
from yolov3.configs import *
import shutil
import json
import time
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
    rec.insert(0, 0.0) # insert 0.0 at beginning of list
rec.append(1.0) # insert 1.0 at end of list
mrec = rec[:]
    prec.insert(0, 0.0) # insert 0.0 at beginning of list
prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
matlab: for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
"""
This part creates a list of indexes where the recall changes
matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
"""
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
def get_mAP(Yolo, dataset, score_threshold=0.25, iou_threshold=0.50, TEST_INPUT_SIZE=TEST_INPUT_SIZE):
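    """Compute PASCAL-VOC style mAP for `Yolo` over `dataset`: ground-truth and
    per-class prediction JSON files are written under mAP/, then per-class AP is
    accumulated from the precision/recall curves and written to mAP/results.txt."""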
MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)
NUM_CLASS = read_class_names(TRAIN_CLASSES)
ground_truth_dir_path = 'mAP/ground-truth'
if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
if not os.path.exists('mAP'): os.mkdir('mAP')
os.mkdir(ground_truth_dir_path)
print(f'\ncalculating mAP{int(iou_threshold*100)}...\n')
gt_counter_per_class = {}
for index in range(dataset.num_samples):
ann_dataset = dataset.annotations[index]
original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True)
if len(bbox_data_gt) == 0:
bboxes_gt = []
classes_gt = []
else:
bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
ground_truth_path = os.path.join(ground_truth_dir_path, str(index) + '.txt')
num_bbox_gt = len(bboxes_gt)
bounding_boxes = []
for i in range(num_bbox_gt):
class_name = NUM_CLASS[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox = xmin + " " + ymin + " " + xmax + " " +ymax
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
# count that object
if class_name in gt_counter_per_class:
gt_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
gt_counter_per_class[class_name] = 1
bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
with open(f'{ground_truth_dir_path}/{str(index)}_ground_truth.json', 'w') as outfile:
json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
times = []
json_pred = [[] for i in range(n_classes)]
for index in range(dataset.num_samples):
ann_dataset = dataset.annotations[index]
image_name = ann_dataset[0].split('/')[-1]
original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True)
image = image_preprocess(np.copy(original_image), [TEST_INPUT_SIZE, TEST_INPUT_SIZE])
image_data = image[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
times.append(t2-t1)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, TEST_INPUT_SIZE, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
for bbox in bboxes:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = NUM_CLASS[class_ind]
score = '%.4f' % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox = xmin + " " + ymin + " " + xmax + " " +ymax
json_pred[gt_classes.index(class_name)].append({"confidence": str(score), "file_id": str(index), "bbox": str(bbox)})
ms = sum(times)/len(times)*1000
fps = 1000 / ms
for class_name in gt_classes:
json_pred[gt_classes.index(class_name)].sort(key=lambda x:float(x['confidence']), reverse=True)
with open(f'{ground_truth_dir_path}/{class_name}_predictions.json', 'w') as outfile:
json.dump(json_pred[gt_classes.index(class_name)], outfile)
# Calculate the AP for each class
sum_AP = 0.0
ap_dictionary = {}
# open file to store the results
with open("mAP/results.txt", 'w') as results_file:
results_file.write("# AP and precision/recall per class\n")
count_true_positives = {}
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
# Load predictions of that class
predictions_file = f'{ground_truth_dir_path}/{class_name}_predictions.json'
predictions_data = json.load(open(predictions_file))
# Assign predictions to ground truth objects
nd = len(predictions_data)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
for idx, prediction in enumerate(predictions_data):
file_id = prediction["file_id"]
# assign prediction to ground truth object if any
# open ground-truth with that file_id
gt_file = f'{ground_truth_dir_path}/{str(file_id)}_ground_truth.json'
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
# load prediction bounding-box
bb = [ float(x) for x in prediction["bbox"].split() ] # bounding box of prediction
for obj in ground_truth_data:
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [ float(x) for x in obj["bbox"].split() ] # bounding box of ground truth
bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# assign prediction as true positive/don't care/false positive
if ovmax >= MINOVERLAP:# if ovmax > minimum overlap
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
# update the ".json" file
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
else:
# false positive (multiple detection)
fp[idx] = 1
else:
# false positive
fp[idx] = 1
# compute precision/recall
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
#print(tp)
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
#print(rec)
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
#print(prec)
ap, mrec, mprec = voc_ap(rec, prec)
sum_AP += ap
text = "{0:.3f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)
rounded_prec = [ '%.3f' % elem for elem in prec ]
rounded_rec = [ '%.3f' % elem for elem in rec ]
# Write to results.txt
results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
print(text)
ap_dictionary[class_name] = ap
results_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {:.3f}%, {:.2f} FPS".format(mAP*100, fps)
results_file.write(text + "\n")
print(text)
return mAP*100
if __name__ == '__main__':
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}") # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(f"./checkpoints/{TRAIN_MODEL_NAME}", tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
get_mAP(yolo, testset, score_threshold=0.05, iou_threshold=0.50, TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
topdown/tokens_test.go | package topdown
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"os"
"strings"
"testing"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/storage/inmem"
"github.com/open-policy-agent/opa/topdown/internal/jwx/jwk"
"github.com/open-policy-agent/opa/topdown/internal/jwx/jws"
)
func TestParseTokenConstraints(t *testing.T) {
t.Run("Empty", func(t *testing.T) {
var constraints tokenConstraints
var err error
c := ast.NewObject()
constraints, err = parseTokenConstraints(c)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
if constraints.alg != "" {
t.Errorf("alg: %v", constraints.alg)
}
if constraints.keys != nil {
t.Errorf("key: %v", constraints.keys)
}
})
t.Run("Alg", func(t *testing.T) {
var constraints tokenConstraints
var err error
c := ast.NewObject()
c.Insert(ast.StringTerm("alg"), ast.StringTerm("RS256"))
constraints, err = parseTokenConstraints(c)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
if constraints.alg != "RS256" {
t.Errorf("alg: %v", constraints.alg)
}
})
t.Run("Cert", func(t *testing.T) {
var constraints tokenConstraints
var err error
c := ast.NewObject()
c.Insert(ast.StringTerm("cert"), ast.StringTerm(`-----BEGIN CERTIFICATE-----
MIIBcDCCARagAwIBAgIJAMZmuGSIfvgzMAoGCCqGSM49BAMCMBMxETAPBgNVBAMM
CHdoYXRldmVyMB4XDTE4MDgxMDE0Mjg1NFoXDTE4MDkwOTE0Mjg1NFowEzERMA8G
A1UEAwwId2hhdGV2ZXIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATPwn3WCEXL
mjp/bFniDwuwsfu7bASlPae2PyWhqGeWwe23Xlyx+tSqxlkXYe4pZ23BkAAscpGj
yn5gXHExyDlKo1MwUTAdBgNVHQ4EFgQUElRjSoVgKjUqY5AXz2o74cLzzS8wHwYD
VR0jBBgwFoAUElRjSoVgKjUqY5AXz2o74cLzzS8wDwYDVR0TAQH/BAUwAwEB/zAK
BggqhkjOPQQDAgNIADBFAiEA4yQ/88ZrUX68c6kOe9G11u8NUaUzd8pLOtkKhniN
OHoCIHmNX37JOqTcTzGn2u9+c8NlnvZ0uDvsd1BmKPaUmjmm
-----END CERTIFICATE-----`))
constraints, err = parseTokenConstraints(c)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
pubKey := constraints.keys[0].(*ecdsa.PublicKey)
if pubKey.Curve != elliptic.P256() {
t.Errorf("curve: %v", pubKey.Curve)
}
if pubKey.X.Text(16) != "cfc27dd60845cb9a3a7f6c59e20f0bb0b1fbbb6c04a53da7b63f25a1a86796c1" {
t.Errorf("x: %x", pubKey.X)
}
if pubKey.Y.Text(16) != "edb75e5cb1fad4aac6591761ee29676dc190002c7291a3ca7e605c7131c8394a" {
t.Errorf("y: %x", pubKey.Y)
}
})
t.Run("Cert Multi Key", func(t *testing.T) {
var constraints tokenConstraints
var err error
c := ast.NewObject()
c.Insert(ast.StringTerm("cert"), ast.StringTerm(`{
"keys": [
{
"kty": "EC",
"use": "sig",
"crv": "P-256",
"kid": "k1",
"x": "9Qq5S5VqMQoH-FOI4atcH6V3bua03C-5ZMZMG1rszwA",
"y": "LLbFxWkGBEBrTm1GMYZJy1OXCH1KLweJMCgIEPIsibU",
"alg": "ES256"
},
{
"kty": "RSA",
"e": "AQAB",
"use": "enc",
"kid": "k2",
"alg": "RS256",
"n": "sGu-fYVE2nq2dPxJlqAMI0Z8G3FD0XcWDnD8mkfO1ddKRGuUQZmfj4gWeZGyIk3cnuoy7KJCEqa3daXc08QHuFZyfn0rH33t8_AFsvb0q0i7R2FK-Gdqs_E0-sGpYMsRJdZWfCioLkYjIHEuVnRbi3DEsWqe484rEGbKF60jNRgGC4b-8pz-E538ZkssWxcqHrYIj5bjGEU36onjS3M_yrTuNvzv_8wRioK4fbcwmGne9bDxu8LcoSReWpPn0CnUkWnfqroRcMJnC87ZuJagDW1ZWCmU3psdsVanmFFh0DP6z0fsA4h8G2n9-qp-LEKFaWwo3IWlOsIzU3MHdcEiGw"
}
]
}
`))
constraints, err = parseTokenConstraints(c)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
elPubKey := constraints.keys[0].(*ecdsa.PublicKey)
if elPubKey.Curve != elliptic.P256() {
t.Errorf("curve: %v", elPubKey.Curve)
}
rsaPubKey := constraints.keys[1].(*rsa.PublicKey)
if rsaPubKey.Size() != 256 {
t.Errorf("expected size 256 found %d", rsaPubKey.Size())
}
})
t.Run("Unrecognized", func(t *testing.T) {
var err error
c := ast.NewObject()
c.Insert(ast.StringTerm("hatever"), ast.StringTerm("junk"))
_, err = parseTokenConstraints(c)
if err == nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
})
t.Run("IllFormed", func(t *testing.T) {
var err error
c := ast.Array{ast.StringTerm("alg")}
_, err = parseTokenConstraints(c)
if err == nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
})
}
func TestParseTokenHeader(t *testing.T) {
t.Run("Errors", func(t *testing.T) {
token := &JSONWebToken{
header: "",
}
var err error
if err = token.decodeHeader(); err == nil {
t.Fatalf("token.decodeHeader: %v", err)
}
token.header = "###"
if err = token.decodeHeader(); err == nil {
t.Fatalf("token.decodeHeader: %v", err)
}
token.header = base64.RawURLEncoding.EncodeToString([]byte(`{`))
if err = token.decodeHeader(); err == nil {
t.Fatalf("token.decodeHeader: %v", err)
}
token.header = base64.RawURLEncoding.EncodeToString([]byte(`{}`))
if err = token.decodeHeader(); err != nil {
t.Fatalf("token.decodeHeader: %v", err)
}
var header tokenHeader
header, err = parseTokenHeader(token)
if err != nil {
t.Fatalf("parseTokenHeader: %v", err)
}
if header.valid() {
t.Fatalf("tokenHeader valid")
}
})
t.Run("Alg", func(t *testing.T) {
token := &JSONWebToken{
header: base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"RS256"}`)),
}
var err error
if err = token.decodeHeader(); err != nil {
t.Fatalf("token.decodeHeader: %v", err)
}
var header tokenHeader
header, err = parseTokenHeader(token)
if err != nil {
t.Fatalf("parseTokenHeader: %v", err)
}
if !header.valid() {
t.Fatalf("tokenHeader !valid")
}
if header.alg != "RS256" {
t.Fatalf("alg: %s", header.alg)
}
})
}
func TestTopDownJWTEncodeSignPayloadErrors(t *testing.T) {
const examplePayloadError = `{"iss:"joe",` + "\r\n" + ` "exp":1300819380,` + "\r\n" + ` "http://example.com/is_root":true}`
const hs256Hdr = `{"typ":"JWT",` + "\r\n " + `"alg":"HS256"}`
params := []struct {
note string
input1 string
input2 string
input3 string
result string
err string
}{
{
"No Payload",
hs256Hdr,
"",
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"type is JWT but payload is not JSON",
},
{
"Payload JSON Error",
hs256Hdr,
examplePayloadError,
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"type is JWT but payload is not JSON",
},
{
"Non JSON Error",
hs256Hdr,
"e",
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"type is JWT but payload is not JSON",
},
}
type test struct {
note string
rules []string
expected interface{}
}
var tests []test
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%s`, p.result)
if p.err != "" {
exp = &Error{Code: BuiltinErr, Message: p.err}
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign_raw(%q, %q, %q, x) }`, p.input1, p.input2, p.input3)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownJWTEncodeSignHeaderErrors(t *testing.T) {
const examplePayload = `{"iss":"joe",` + "\r\n" + ` "exp":1300819380,` + "\r\n" + ` "http://example.com/is_root":true}`
const hs256HdrError = `{"typ:"JWT",` + "\r\n " + `"alg":"HS256"}`
params := []struct {
note string
input1 string
input2 string
input3 string
result string
err string
}{
{
"Unknown signature algorithm",
hs256HdrError,
examplePayload,
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"invalid character",
},
{
"Unknown signature algorithm",
`{"alg":"dummy"}`,
examplePayload,
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"Unknown signature algorithm",
},
{
"Empty JSON header Error",
"{}",
examplePayload,
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"unsupported signature algorithm",
},
{
"Empty headers input error",
"",
examplePayload,
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"unexpected end of JSON input",
},
{
"No JSON Error",
"e",
examplePayload,
`{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`,
"",
"invalid character",
},
}
type test struct {
note string
rules []string
expected interface{}
}
var tests []test
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%s`, p.result)
if p.err != "" {
exp = &Error{Code: BuiltinErr, Message: p.err}
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign_raw(%q, %q, %q, x) }`, p.input1, p.input2, p.input3)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownJWTEncodeSignRaw(t *testing.T) {
const examplePayload = `{"iss":"joe",` + "\r\n" + ` "exp":1300819380,` + "\r\n" + ` "http://example.com/is_root":true}`
const hs256Hdr = `{"typ":"JWT",` + "\r\n " + `"alg":"HS256"}`
const rs256Hdr = `{"alg":"RS256"}`
const hs256HdrPlain = `{"typ":"text/plain",` + "\r\n " + `"alg":"HS256"}`
const symmetricKey = `{
"kty":"oct",
"k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"
}`
const rsaKey = `{
"kty":"RSA",
"n":"ofgWCuLjybRlzo0tZWJjNiuSfb4p4fAkd_wWJcyQoTbji9k0l8W26mPddxHmfHQp-Vaw-4qPCJrcS2mJPMEzP1Pt0Bm4d4QlL-yRT-SFd2lZS-pCgNMsD1W_YpRPEwOWvG6b32690r2jZ47soMZo9wGzjb_7OMg0LOL-bSf63kpaSHSXndS5z5rexMdbBYUsLA9e-KXBdQOS-UTo7WTBEMa2R2CapHg665xsmtdVMTBQY4uDZlxvb3qCo5ZwKh9kG4LT6_I5IhlJH7aGhyxXFvUK-DWNmoudF8NAco9_h9iaGNj8q2ethFkMLs91kzk2PAcDTW9gb54h4FRWyuXpoQ",
"e":"AQAB",
"d":"Eq5xpGnNCivDflJsRQBXHx1hdR1k6Ulwe2JZD50LpXyWPEAeP88vLNO97IjlA7_GQ5sLKMgvfTeXZx9SE-7YwVol2NXOoAJe46sui395IW_GO-pWJ1O0BkTGoVEn2bKVRUCgu-GjBVaYLU6f3l9kJfFNS3E0QbVdxzubSu3Mkqzjkn439X0M_V51gfpRLI9JYanrC4D4qAdGcopV_0ZHHzQlBjudU2QvXt4ehNYTCBr6XCLQUShb1juUO1ZdiYoFaFQT5Tw8bGUl_x_jTj3ccPDVZFD9pIuhLhBOneufuBiB4cS98l2SR_RQyGWSeWjnczT0QU91p1DhOVRuOopznQ",
"p":"4BzEEOtIpmVdVEZNCqS7baC4crd0pqnRH_5IB3jw3bcxGn6QLvnEtfdUdiYrqBdss1l58BQ3KhooKeQTa9AB0Hw_Py5PJdTJNPY8cQn7ouZ2KKDcmnPGBY5t7yLc1QlQ5xHdwW1VhvKn-nXqhJTBgIPgtldC-KDV5z-y2XDwGUc",
"q":"uQPEfgmVtjL0Uyyx88GZFF1fOunH3-7cepKmtH4pxhtCoHqpWmT8YAmZxaewHgHAjLYsp1ZSe7zFYHj7C6ul7TjeLQeZD_YwD66t62wDmpe_HlB-TnBA-njbglfIsRLtXlnDzQkv5dTltRJ11BKBBypeeF6689rjcJIDEz9RWdc",
"dp":"BwKfV3Akq5_MFZDFZCnW-wzl-CCo83WoZvnLQwCTeDv8uzluRSnm71I3QCLdhrqE2e9YkxvuxdBfpT_PI7Yz-FOKnu1R6HsJeDCjn12Sk3vmAktV2zb34MCdy7cpdTh_YVr7tss2u6vneTwrA86rZtu5Mbr1C1XsmvkxHQAdYo0",
"dq":"h_96-mK1R_7glhsum81dZxjTnYynPbZpHziZjeeHcXYsXaaMwkOlODsWa7I9xXDoRwbKgB719rrmI2oKr6N3Do9U0ajaHF-NKJnwgjMd2w9cjz3_-kyNlxAr2v4IKhGNpmM5iIgOS1VZnOZ68m6_pbLBSp3nssTdlqvd0tIiTHU",
"qi":"IYd7DHOhrWvxkwPQsRM2tOgrjbcrfvtQJipd-DlcxyVuuM9sQLdgjVk2oy26F0EmpScGLq2MowX7fhd_QJQ3ydy5cY7YIBi87w93IKLEdfnbJtoOPLUW0ITrJReOgo1cq9SbsxYawBgfp_gh6A5603k2-ZQwVK0JKSHuLFkuQ3U"
}`
params := []struct {
note string
input1 string
input2 string
input3 string
result string
err string
}{
{
"https://tools.ietf.org/html/rfc7515#appendix-A.1",
"`" + hs256Hdr + "`",
"`" + examplePayload + "`",
"`" + symmetricKey + "`",
`"eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"`,
"",
},
{
"No Payload but Media Type is Plain",
"`" + hs256HdrPlain + "`",
"`" + "" + "`",
"`" + symmetricKey + "`",
`"eyJ0eXAiOiJ0ZXh0L3BsYWluIiwNCiAiYWxnIjoiSFMyNTYifQ..sXoGQMWwM-SmX495-htA7kndgbkwz1PnqsDeY275gnI""`,
"",
},
{
"text/plain media type",
"`" + hs256HdrPlain + "`",
"`" + "e" + "`",
"`" + symmetricKey + "`",
`"eyJ0eXAiOiJ0ZXh0L3BsYWluIiwNCiAiYWxnIjoiSFMyNTYifQ.ZQ.oO8Vnc4Jv7-J231a1bEcQrgXfKbNW-kEvVY7BP1v5rM""`,
"",
},
{
"Empty JSON payload",
"`" + hs256Hdr + "`",
"`" + "{}" + "`",
"`" + symmetricKey + "`",
`"eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.e30.KAml6HRetE0sq22SYNh_CQExhf-X31ChYTfGwUBIWu8"`,
"",
},
{
"https://tools.ietf.org/html/rfc7515#appendix-A.2",
"`" + rs256Hdr + "`",
"`" + examplePayload + "`",
"`" + rsaKey + "`",
`"eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.cC4hiUPoj9Eetdgtv3hF80EGrhuB__dzERat0XF9g2VtQgr9PJbu3XOiZj5RZmh7AAuHIm4Bh-0Qc_lF5YKt_O8W2Fp5jujGbds9uJdbF9CUAr7t1dnZcAcQjbKBYNX4BAynRFdiuB--f_nZLgrnbyTyWzO75vRK5h6xBArLIARNPvkSjtQBMHlb1L07Qe7K0GarZRmB_eSN9383LcOLn6_dO--xi12jzDwusC-eOkHWEsqtFZESc6BfI7noOPqvhJ1phCnvWh6IeYI2w9QOYEUipUTI8np6LbgGY9Fs98rqVt5AXLIhWkWywlVmtVrBp0igcN_IoypGlUPQGe77Rw"`,
"",
},
}
type test struct {
note string
rules []string
expected interface{}
}
var rawTests []test
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%s`, p.result)
if p.err != "" {
exp = &Error{Code: BuiltinErr, Message: p.err}
}
rawTests = append(rawTests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign_raw(%s, %s, %s, x) }`, p.input1, p.input2, p.input3)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range rawTests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownJWTEncodeSignES256(t *testing.T) {
const examplePayload = `{"iss":"joe",` + "\r\n" + ` "exp":1300819380,` + "\r\n" + ` "http://example.com/is_root":true}`
const es256Hdr = `{"alg":"ES256"}`
const ecKey = `{
"kty":"EC",
"crv":"P-256",
"x":"f83OJ3D2xF1Bg8vub9tLe1gHMzV76e8Tus9uPHvRVEU",
"y":"x_FEzRu9m36HLN_tue659LNpXW6pCyStikYjKIWI5a0",
"d":"jpsQnnGQmL-YBIffH1136cspYG6-0iY7X1fCE9-E9LI"
}`
params := struct {
note string
input1 string
input2 string
input3 string
err string
}{
"https://tools.ietf.org/html/rfc7515#appendix-A.3",
"`" + es256Hdr + "`",
"`" + examplePayload + "`",
"`" + ecKey + "`",
"",
}
type test struct {
note string
rules []string
}
tc := test{
params.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign_raw(%s, %s, %s, x) }`, params.input1, params.input2, params.input3)},
}
compiler, err := compileRules(nil, tc.rules, nil)
if err != nil {
t.Errorf("%v: Compiler error: %v", tc.note, err)
return
}
store := inmem.New()
path := []string{"p"}
var inputTerm *ast.Term
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
var lhs *ast.Term
if len(path) == 0 {
lhs = ast.NewTerm(ast.DefaultRootRef)
} else {
lhs = ast.MustParseTerm("data." + strings.Join(path, "."))
}
rhs := ast.VarTerm(ast.WildcardPrefix + "result")
body := ast.NewBody(ast.Equality.Expr(lhs, rhs))
query := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(inputTerm)
var tracer BufferTracer
if os.Getenv("OPA_TRACE_TEST") != "" {
query = query.WithTracer(&tracer)
}
qrs, err := query.Run(ctx)
if tracer != nil {
PrettyTrace(os.Stdout, tracer)
}
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(qrs) == 0 {
t.Fatal("Undefined result")
}
result, err := ast.JSON(qrs[0][rhs.Value.(ast.Var)].Value)
if err != nil {
t.Fatal(err)
}
// Verification
standardHeaders := &jws.StandardHeaders{}
err = json.Unmarshal([]byte(es256Hdr), standardHeaders)
if err != nil {
t.Fatal("Failed to parse header")
}
alg := standardHeaders.GetAlgorithm()
keys, err := jwk.ParseString(ecKey)
if err != nil {
t.Fatal("Failed to parse JWK")
}
key, err := keys.Keys[0].Materialize()
if err != nil {
t.Fatal("Failed to create private key")
}
publicKey, err := jwk.GetPublicKey(key)
if err != nil {
t.Fatalf("Failed to get public key: %v", err)
}
// Verify with vendor library
verifiedPayload, err := jws.Verify([]byte(result.(string)), alg, publicKey)
if err != nil || string(verifiedPayload) != examplePayload {
t.Fatal("Failed to verify message")
}
}
// The ECDSA (ES256/ES512) encode-sign tests verify their results inline because ECDSA
// signatures are randomized, so the expected token values cannot be fixed beforehand.
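// An illustrative sketch (not executed by this suite) of why fixed expectations are
// impossible: signing the same digest twice with a hypothetical *ecdsa.PrivateKey
// `priv` draws a fresh random nonce each time, so the signatures differ.
//
//	sig1, _ := ecdsa.SignASN1(rand.Reader, priv, digest)
//	sig2, _ := ecdsa.SignASN1(rand.Reader, priv, digest)
//	// bytes.Equal(sig1, sig2) is false with overwhelming probability, so the
//	// tests below sign, then verify the produced token with the vendor library.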
func TestTopDownJWTEncodeSignES512(t *testing.T) {
const examplePayload = `{"iss":"joe",` + "\r\n" + ` "exp":1300819380,` + "\r\n" + ` "http://example.com/is_root":true}`
const es512Hdr = `{"alg":"ES512"}`
const ecKey = `{
"kty":"EC",
"crv":"P-521",
"x":"AekpBQ8ST8a8VcfVOTNl353vSrDCLLJXmPk06wTjxrrjcBpXp5EOnYG_NjFZ6OvLFV1jSfS9tsz4qUxcWceqwQGk",
"y":"ADSmRA43Z1DSNx_RvcLI87cdL07l6jQyyBXMoxVg_l2Th-x3S1WDhjDly79ajL4Kkd0AZMaZmh9ubmf63e3kyMj2",
"d":"AY5pb7A0UFiB3RELSD64fTLOSV_jazdF7fLYyuTw8lOfRhWg6Y6rUrPAxerEzgdRhajnu0ferB0d53vM9mE15j2C"
}`
params := struct {
note string
input1 string
input2 string
input3 string
err string
}{
"https://tools.ietf.org/html/rfc7515#appendix-A.4",
"`" + es512Hdr + "`",
"`" + examplePayload + "`",
"`" + ecKey + "`",
"",
}
type test struct {
note string
rules []string
}
var tests []test
tests = append(tests, test{
params.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign_raw(%s, %s, %s, x) }`, params.input1, params.input2, params.input3)},
})
tc := tests[0]
compiler, err := compileRules(nil, tc.rules, nil)
if err != nil {
t.Errorf("%v: Compiler error: %v", tc.note, err)
return
}
store := inmem.New()
path := []string{"p"}
var inputTerm *ast.Term
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
var lhs *ast.Term
if len(path) == 0 {
lhs = ast.NewTerm(ast.DefaultRootRef)
} else {
lhs = ast.MustParseTerm("data." + strings.Join(path, "."))
}
rhs := ast.VarTerm(ast.WildcardPrefix + "result")
body := ast.NewBody(ast.Equality.Expr(lhs, rhs))
query := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(inputTerm)
var tracer BufferTracer
if os.Getenv("OPA_TRACE_TEST") != "" {
query = query.WithTracer(&tracer)
}
qrs, err := query.Run(ctx)
if tracer != nil {
PrettyTrace(os.Stdout, tracer)
}
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(qrs) == 0 {
t.Fatal("Undefined result")
}
result, err := ast.JSON(qrs[0][rhs.Value.(ast.Var)].Value)
if err != nil {
t.Fatal(err)
}
// Verification
standardHeaders := &jws.StandardHeaders{}
err = json.Unmarshal([]byte(es512Hdr), standardHeaders)
if err != nil {
t.Fatal("Failed to parse header")
}
alg := standardHeaders.GetAlgorithm()
keys, err := jwk.ParseString(ecKey)
if err != nil {
t.Fatal("Failed to parse JWK")
}
key, err := keys.Keys[0].Materialize()
if err != nil {
t.Fatal("Failed to create private key")
}
publicKey, err := jwk.GetPublicKey(key)
if err != nil {
t.Fatalf("Failed to get public key: %v", err)
}
// Verify with vendor library
verifiedPayload, err := jws.Verify([]byte(result.(string)), alg, publicKey)
if err != nil || string(verifiedPayload) != examplePayload {
t.Fatal("Failed to verify message")
}
}
func TestTopDownJWTBuiltins(t *testing.T) {
params := []struct {
note string
input string
header string
payload string
signature string
err string
}{
{
"simple",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
{
"simple-non-registered",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuZXciOiJJIGFtIGEgdXNlciBjcmVhdGVkIGZpZWxkIiwiaXNzIjoib3BhIn0.6UmjsclVDGD9jcmX_F8RJzVgHtUZuLu2pxkF_UEQCrE`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "new": "I am a user created field", "iss": "opa" }`,
`e949a3b1c9550c60fd8dc997fc5f112735601ed519b8bbb6a71905fd41100ab1`,
"",
},
{
"no-support-jwe",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImVuYyI6ImJsYWgifQ.eyJuZXciOiJJIGFtIGEgdXNlciBjcmVhdGVkIGZpZWxkIiwiaXNzIjoib3BhIn0.McGUb1e-UviZKy6UyQErNNQzEUgeV25Buwk7OHOa8U8`,
``,
``,
``,
"JWT is a JWE object, which is not supported",
},
{
"no-periods",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"encoded JWT had no period separators",
},
{
"wrong-period-count",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXV.CJ9eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"encoded JWT must have 3 sections, found 2",
},
{
"bad-header-encoding",
`eyJhbGciOiJIU^%zI1NiI+sInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"JWT header had invalid encoding: illegal base64 data at input byte 13",
},
{
"bad-payload-encoding",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwia/XNzIjoib3BhIn0.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"JWT payload had invalid encoding: illegal base64 data at input byte 17",
},
{
"bad-signature-encoding",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0.XmVoLoHI3pxMtMO(_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"JWT signature had invalid encoding: illegal base64 data at input byte 15",
},
{
"nested",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImN0eSI6IkpXVCJ9.ImV5SmhiR2NpT2lKSVV6STFOaUlzSW5SNWNDSTZJa3BYVkNKOS5leUp6ZFdJaU9pSXdJaXdpYVhOeklqb2liM0JoSW4wLlhtVm9Mb0hJM3B4TXRNT19XUk9OTVNKekdVRFA5cERqeThKcDBfdGRSWFki.8W0qx4mLxslmZl7wEMUWBxH7tST3XsEuWXxesXqFnRI`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
{
"double-nested",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImN0eSI6IkpXVCJ9.ImV5SmhiR2NpT2lKSVV6STFOaUlzSW5SNWNDSTZJa3BYVkNJc0ltTjBlU0k2SWtwWFZDSjkuSW1WNVNtaGlSMk5wVDJsS1NWVjZTVEZPYVVselNXNVNOV05EU1RaSmEzQllWa05LT1M1bGVVcDZaRmRKYVU5cFNYZEphWGRwWVZoT2VrbHFiMmxpTTBKb1NXNHdMbGh0Vm05TWIwaEpNM0I0VFhSTlQxOVhVazlPVFZOS2VrZFZSRkE1Y0VScWVUaEtjREJmZEdSU1dGa2kuOFcwcXg0bUx4c2xtWmw3d0VNVVdCeEg3dFNUM1hzRXVXWHhlc1hxRm5SSSI.U8rwnGAJ-bJoGrAYKEzNtbJQWd3x1eW0Y25nLKHDCgo`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
{
"complex-values",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIiwiZXh0Ijp7ImFiYyI6IjEyMyIsImNiYSI6WzEwLCIxMCJdfX0.IIxF-uJ6i4K5Dj71xNLnUeqB9jmujl6ujTInhii1PxE`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa", "ext": { "abc": "123", "cba": [10, "10"] } }`,
`208c45fae27a8b82b90e3ef5c4d2e751ea81f639ae8e5eae8d32278628b53f11`,
"",
},
// The test below checks that payloads with duplicate keys
// in their encoding produce a token object that binds the key
// to the last occurring value, as per RFC 7519 Section 4.
// It tests a payload encoding that has 3 duplicates of the
// "iss" key, with the values "not opa", "also not opa" and
// "opa", in that order.
// Go's json.Unmarshal exhibits this behavior, but it is not
// documented, so this test is meant to catch that behavior
// if it changes.
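// A minimal sketch (not executed by this test) of the encoding/json behavior
// relied upon here, assuming a hypothetical claims map:
//
//	var claims map[string]interface{}
//	_ = json.Unmarshal([]byte(`{"iss": "not opa", "iss": "also not opa", "iss": "opa"}`), &claims)
//	// claims["iss"] == "opa": the last duplicate key wins.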
{
"duplicate-keys",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiAiMCIsImlzcyI6ICJub3Qgb3BhIiwgImlzcyI6ICJhbHNvIG5vdCBvcGEiLCAiaXNzIjogIm9wYSJ9.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`[%s, %s, "%s"]`, p.header, p.payload, p.signature)
if p.err != "" {
exp = &Error{Code: BuiltinErr, Message: p.err}
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = [x, y, z] { io.jwt.decode("%s", [x, y, z]) }`, p.input)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
const (
certPem = `-----BEGIN CERTIFICATE-----\nMIIFiDCCA3ACCQCGV6XsfG/oRTANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMC\nVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEO\nMAwGA1UECgwFU3R5cmExDDAKBgNVBAsMA0RldjESMBAGA1UEAwwJbG9jYWxob3N0\nMRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5cmEwHhcNMTgwMzA2MDAxNTU5WhcNMTkw\nMzA2MDAxNTU5WjCBhTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEOMAwGA1UECgwFU3R5cmExDDAKBgNVBAsM\nA0RldjESMBAGA1UEAwwJbG9jYWxob3N0MRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5\ncmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDucnAwTRA0zqDQ671L\nKWOVwhjhycFyzyhZUd7vhsnslOBiYM6TYIDXhETfAk2RQoRE/9xF16woMD8FOglc\nlSuhi+GNfFRif6LfArm84ZFj1ZS1MX2logikhXhRJQ7AOHe5+ED0re3KH5lWyqfz\nR6bQuPYwTQSBJy6Tq7T9RiOM29yadCX64OaCEbzEFmHtNlbb5px4zCVvgskg/fpV\nGGCMpAYjGDatbxE5eAloVs1EJuI5RSqWr1JRm6EejxM04BFdfGn1HgWrsKXtlvBa\n00/AC0zXL5n6LK7+L3WbRguVTZcE4Yu70gDwhmM+VsKeT9LKClX003BNj0NJDRB9\ndw9MaWxsXDNHNOWEfbnASXeP7ZRv3D81ftij6P8SL14ZnxyrRty8TAN4ij3wd41l\nastRQCtrJFi+HzO606XOp6HDzBoWT0DGl8Sn2hZ6RLPyBnD04vvvcSGeCVjHGOQ8\nc3OTroK58u5MR/q4T00sTkeeVAxuKoEWKsjIBYYrJTe/a2mEq9yiDGbPNYDnWnQZ\njSUZm+Us23Y2sm/agZ5zKXcEuoecGL6sYCixr/xeB9BPxEiTthH+0M8OY99qpIhz\nSmj41wdgQfzZi/6B8pIr77V/KywYKxJEmzw8Uy48aC/rZ8WsT8QdKwclo1aiNJhx\n79OvGbZFoeHD/w7igpx+ttpF/wIDAQABMA0GCSqGSIb3DQEBBQUAA4ICAQC3wWUs\nfXz+aSfFVz+O3mLFkr65NIgazbGAySgMgMNVuadheIkPL4k21atyflfpx4pg9FGv\n40vWCLMajpvynfz4oqah0BACnpqzQ8Dx6HYkmlXK8fLB+WtPrZBeUEsGPKuJYt4M\nd5TeY3VpNgWOPXmnE4lvxHZqh/8OwmOpjBfC9E3e2eqgwiwOkXnMaZEPgKP6JiWk\nEFaQ9jgMQqJZnNcv6NmiqqsZeI0/NNjBpkmEWQl+wLegVusHiQ0FMBMQ0taEo21r\nzUwHoNJR3h3wgGQiKxKOH1FUKHBV7hEqObLraD/hfG5xYucJfvvAAP1iH0ycPs+9\nhSccrn5/HY1c9AZnW8Kh7atp/wFP+sHjtECWK/lUmXfhASS293hprCpJk2n9pkmR\nziXKJhjwkxlC8NcHuiVfaxdfDa4+1Qta2gK7GEypbvLoEmIt/dsYUsxUg84lwJJ9\nnyC/pfZ5a8wFSf186JeVH4kHd3bnkzlQz460HndOMSJ/Xi1wSfuZlOVupFf8TVKl\np4j28MTLH2Wqx50NssKThdaX6hoCiMqreYa+EVaN1f/cIGQxZSCzdzMCKqdB8lKB\n3Eax+5zsIa/UyPwGxZcyXBRHAlz5ZnkjuRxInyiMkBWWz3IZXjTe6Fq8BNd2UWNc\nw35+2nO5n1LKXgR2+nzhZUOk8TPsi9WUywRluQ==\n-----END CERTIFICATE-----`
keyJWK = `{"kty":"RSA","e":"AQAB","kid":"4db88b6b-cda9-4242-b79e-51346edc313c","n":"7nJwME0QNM6g0Ou9SyljlcIY4cnBcs8oWVHe74bJ7JTgYmDOk2CA14RE3wJNkUKERP_cRdesKDA_BToJXJUroYvhjXxUYn-i3wK5vOGRY9WUtTF9paIIpIV4USUOwDh3ufhA9K3tyh-ZVsqn80em0Lj2ME0EgScuk6u0_UYjjNvcmnQl-uDmghG8xBZh7TZW2-aceMwlb4LJIP36VRhgjKQGIxg2rW8ROXgJaFbNRCbiOUUqlq9SUZuhHo8TNOARXXxp9R4Fq7Cl7ZbwWtNPwAtM1y-Z-iyu_i91m0YLlU2XBOGLu9IA8IZjPlbCnk_SygpV9NNwTY9DSQ0QfXcPTGlsbFwzRzTlhH25wEl3j-2Ub9w_NX7Yo-j_Ei9eGZ8cq0bcvEwDeIo98HeNZWrLUUArayRYvh8zutOlzqehw8waFk9AxpfEp9oWekSz8gZw9OL773EhnglYxxjkPHNzk66CufLuTEf6uE9NLE5HnlQMbiqBFirIyAWGKyU3v2tphKvcogxmzzWA51p0GY0lGZvlLNt2NrJv2oGecyl3BLqHnBi-rGAosa_8XgfQT8RIk7YR_tDPDmPfaqSIc0po-NcHYEH82Yv-gfKSK--1fyssGCsSRJs8PFMuPGgv62fFrE_EHSsHJaNWojSYce_Trxm2RaHhw_8O4oKcfrbaRf8"}`
certPemPs = `-----BEGIN CERTIFICATE-----\nMIIC/DCCAeSgAwIBAgIJAJRvYDU3ei3EMA0GCSqGSIb3DQEBCwUAMBMxETAPBgNV\nBAMMCHdoYXRldmVyMB4XDTE4MDgxMDEwMzgxNloXDTE4MDkwOTEwMzgxNlowEzER\nMA8GA1UEAwwId2hhdGV2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB\nAQC4kCmzLMW/5jzkzkmN7Me8wPD+ymBUIjsGqliGfMrfFfDV2eTPVtZcYD3IXoB4\nAOUT7XJzWjOsBRFOcVKKEiCPjXiLcwLb/QWQ1x0Budft32r3+N0KQd1rgcRHTPNc\nJoeWCfOgDPp51RTzTT6HQuV4ud+CDhRJP7QMVMIgal9Nuzs49LLZaBPW8/rFsHjk\nJQ4kDujSrpcT6F2FZY3SmWsOJgP7RjVKk5BheYeFKav5ZV4p6iHn/TN4RVpvpNBh\n5z/XoHITJ6lpkHSDpbIaQUTpobU2um8N3biz+HsEAmD9Laa27WUpYSpiM6DDMSXl\ndBDJdumerVRJvXYCtfXqtl17AgMBAAGjUzBRMB0GA1UdDgQWBBRz74MkVzT2K52/\nFJC4mTa9coM/DTAfBgNVHSMEGDAWgBRz74MkVzT2K52/FJC4mTa9coM/DTAPBgNV\nHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAD1ZE4IaIAetqGG+vt9oz1\nIx0j4EPok0ONyhhmiSsF6rSv8zlNWweVf5y6Z+AoTNY1Fym0T7dbpbqIox0EdKV3\nFLzniWOjznupbnqfXwHX/g1UAZSyt3akSatVhvNpGlnd7efTIAiNinX/TkzIjhZ7\nihMIZCGykT1P0ys1OaeEf57wAzviatD4pEMTIW0OOqY8bdRGhuJR1kKUZ/2Nm8Ln\ny7E0y8uODVbH9cAwGyzWB/QFc+bffNgi9uJaPQQc5Zxwpu9utlqyzFvXgV7MBYUK\nEYSLyxp4g4e5aujtLugaC8H6n9vP1mEBr/+T8HGynBZHNTKlDhhL9qDbpkkNB6/w\n-----END CERTIFICATE-----`
keyJWKPs = `{"kty":"RSA","e":"AQAB","kid":"bf688c97-bf51-49ba-b9d3-115195bb0eb8","n":"uJApsyzFv-Y85M5JjezHvMDw_spgVCI7BqpYhnzK3xXw1dnkz1bWXGA9yF6AeADlE-1yc1ozrAURTnFSihIgj414i3MC2_0FkNcdAbnX7d9q9_jdCkHda4HER0zzXCaHlgnzoAz6edUU800-h0LleLnfgg4UST-0DFTCIGpfTbs7OPSy2WgT1vP6xbB45CUOJA7o0q6XE-hdhWWN0plrDiYD-0Y1SpOQYXmHhSmr-WVeKeoh5_0zeEVab6TQYec_16ByEyepaZB0g6WyGkFE6aG1NrpvDd24s_h7BAJg_S2mtu1lKWEqYjOgwzEl5XQQyXbpnq1USb12ArX16rZdew"}`
certPemEs256 = `-----BEGIN CERTIFICATE-----\nMIIBcDCCARagAwIBAgIJAMZmuGSIfvgzMAoGCCqGSM49BAMCMBMxETAPBgNVBAMM\nCHdoYXRldmVyMB4XDTE4MDgxMDE0Mjg1NFoXDTE4MDkwOTE0Mjg1NFowEzERMA8G\nA1UEAwwId2hhdGV2ZXIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATPwn3WCEXL\nmjp/bFniDwuwsfu7bASlPae2PyWhqGeWwe23Xlyx+tSqxlkXYe4pZ23BkAAscpGj\nyn5gXHExyDlKo1MwUTAdBgNVHQ4EFgQUElRjSoVgKjUqY5AXz2o74cLzzS8wHwYD\nVR0jBBgwFoAUElRjSoVgKjUqY5AXz2o74cLzzS8wDwYDVR0TAQH/BAUwAwEB/zAK\nBggqhkjOPQQDAgNIADBFAiEA4yQ/88ZrUX68c6kOe9G11u8NUaUzd8pLOtkKhniN\nOHoCIHmNX37JOqTcTzGn2u9+c8NlnvZ0uDvsd1BmKPaUmjmm\n-----END CERTIFICATE-----\n`
keyJWKEs256 = `{"kty":"EC","crv":"P-256","x":"z8J91ghFy5o6f2xZ4g8LsLH7u2wEpT2ntj8loahnlsE","y":"7bdeXLH61KrGWRdh7ilnbcGQACxykaPKfmBccTHIOUo"}`
certPemBadBlock = `-----BEGIN CERT-----\nMIIFiDCCA3ACCQCGV6XsfG/oRTANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMC\nVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEO\nMAwGA1UECgwFU3R5cmExDDAKBgNVBAsMA0RldjESMBAGA1UEAwwJbG9jYWxob3N0\nMRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5cmEwHhcNMTgwMzA2MDAxNTU5WhcNMTkw\nMzA2MDAxNTU5WjCBhTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEOMAwGA1UECgwFU3R5cmExDDAKBgNVBAsM\nA0RldjESMBAGA1UEAwwJbG9jYWxob3N0MRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5\ncmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDucnAwTRA0zqDQ671L\nKWOVwhjhycFyzyhZUd7vhsnslOBiYM6TYIDXhETfAk2RQoRE/9xF16woMD8FOglc\nlSuhi+GNfFRif6LfArm84ZFj1ZS1MX2logikhXhRJQ7AOHe5+ED0re3KH5lWyqfz\nR6bQuPYwTQSBJy6Tq7T9RiOM29yadCX64OaCEbzEFmHtNlbb5px4zCVvgskg/fpV\nGGCMpAYjGDatbxE5eAloVs1EJuI5RSqWr1JRm6EejxM04BFdfGn1HgWrsKXtlvBa\n00/AC0zXL5n6LK7+L3WbRguVTZcE4Yu70gDwhmM+VsKeT9LKClX003BNj0NJDRB9\ndw9MaWxsXDNHNOWEfbnASXeP7ZRv3D81ftij6P8SL14ZnxyrRty8TAN4ij3wd41l\nastRQCtrJFi+HzO606XOp6HDzBoWT0DGl8Sn2hZ6RLPyBnD04vvvcSGeCVjHGOQ8\nc3OTroK58u5MR/q4T00sTkeeVAxuKoEWKsjIBYYrJTe/a2mEq9yiDGbPNYDnWnQZ\njSUZm+Us23Y2sm/agZ5zKXcEuoecGL6sYCixr/xeB9BPxEiTthH+0M8OY99qpIhz\nSmj41wdgQfzZi/6B8pIr77V/KywYKxJEmzw8Uy48aC/rZ8WsT8QdKwclo1aiNJhx\n79OvGbZFoeHD/w7igpx+ttpF/wIDAQABMA0GCSqGSIb3DQEBBQUAA4ICAQC3wWUs\nfXz+aSfFVz+O3mLFkr65NIgazbGAySgMgMNVuadheIkPL4k21atyflfpx4pg9FGv\n40vWCLMajpvynfz4oqah0BACnpqzQ8Dx6HYkmlXK8fLB+WtPrZBeUEsGPKuJYt4M\nd5TeY3VpNgWOPXmnE4lvxHZqh/8OwmOpjBfC9E3e2eqgwiwOkXnMaZEPgKP6JiWk\nEFaQ9jgMQqJZnNcv6NmiqqsZeI0/NNjBpkmEWQl+wLegVusHiQ0FMBMQ0taEo21r\nzUwHoNJR3h3wgGQiKxKOH1FUKHBV7hEqObLraD/hfG5xYucJfvvAAP1iH0ycPs+9\nhSccrn5/HY1c9AZnW8Kh7atp/wFP+sHjtECWK/lUmXfhASS293hprCpJk2n9pkmR\nziXKJhjwkxlC8NcHuiVfaxdfDa4+1Qta2gK7GEypbvLoEmIt/dsYUsxUg84lwJJ9\nnyC/pfZ5a8wFSf186JeVH4kHd3bnkzlQz460HndOMSJ/Xi1wSfuZlOVupFf8TVKl\np4j28MTLH2Wqx50NssKThdaX6hoCiMqreYa+EVaN1f/cIGQxZSCzdzMCKqdB8lKB\n3Eax+5zsIa/UyPwGxZcyXBRHAlz5ZnkjuRxInyiMkBWWz3IZXjTe6Fq8BNd2UWNc\nw35+2nO5n1LKXgR2+nzhZUOk8TPsi9WUywRluQ==\n-----END CERT-----`
certPemExtraData = `-----BEGIN CERTIFICATE-----\nMIIFiDCCA3ACCQCGV6XsfG/oRTANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMC\nVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEO\nMAwGA1UECgwFU3R5cmExDDAKBgNVBAsMA0RldjESMBAGA1UEAwwJbG9jYWxob3N0\nMRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5cmEwHhcNMTgwMzA2MDAxNTU5WhcNMTkw\nMzA2MDAxNTU5WjCBhTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEOMAwGA1UECgwFU3R5cmExDDAKBgNVBAsM\nA0RldjESMBAGA1UEAwwJbG9jYWxob3N0MRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5\ncmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDucnAwTRA0zqDQ671L\nKWOVwhjhycFyzyhZUd7vhsnslOBiYM6TYIDXhETfAk2RQoRE/9xF16woMD8FOglc\nlSuhi+GNfFRif6LfArm84ZFj1ZS1MX2logikhXhRJQ7AOHe5+ED0re3KH5lWyqfz\nR6bQuPYwTQSBJy6Tq7T9RiOM29yadCX64OaCEbzEFmHtNlbb5px4zCVvgskg/fpV\nGGCMpAYjGDatbxE5eAloVs1EJuI5RSqWr1JRm6EejxM04BFdfGn1HgWrsKXtlvBa\n00/AC0zXL5n6LK7+L3WbRguVTZcE4Yu70gDwhmM+VsKeT9LKClX003BNj0NJDRB9\ndw9MaWxsXDNHNOWEfbnASXeP7ZRv3D81ftij6P8SL14ZnxyrRty8TAN4ij3wd41l\nastRQCtrJFi+HzO606XOp6HDzBoWT0DGl8Sn2hZ6RLPyBnD04vvvcSGeCVjHGOQ8\nc3OTroK58u5MR/q4T00sTkeeVAxuKoEWKsjIBYYrJTe/a2mEq9yiDGbPNYDnWnQZ\njSUZm+Us23Y2sm/agZ5zKXcEuoecGL6sYCixr/xeB9BPxEiTthH+0M8OY99qpIhz\nSmj41wdgQfzZi/6B8pIr77V/KywYKxJEmzw8Uy48aC/rZ8WsT8QdKwclo1aiNJhx\n79OvGbZFoeHD/w7igpx+ttpF/wIDAQABMA0GCSqGSIb3DQEBBQUAA4ICAQC3wWUs\nfXz+aSfFVz+O3mLFkr65NIgazbGAySgMgMNVuadheIkPL4k21atyflfpx4pg9FGv\n40vWCLMajpvynfz4oqah0BACnpqzQ8Dx6HYkmlXK8fLB+WtPrZBeUEsGPKuJYt4M\nd5TeY3VpNgWOPXmnE4lvxHZqh/8OwmOpjBfC9E3e2eqgwiwOkXnMaZEPgKP6JiWk\nEFaQ9jgMQqJZnNcv6NmiqqsZeI0/NNjBpkmEWQl+wLegVusHiQ0FMBMQ0taEo21r\nzUwHoNJR3h3wgGQiKxKOH1FUKHBV7hEqObLraD/hfG5xYucJfvvAAP1iH0ycPs+9\nhSccrn5/HY1c9AZnW8Kh7atp/wFP+sHjtECWK/lUmXfhASS293hprCpJk2n9pkmR\nziXKJhjwkxlC8NcHuiVfaxdfDa4+1Qta2gK7GEypbvLoEmIt/dsYUsxUg84lwJJ9\nnyC/pfZ5a8wFSf186JeVH4kHd3bnkzlQz460HndOMSJ/Xi1wSfuZlOVupFf8TVKl\np4j28MTLH2Wqx50NssKThdaX6hoCiMqreYa+EVaN1f/cIGQxZSCzdzMCKqdB8lKB\n3Eax+5zsIa/UyPwGxZcyXBRHAlz5ZnkjuRxInyiMkBWWz3IZXjTe6Fq8BNd2UWNc\nw35+2nO5n1LKXgR2+nzhZUOk8TPsi9WUywRluQ==\n-----END CERTIFICATE-----\nEXTRA`
certPemBadCertificate = `-----BEGIN CERTIFICATE-----\ndeadiDCCA3ACCQCGV6XsfG/oRTANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMC\nVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEO\nMAwGA1UECgwFU3R5cmExDDAKBgNVBAsMA0RldjESMBAGA1UEAwwJbG9jYWxob3N0\nMRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5cmEwHhcNMTgwMzA2MDAxNTU5WhcNMTkw\nMzA2MDAxNTU5WjCBhTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEOMAwGA1UECgwFU3R5cmExDDAKBgNVBAsM\nA0RldjESMBAGA1UEAwwJbG9jYWxob3N0MRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5\ncmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDucnAwTRA0zqDQ671L\nKWOVwhjhycFyzyhZUd7vhsnslOBiYM6TYIDXhETfAk2RQoRE/9xF16woMD8FOglc\nlSuhi+GNfFRif6LfArm84ZFj1ZS1MX2logikhXhRJQ7AOHe5+ED0re3KH5lWyqfz\nR6bQuPYwTQSBJy6Tq7T9RiOM29yadCX64OaCEbzEFmHtNlbb5px4zCVvgskg/fpV\nGGCMpAYjGDatbxE5eAloVs1EJuI5RSqWr1JRm6EejxM04BFdfGn1HgWrsKXtlvBa\n00/AC0zXL5n6LK7+L3WbRguVTZcE4Yu70gDwhmM+VsKeT9LKClX003BNj0NJDRB9\ndw9MaWxsXDNHNOWEfbnASXeP7ZRv3D81ftij6P8SL14ZnxyrRty8TAN4ij3wd41l\nastRQCtrJFi+HzO606XOp6HDzBoWT0DGl8Sn2hZ6RLPyBnD04vvvcSGeCVjHGOQ8\nc3OTroK58u5MR/q4T00sTkeeVAxuKoEWKsjIBYYrJTe/a2mEq9yiDGbPNYDnWnQZ\njSUZm+Us23Y2sm/agZ5zKXcEuoecGL6sYCixr/xeB9BPxEiTthH+0M8OY99qpIhz\nSmj41wdgQfzZi/6B8pIr77V/KywYKxJEmzw8Uy48aC/rZ8WsT8QdKwclo1aiNJhx\n79OvGbZFoeHD/w7igpx+ttpF/wIDAQABMA0GCSqGSIb3DQEBBQUAA4ICAQC3wWUs\nfXz+aSfFVz+O3mLFkr65NIgazbGAySgMgMNVuadheIkPL4k21atyflfpx4pg9FGv\n40vWCLMajpvynfz4oqah0BACnpqzQ8Dx6HYkmlXK8fLB+WtPrZBeUEsGPKuJYt4M\nd5TeY3VpNgWOPXmnE4lvxHZqh/8OwmOpjBfC9E3e2eqgwiwOkXnMaZEPgKP6JiWk\nEFaQ9jgMQqJZnNcv6NmiqqsZeI0/NNjBpkmEWQl+wLegVusHiQ0FMBMQ0taEo21r\nzUwHoNJR3h3wgGQiKxKOH1FUKHBV7hEqObLraD/hfG5xYucJfvvAAP1iH0ycPs+9\nhSccrn5/HY1c9AZnW8Kh7atp/wFP+sHjtECWK/lUmXfhASS293hprCpJk2n9pkmR\nziXKJhjwkxlC8NcHuiVfaxdfDa4+1Qta2gK7GEypbvLoEmIt/dsYUsxUg84lwJJ9\nnyC/pfZ5a8wFSf186JeVH4kHd3bnkzlQz460HndOMSJ/Xi1wSfuZlOVupFf8TVKl\np4j28MTLH2Wqx50NssKThdaX6hoCiMqreYa+EVaN1f/cIGQxZSCzdzMCKqdB8lKB\n3Eax+5zsIa/UyPwGxZcyXBRHAlz5ZnkjuRxInyiMkBWWz3IZXjTe6Fq8BNd2UWNc\nw35+2nO5n1LKXgR2+nzhZUOk8TPsi9WUywRluQ==\n-----END CERTIFICATE-----`
keyJWKBadKey = `{"kty":"bogus key type","e":"AQAB","kid":"4db88b6b-cda9-4242-b79e-51346edc313c","n":"7nJwME0QNM6g0Ou9SyljlcIY4cnBcs8oWVHe74bJ7JTgYmDOk2CA14RE3wJNkUKERP_cRdesKDA_BToJXJUroYvhjXxUYn-i3wK5vOGRY9WUtTF9paIIpIV4USUOwDh3ufhA9K3tyh-ZVsqn80em0Lj2ME0EgScuk6u0_UYjjNvcmnQl-uDmghG8xBZh7TZW2-aceMwlb4LJIP36VRhgjKQGIxg2rW8ROXgJaFbNRCbiOUUqlq9SUZuhHo8TNOARXXxp9R4Fq7Cl7ZbwWtNPwAtM1y-Z-iyu_i91m0YLlU2XBOGLu9IA8IZjPlbCnk_SygpV9NNwTY9DSQ0QfXcPTGlsbFwzRzTlhH25wEl3j-2Ub9w_NX7Yo-j_Ei9eGZ8cq0bcvEwDeIo98HeNZWrLUUArayRYvh8zutOlzqehw8waFk9AxpfEp9oWekSz8gZw9OL773EhnglYxxjkPHNzk66CufLuTEf6uE9NLE5HnlQMbiqBFirIyAWGKyU3v2tphKvcogxmzzWA51p0GY0lGZvlLNt2NrJv2oGecyl3BLqHnBi-rGAosa_8XgfQT8RIk7YR_tDPDmPfaqSIc0po-NcHYEH82Yv-gfKSK--1fyssGCsSRJs8PFMuPGgv62fFrE_EHSsHJaNWojSYce_Trxm2RaHhw_8O4oKcfrbaRf8"}`
multiKeyJWkS = `{
"keys": [
{
"kty": "EC",
"use": "sig",
"crv": "P-256",
"kid": "k1",
"x": "9Qq5S5VqMQoH-FOI4atcH6V3bua03C-5ZMZMG1rszwA",
"y": "LLbFxWkGBEBrTm1GMYZJy1OXCH1KLweJMCgIEPIsibU",
"alg": "ES256"
},
{
"kty": "RSA",
"e": "AQAB",
"use": "enc",
"kid": "k2",
"alg": "RS256",
"n": "sGu-fYVE2nq2dPxJlqAMI0Z8G3FD0XcWDnD8mkfO1ddKRGuUQZmfj4gWeZGyIk3cnuoy7KJCEqa3daXc08QHuFZyfn0rH33t8_AFsvb0q0i7R2FK-Gdqs_E0-sGpYMsRJdZWfCioLkYjIHEuVnRbi3DEsWqe484rEGbKF60jNRgGC4b-8pz-E538ZkssWxcqHrYIj5bjGEU36onjS3M_yrTuNvzv_8wRioK4fbcwmGne9bDxu8LcoSReWpPn0CnUkWnfqroRcMJnC87ZuJagDW1ZWCmU3psdsVanmFFh0DP6z0fsA4h8G2n9-qp-LEKFaWwo3IWlOsIzU3MHdcEiGw"
}
]
}`
)
func TestTopDownJWTVerifyRSA(t *testing.T) {
params := []struct {
note string
alg string
input1 string
input2 string
result bool
err string
}{
{
"success-cert",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
fmt.Sprintf(`"%s"`, certPem),
true,
"",
},
{
"success-jwk",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
fmt.Sprintf("`%s`", keyJWK),
true,
"",
},
{
"success-ps256-cert",
"ps256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJQUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiZm9vIjogImJhciJ9.i0F3MHWzOsBNLqjQzK1UVeQid9xPMowCoUsoM-C2BDxUY-FMKmCeJ1NJ4TGnS9HzFK1ftEvRnPT7EOxOkHPoCk1rz3feTFgtHtNzQqLM1IBTnz6aHHOrda_bKPHH9ZIYCRQUPXhpC90ivW_IJR-f7Z1WLrMXaJ71i1XteruENHrJJJDn0HedHG6N0VHugBHrak5k57cbE31utAdx83TEd8v2Y8wAkCJXKrdmTa-8419LNxW_yjkvoDD53n3X5CHhYkSymU77p0v6yWO38qDWeKJ-Fm_PrMAo72_rizDBj_yPa5LA3bT_EnsgZtC-sp8_SCDIH41bjiCGpRHhqgZmyw`,
fmt.Sprintf(`"%s"`, certPemPs),
true,
"",
},
{
"success-ps256-jwk",
"ps256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJQUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiZm9vIjogImJhciJ9.i0F3MHWzOsBNLqjQzK1UVeQid9xPMowCoUsoM-C2BDxUY-FMKmCeJ1NJ4TGnS9HzFK1ftEvRnPT7EOxOkHPoCk1rz3feTFgtHtNzQqLM1IBTnz6aHHOrda_bKPHH9ZIYCRQUPXhpC90ivW_IJR-f7Z1WLrMXaJ71i1XteruENHrJJJDn0HedHG6N0VHugBHrak5k57cbE31utAdx83TEd8v2Y8wAkCJXKrdmTa-8419LNxW_yjkvoDD53n3X5CHhYkSymU77p0v6yWO38qDWeKJ-Fm_PrMAo72_rizDBj_yPa5LA3bT_EnsgZtC-sp8_SCDIH41bjiCGpRHhqgZmyw`,
fmt.Sprintf("`%s`", keyJWKPs),
true,
"",
},
{
"success-es256-cert",
"es256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJFUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiaXNzIjogInh4eCJ9.lArczfN-pIL8oUU-7PU83u-zfXougXBZj6drFeKFsPEoVhy9WAyiZlRshYqjTSXdaw8yw2L-ovt4zTUZb2PWMg`,
fmt.Sprintf(`"%s"`, certPemEs256),
true,
"",
},
{
"success-es256-jwk",
"es256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJFUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiaXNzIjogInh4eCJ9.lArczfN-pIL8oUU-7PU83u-zfXougXBZj6drFeKFsPEoVhy9WAyiZlRshYqjTSXdaw8yw2L-ovt4zTUZb2PWMg`,
fmt.Sprintf("`%s`", keyJWKEs256),
true,
"",
},
{
"failure-bad token",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.Yt89BjaPCNgol478rYyH66-XgkHos02TsVwxLH3ZlvOoIVjbhYW8q1_MHehct1-yBf1UOX3g-lUrIjpoDtX1TfAESuaWTjYPixRvjfJ-Nn75JF8QuAl5PD27C6aJ4PjUPNfj0kwYBnNQ_oX-ZFb781xRi7qRDB6swE4eBUxzHqKUJBLaMM2r8k1-9iE3ERNeqTJUhV__p0aSyRj-i62rdZ4TC5nhxtWodiGP4e4GrYlXkdaKduK63cfdJF-kfZfTsoDs_xy84pZOkzlflxuNv9bNqd-3ISAdWe4gsEvWWJ8v70-QWkydnH8rhj95DaqoXrjfzbOgDpKtdxJC4daVPKvntykzrxKhZ9UtWzm3OvJSKeyWujFZlldiTfBLqNDgdi-Boj_VxO5Pdh-67lC3L-pBMm4BgUqf6rakBQvoH7AV6zD5CbFixh7DuqJ4eJHHItWzJwDctMrV3asm-uOE1E2B7GErGo3iX6S9Iun_kvRUp6kyvOaDq5VvXzQOKyLQIQyHGGs0aIV5cFI2IuO5Rt0uUj5mzPQrQWHgI4r6Mc5bzmq2QLxBQE8OJ1RFhRpsuoWQyDM8aRiMQIJe1g3x4dnxbJK4dYheYblKHFepScYqT1hllDp3oUNn89sIjQIhJTe8KFATu4K8ppluys7vhpE2a_tq8i5O0MFxWmsxN4Q`,
fmt.Sprintf(`"%s"`, certPem),
false,
"",
},
{
"failure-wrong key",
"ps256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJQUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiZm9vIjogImJhciJ9.i0F3MHWzOsBNLqjQzK1UVeQid9xPMowCoUsoM-C2BDxUY-FMKmCeJ1NJ4TGnS9HzFK1ftEvRnPT7EOxOkHPoCk1rz3feTFgtHtNzQqLM1IBTnz6aHHOrda_bKPHH9ZIYCRQUPXhpC90ivW_IJR-f7Z1WLrMXaJ71i1XteruENHrJJJDn0HedHG6N0VHugBHrak5k57cbE31utAdx83TEd8v2Y8wAkCJXKrdmTa-8419LNxW_yjkvoDD53n3X5CHhYkSymU77p0v6yWO38qDWeKJ-Fm_PrMAo72_rizDBj_yPa5LA3bT_EnsgZtC-sp8_SCDIH41bjiCGpRHhqgZmyw`,
fmt.Sprintf(`"%s"`, certPem),
false,
"",
},
{
"failure-wrong alg",
"ps256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
fmt.Sprintf(`"%s"`, certPem),
false,
"",
},
{
"failure-invalid token",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9`,
fmt.Sprintf(`"%s"`, certPem),
false,
"encoded JWT must have 3 sections, found 2",
},
{
"failure-bad pem certificate block",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
fmt.Sprintf(`"%s"`, certPemBadBlock),
false,
"failed to find a PEM certificate block",
},
{
"failure-extra data after pem certificate block",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
fmt.Sprintf(`"%s"`, certPemExtraData),
false,
"extra data after a PEM certificate block",
},
{
"failure-bad pem certificate",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
fmt.Sprintf(`"%s"`, certPemBadCertificate),
false,
"failed to parse a PEM certificate",
},
{
"failure-bad jwk key",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
fmt.Sprintf("`%s`", keyJWKBadKey),
false,
"failed to parse a JWK key (set)",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%t`, p.result)
if p.err != "" {
exp = &Error{Code: BuiltinErr, Message: p.err}
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.verify_%s("%s", %s, x) }`, p.alg, p.input1, p.input2)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownJWTVerifyHS256(t *testing.T) {
params := []struct {
note string
input1 string
input2 string
result bool
err string
}{
{
"success",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.rz3jTY033z-NrKfwrK89_dcLF7TN4gwCMj-fVBDyLoM`,
"secret",
true,
"",
},
{
"failure-bad token",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.R0NDxM1gHTucWQKwayMDre2PbMNR9K9efmOfygDZWcE`,
"secret",
false,
"",
},
{
"failure-invalid token",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0`,
"secret",
false,
"encoded JWT must have 3 sections, found 2",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%t`, p.result)
if p.err != "" {
exp = &Error{Code: BuiltinErr, Message: p.err}
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.verify_hs256("%s", "%s", x) }`, p.input1, p.input2)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownJWTDecodeVerify(t *testing.T) {
params := []struct {
note string // test name
token string // JWT
constraints string // constraints argument
valid bool // expected validity value
header string // expected header
payload string // expected claims
err string // expected error or "" for success
}{
{
"ps256-unconstrained", // no constraints at all (apart from supplying a key)
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "PS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"ps256-key-wrong", // wrong key for signature
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s"}`, certPem),
false,
`{}`,
`{}`,
"",
},
{
"rs256-key-wrong", // wrong key for signature
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s"}`, certPem),
false,
`{}`,
`{}`,
"",
},
{
"ps256-iss-ok", // enforce issuer
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "iss": "xxx"}`, certPemPs),
true,
`{"alg": "PS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"ps256-iss-wrong", // wrong issuer
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "iss": "yyy"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"ps256-alg-ok", // constrained algorithm
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "alg": "PS256"}`, certPemPs),
true,
`{"alg": "PS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"ps256-alg-wrong", // constrained algorithm, and it's wrong
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "alg": "RS256"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-exp-ok", // token expires, and it's still valid
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s", "time": 2000000000000}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx", "exp": 3000}`,
"",
},
{
"rs256-exp-expired", // token expires, and it's stale at a chosen time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s", "time": 4000000000000}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-exp-now-expired", // token expires, and it's stale at the current implicitly specified real time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-exp-now-explicit-expired", // token expires, and it's stale at the current explicitly specified real time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s", "time": now}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-nbf-ok", // token has a commencement time, and it's commenced at a chosen time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJuYmYiOiAxMDAwLCAiaXNzIjogInh4eCJ9.cwwYDfJhU_ambPIpwBJwDek05miffoudprr41IAYsl0IKekb1ii2uEgwkNM-LJtVXHe9hsK3gANFyfqoJuCZIBvaNMx_3Z0BUdeBs4k1UwBiZCpuud0ofgHKURwvehNgqDvRfchq_-K_Agi2iRdl0oShgLjN-gVbBl8pRwUbQrvASlcsCpZIKUyOzXNtaIZEFh1z6ISDy8UHHOdoieKpN23swya7QAcEb0wXEEKMkkhiRd5QHgWLk37Lnw2K89mKcq4Om0CtV9nHrxxmpYGSMPojCy16Gjdg5-xKyJWvxCfb3YUBUVM4RWa7ICOPRJWPuHxu9pPYG63hb_qDU6NLsw",
fmt.Sprintf(`{"cert": "%s", "time": 2000000000000}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx", "nbf": 1000}`,
"",
},
{
"rs256-nbf-now-ok", // token has a commencement time, and it's commenced at the current implicitly specified time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJuYmYiOiAxMDAwLCAiaXNzIjogInh4eCJ9.cwwYDfJhU_ambPIpwBJwDek05miffoudprr41IAYsl0IKekb1ii2uEgwkNM-LJtVXHe9hsK3gANFyfqoJuCZIBvaNMx_3Z0BUdeBs4k1UwBiZCpuud0ofgHKURwvehNgqDvRfchq_-K_Agi2iRdl0oShgLjN-gVbBl8pRwUbQrvASlcsCpZIKUyOzXNtaIZEFh1z6ISDy8UHHOdoieKpN23swya7QAcEb0wXEEKMkkhiRd5QHgWLk37Lnw2K89mKcq4Om0CtV9nHrxxmpYGSMPojCy16Gjdg5-xKyJWvxCfb3YUBUVM4RWa7ICOPRJWPuHxu9pPYG63hb_qDU6NLsw",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx", "nbf": 1000}`,
"",
},
{
"rs256-nbf-toosoon", // token has a commencement time, and the chosen time is too early
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJuYmYiOiAxMDAwLCAiaXNzIjogInh4eCJ9.cwwYDfJhU_ambPIpwBJwDek05miffoudprr41IAYsl0IKekb1ii2uEgwkNM-LJtVXHe9hsK3gANFyfqoJuCZIBvaNMx_3Z0BUdeBs4k1UwBiZCpuud0ofgHKURwvehNgqDvRfchq_-K_Agi2iRdl0oShgLjN-gVbBl8pRwUbQrvASlcsCpZIKUyOzXNtaIZEFh1z6ISDy8UHHOdoieKpN23swya7QAcEb0wXEEKMkkhiRd5QHgWLk37Lnw2K89mKcq4Om0CtV9nHrxxmpYGSMPojCy16Gjdg5-xKyJWvxCfb3YUBUVM4RWa7ICOPRJWPuHxu9pPYG63hb_qDU6NLsw",
fmt.Sprintf(`{"cert": "%s", "time": 500000000000}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-alg-missing", // alg is missing from the JOSE header
"eyJ0eXAiOiAiSldUIiwgImtpZCI6ICJrMSJ9.eyJpc3MiOiAieHh4IiwgInN1YiI6ICJmcmVkIn0.J4J4FgUD_P5fviVVjgvQWJDg-5XYTP_tHCwB3kSlYVKv8vmnZRNh4ke68OxfMP96iM-LZswG2fNqe-_piGIMepF5rCe1iIWAuz3qqkxfS9YVF3hvwoXhjJT0yIgrDMl1lfW5_XipNshZoxddWK3B7dnVW74MFazEEFuefiQm3PdMUX8jWGsmfgPnqBIZTizErNhoIMuRvYaVM1wA2nfrpVGONxMTaw8T0NRwYIuZwubbnNQ1yLhI0y3dsZvQ_lrh9Khtk9fS1V3SRh7aa9AvferJ4T-48qn_V1m3sINPgoA-uLGyyu3k_GkXRYW1yGNC-MH4T2cwhj89WITbIhusgQ",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-crit-junk", // the JOSE header contains an unrecognized critical parameter
"eyJjcml0IjogWyJqdW5rIl0sICJraWQiOiAiazEiLCAiYWxnIjogIlJTMjU2IiwgInR5cCI6ICJKV1QiLCAianVuayI6ICJ4eHgifQ.eyJpc3MiOiAieHh4IiwgInN1YiI6ICJmcmVkIn0.YfoUpW5CgDBtxtBuOix3cdYJGT8cX9Mq7wOhIbjDK7eRQUsAmMY_0EQPh7bd7Yi1gLI3e11BKzguf2EHqAa1kbkHWwFniBO-RIi8q42v2uxC4lpEpIjfaaXB5XmsLfAXtYRqh0AObvbSho6VDXBP_Kn81nhIiE2yFbH14_jhRMSxDBs5ToSkXV-XJHw5bONP8NxPqEk9KF3ZJGzN7J_KoD6LjqfYai5K0eLNEIZh4C1WjTdmCKMR4K6ieZRQWZiSsnhSqLSQERir4n22G3QsdY7dOnCp-SS4VYu3V-PfsOSFMvQ-TTAN1geqMZ9A7k1CCLW0wxKBs-KCiYzmRTzwxA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rsa256-nested", // one nesting level
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCIsICJjdHkiOiAiSldUIn0.ZXlKaGJHY2lPaUFpVWxNeU5UWWlMQ0FpZEhsd0lqb2dJa3BYVkNKOS5leUpwYzNNaU9pQWllSGg0SW4wLnJSUnJlUU9DYW9ZLW1Nazcyak5GZVk1YVlFUWhJZ0lFdFZkUTlYblltUUwyTHdfaDdNbkk0U0VPMVBwa0JIVEpyZnljbEplTHpfalJ2UGdJMlcxaDFCNGNaVDhDZ21pVXdxQXI5c0puZHlVQ1FtSWRrbm53WkI5cXAtX3BTdGRHWEo5WnAzeEo4NXotVEJpWlN0QUNUZFdlUklGSUU3VkxPa20tRmxZdzh5OTdnaUN4TmxUdWl3amxlTjMwZDhnWHUxNkZGQzJTSlhtRjZKbXYtNjJHbERhLW1CWFZ0bGJVSTVlWVUwaTdueTNyQjBYUVQxRkt4ZUZ3OF85N09FdV9jY3VLcl82ZHlHZVFHdnQ5Y3JJeEFBMWFZbDdmbVBrNkVhcjllTTNKaGVYMi00Wkx0d1FOY1RDT01YV0dIck1DaG5MWVc4WEFrTHJEbl9yRmxUaVMtZw.Xicc2sWCZ_Nithucsw9XD7YOKrirUdEnH3MyiPM-Ck3vEU2RsTBsfU2JPhfjp3phc0VOgsAXCzwU5PwyNyUo1490q8YSym-liMyO2Lk-hjH5fAxoizg9yD4II_lK6Wz_Tnpc0bBGDLdbuUhvgvO7yqo-leBQlsfRXOvw4VSPSEy8QPtbURtbnLpWY2jGBKz7vGI_o4qDJ3PicG0kyEiWZNh3wjeeCYRCWvXN8qh7Uk5EA-8J5vX651GqV-7gmaX1n-8DXamhaCQcE-p1cjSj04-X-_bJlQtmb-TT3bSyUPxgHVncvxNUby8jkUTzfi5MMbmIzWWkxI5YtJTdtmCkPQ",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"rsa256-nested2", // two nesting levels
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCIsICJjdHkiOiAiSldUIn0.ZXlKaGJHY2lPaUFpVWxNeU5UWWlMQ0FpZEhsd0lqb2dJa3BYVkNJc0lDSmpkSGtpT2lBaVNsZFVJbjAuWlhsS2FHSkhZMmxQYVVGcFZXeE5lVTVVV1dsTVEwRnBaRWhzZDBscWIyZEphM0JZVmtOS09TNWxlVXB3WXpOTmFVOXBRV2xsU0dnMFNXNHdMbkpTVW5KbFVVOURZVzlaTFcxTmF6Y3lhazVHWlZrMVlWbEZVV2hKWjBsRmRGWmtVVGxZYmxsdFVVd3lUSGRmYURkTmJrazBVMFZQTVZCd2EwSklWRXB5Wm5samJFcGxUSHBmYWxKMlVHZEpNbGN4YURGQ05HTmFWRGhEWjIxcFZYZHhRWEk1YzBwdVpIbFZRMUZ0U1dScmJtNTNXa0k1Y1hBdFgzQlRkR1JIV0VvNVduQXplRW80TlhvdFZFSnBXbE4wUVVOVVpGZGxVa2xHU1VVM1ZreFBhMjB0Um14WmR6aDVPVGRuYVVONFRteFVkV2wzYW14bFRqTXdaRGhuV0hVeE5rWkdRekpUU2xodFJqWktiWFl0TmpKSGJFUmhMVzFDV0ZaMGJHSlZTVFZsV1ZVd2FUZHVlVE55UWpCWVVWUXhSa3Q0WlVaM09GODVOMDlGZFY5alkzVkxjbDgyWkhsSFpWRkhkblE1WTNKSmVFRkJNV0ZaYkRkbWJWQnJOa1ZoY2psbFRUTkthR1ZZTWkwMFdreDBkMUZPWTFSRFQwMVlWMGRJY2sxRGFHNU1XVmM0V0VGclRISkVibDl5Um14VWFWTXRady5YaWNjMnNXQ1pfTml0aHVjc3c5WEQ3WU9LcmlyVWRFbkgzTXlpUE0tQ2szdkVVMlJzVEJzZlUySlBoZmpwM3BoYzBWT2dzQVhDendVNVB3eU55VW8xNDkwcThZU3ltLWxpTXlPMkxrLWhqSDVmQXhvaXpnOXlENElJX2xLNld6X1RucGMwYkJHRExkYnVVaHZndk83eXFvLWxlQlFsc2ZSWE92dzRWU1BTRXk4UVB0YlVSdGJuTHBXWTJqR0JLejd2R0lfbzRxREozUGljRzBreUVpV1pOaDN3amVlQ1lSQ1d2WE44cWg3VWs1RUEtOEo1dlg2NTFHcVYtN2dtYVgxbi04RFhhbWhhQ1FjRS1wMWNqU2owNC1YLV9iSmxRdG1iLVRUM2JTeVVQeGdIVm5jdnhOVWJ5OGprVVR6Zmk1TU1ibUl6V1dreEk1WXRKVGR0bUNrUFE.ODBVH_gooCLJxtPVr1MjJC1syG4MnVUFP9LkI9pSaj0QABV4vpfqrBshHn8zOPgUTDeHwbc01Qy96cQlTMQQb94YANmZyL1nzwmdR4piiGXMGSlcCNfDg1o8DK4msMSR-X-j2IkxBDB8rfeFSfLRMgDCjAF0JolW7qWmMD9tBmFNYAjly4vMwToOXosDmFLl5eqyohXDf-3Ohljm5kIjtyMWkt5S9EVuwlIXh2owK5l59c4-TH29gkuaZ3uU4LFPjD7XKUrlOQnEMuu2QD8LAqTyxbnY4JyzUWEvyTM1dVmGnFpLKCg9QBly__y1u2ffhvDsHyuCmEKAbhPE98YvFA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"es256-unconstrained", // ECC key, no constraints
"eyJhbGciOiAiRVMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.JvbTLBF06FR70gb7lCbx_ojhp4bk9--B_aULgNlYM0fYf9OSawaqBQp2lwW6FADFtRJ2WFUk5g0zwVOUlnrlzw",
fmt.Sprintf(`{"cert": "%s"}`, certPemEs256),
true,
`{"alg": "ES256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"hs256-unconstrained", // HMAC key, no constraints
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.rz3jTY033z-NrKfwrK89_dcLF7TN4gwCMj-fVBDyLoM`,
`{"secret": "secret"}`,
true,
`{"alg": "HS256", "typ": "JWT"}`,
`{"user": "alice", "azp": "alice", "subordinates": [], "hr": false}`,
"",
},
{
"hs256-key-wrong", // HMAC with wrong key
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.rz3jTY033z-NrKfwrK89_dcLF7TN4gwCMj-fVBDyLoM`,
`{"secret": "the wrong key"}`,
false,
`{}`,
`{}`,
"",
},
{
"rs256-aud", // constraint requires an audience, found right one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6ICJmcmVkIn0.F-9m2Tx8r1tuQFirazsI4FK05bXX3uP4ut8M2FryJ07k3bQhy262fdwNDmuFcGx0NfL-c80agcwGoTzMWXkVEgZ2KTz0QSAdcdGk3ZWtUy-Mj2IilZ1dzkVvW8LsithYFTGcUtkelFDrJwtMQ0Kum7SXJpC_HCBk4PbftY0XD6jRgHLnQdeT9_J11L4sd19vCdpxxxm3_m_yvUV3ZynzB4vhQbS3CET4EClAVhi-m_gMh9mj85gY1ycIz6-FxWv8xM2Igm2SMeIdyJwAvEGnIauRS928P_OqVCZgCH2Pafnxtzy77Llpxy8XS0xu5PtPw3_azhg33GaXDCFsfz6GpA",
fmt.Sprintf(`{"cert": "%s", "aud": "fred"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"aud": "fred", "iss": "xxx"}`,
"",
},
{
"rs256-aud-list", // constraint requires an audience, found list including right one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6IFsiZnJlZCIsICJib2IiXX0.k8jW7PUiMkQCKCjnSFBFFKPDO0RXwZgVkLUwUfi8sMdrrcKi12LC8wd5fLBn0YraFtMXWKdMweKf9ZC-K33h5TK7kkTVKOXctF50mleMlUn0Up_XjtdP1v-2WOfivUXcexN1o-hu0kH7sSQnielXIjC2EAleG6A54YUOZFBdzvd1PKHlsxA7x2iiL73uGeFlyxoaMki8E5tx7FY6JGF1RdhWCoIV5A5J8QnwI5EetduJQ505U65Pk7UApWYWu4l2DT7KCCJa5dJaBvCBemVxWaBhCQWtJKU2ZgOEkpiK7b_HsdeRBmpG9Oi1o5mt5ybC09VxSD-lEda_iJO_7i042A",
fmt.Sprintf(`{"cert": "%s", "aud": "bob"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"aud": ["fred", "bob"], "iss": "xxx"}`,
"",
},
{
"ps256-no-aud", // constraint requires an audience, none in JWT
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "aud": "cath"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-missing-aud", // constraint requires no audience, found one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6ICJmcmVkIn0.F-9m2Tx8r1tuQFirazsI4FK05bXX3uP4ut8M2FryJ07k3bQhy262fdwNDmuFcGx0NfL-c80agcwGoTzMWXkVEgZ2KTz0QSAdcdGk3ZWtUy-Mj2IilZ1dzkVvW8LsithYFTGcUtkelFDrJwtMQ0Kum7SXJpC_HCBk4PbftY0XD6jRgHLnQdeT9_J11L4sd19vCdpxxxm3_m_yvUV3ZynzB4vhQbS3CET4EClAVhi-m_gMh9mj85gY1ycIz6-FxWv8xM2Igm2SMeIdyJwAvEGnIauRS928P_OqVCZgCH2Pafnxtzy77Llpxy8XS0xu5PtPw3_azhg33GaXDCFsfz6GpA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-wrong-aud", // constraint requires an audience, found wrong one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6ICJmcmVkIn0.F-9m2Tx8r1tuQFirazsI4FK05bXX3uP4ut8M2FryJ07k3bQhy262fdwNDmuFcGx0NfL-c80agcwGoTzMWXkVEgZ2KTz0QSAdcdGk3ZWtUy-Mj2IilZ1dzkVvW8LsithYFTGcUtkelFDrJwtMQ0Kum7SXJpC_HCBk4PbftY0XD6jRgHLnQdeT9_J11L4sd19vCdpxxxm3_m_yvUV3ZynzB4vhQbS3CET4EClAVhi-m_gMh9mj85gY1ycIz6-FxWv8xM2Igm2SMeIdyJwAvEGnIauRS928P_OqVCZgCH2Pafnxtzy77Llpxy8XS0xu5PtPw3_azhg33GaXDCFsfz6GpA",
fmt.Sprintf(`{"cert": "%s", "aud": "cath"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-wrong-aud-list", // constraint requires an audience, found list of wrong ones in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6IFsiZnJlZCIsICJib2IiXX0.k8jW7PUiMkQCKCjnSFBFFKPDO0RXwZgVkLUwUfi8sMdrrcKi12LC8wd5fLBn0YraFtMXWKdMweKf9ZC-K33h5TK7kkTVKOXctF50mleMlUn0Up_XjtdP1v-2WOfivUXcexN1o-hu0kH7sSQnielXIjC2EAleG6A54YUOZFBdzvd1PKHlsxA7x2iiL73uGeFlyxoaMki8E5tx7FY6JGF1RdhWCoIV5A5J8QnwI5EetduJQ505U65Pk7UApWYWu4l2DT7KCCJa5dJaBvCBemVxWaBhCQWtJKU2ZgOEkpiK7b_HsdeRBmpG9Oi1o5mt5ybC09VxSD-lEda_iJO_7i042A",
fmt.Sprintf(`{"cert": "%s", "aud": "cath"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"multiple-keys-one-valid",
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0.ZcLZbBKpPFFz8YGD2jEbXzwHT7DWtqRVk1PTV-cAWUV8jr6f2a--Fw9SFR3vSbrtFif06AQ3aWY7PMM2AuxDjiUVGjItmHRz0sJBEijcE2QVkDN7MNK3Kk1fsM_hbEXzNCzChZpEkTZnLy9ijkJJFD0j6lBat4lO5Zc_LC2lXUftV_hU2aW9mQ7pLSgJjItzRymivnN0g-WUDq5IPK_M8b3yPy_N9iByj8B2FO0sC3TuOrXWbrYrX4ve4bAaSqOFOXiL5Z5BJfmmtT--xKdWDGJxnei8lbv7in7t223fVsUpsH-zmybp529Fya37BsaIlcgLrl38ghvoqy2sHu2wAA",
fmt.Sprintf("{\"cert\": `%s`, \"time\": 1574723450396363500}", multiKeyJWkS),
true,
`{
"alg": "RS256",
"typ": "JWT"
}`,
`{
"admin": true,
"iat": 1516239022,
"name": "John Doe",
"sub": "1234567890"
}`,
"",
},
{
"multiple-keys-no-valid",
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0.G051ZlKno4XdDz4pdPthPKH1cKlFqkREvx_dHhl6kwM",
fmt.Sprintf("{\"cert\": `%s`, \"time\": 1574723450396363500}", multiKeyJWkS),
false,
`{}`,
`{}`,
"",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`[%#v, %s, %s]`, p.valid, p.header, p.payload)
if p.err != "" {
exp = errors.New(p.err)
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = [x, y, z] { time.now_ns(now); io.jwt.decode_verify("%s", %s, [x, y, z]) }`, p.token, p.constraints)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
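// Note on the rule template used above: time.now_ns(now) binds the Rego variable
// `now`, so a constraints string such as {"cert": "...", "time": now} picks up the
// evaluation-time wall clock. An illustrative, substituted rule for the
// "rs256-exp-now-explicit-expired" case (token and certificate elided) reads:
//
//	p = [x, y, z] {
//	    time.now_ns(now)
//	    io.jwt.decode_verify("<token>", {"cert": "<pem>", "time": now}, [x, y, z])
//	}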
func TestTopDownJWTEncodeSign(t *testing.T) {
astHeaderHS256Term := ast.MustParseTerm(`{"typ": "JWT", "alg": "HS256"}`)
astPayloadTerm := ast.MustParseTerm(`{"iss": "joe", "exp": 1300819380, "aud": ["bob", "saul"], "http://example.com/is_root": true, "privateParams": {"private_one": "one", "private_two": "two"}}`)
astSymmetricKeyTerm := ast.MustParseTerm(`{"kty": "oct", "k": "AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow"}`)
astHeaderHS256Obj := astHeaderHS256Term.Value.(ast.Object)
astPayloadObj := astPayloadTerm.Value.(ast.Object)
astSymmetricKeyObj := astSymmetricKeyTerm.Value.(ast.Object)
astHeaderRS256Term := ast.MustParseTerm(`{"alg": "RS256"}`)
astHeaderRS256Obj := astHeaderRS256Term.Value.(ast.Object)
astRSAKeyTerm := ast.MustParseTerm(`{"kty": "RSA", "n": "ofgWCuLjybRlzo0tZWJjNiuSfb4p4fAkd_wWJcyQoTbji9k0l8W26mPddxHmfHQp-Vaw-4qPCJrcS2mJPMEzP1Pt0Bm4d4QlL-yRT-SFd2lZS-pCgNMsD1W_YpRPEwOWvG6b32690r2jZ47soMZo9wGzjb_7OMg0LOL-bSf63kpaSHSXndS5z5rexMdbBYUsLA9e-KXBdQOS-UTo7WTBEMa2R2CapHg665xsmtdVMTBQY4uDZlxvb3qCo5ZwKh9kG4LT6_I5IhlJH7aGhyxXFvUK-DWNmoudF8NAco9_h9iaGNj8q2ethFkMLs91kzk2PAcDTW9gb54h4FRWyuXpoQ", "e": "AQAB", "d": "Eq5xpGnNCivDflJsRQBXHx1hdR1k6Ulwe2JZD50LpXyWPEAeP88vLNO97IjlA7_GQ5sLKMgvfTeXZx9SE-7YwVol2NXOoAJe46sui395IW_GO-pWJ1O0BkTGoVEn2bKVRUCgu-GjBVaYLU6f3l9kJfFNS3E0QbVdxzubSu3Mkqzjkn439X0M_V51gfpRLI9JYanrC4D4qAdGcopV_0ZHHzQlBjudU2QvXt4ehNYTCBr6XCLQUShb1juUO1ZdiYoFaFQT5Tw8bGUl_x_jTj3ccPDVZFD9pIuhLhBOneufuBiB4cS98l2SR_RQyGWSeWjnczT0QU91p1DhOVRuOopznQ", "p": "4BzEEOtIpmVdVEZNCqS7baC4crd0pqnRH_5IB3jw3bcxGn6QLvnEtfdUdiYrqBdss1l58BQ3KhooKeQTa9AB0Hw_Py5PJdTJNPY8cQn7ouZ2KKDcmnPGBY5t7yLc1QlQ5xHdwW1VhvKn-nXqhJTBgIPgtldC-KDV5z-y2XDwGUc", "q": "uQPEfgmVtjL0Uyyx88GZFF1fOunH3-7cepKmtH4pxhtCoHqpWmT8YAmZxaewHgHAjLYsp1ZSe7zFYHj7C6ul7TjeLQeZD_YwD66t62wDmpe_HlB-TnBA-njbglfIsRLtXlnDzQkv5dTltRJ11BKBBypeeF6689rjcJIDEz9RWdc", "dp": "BwKfV3Akq5_MFZDFZCnW-wzl-CCo83WoZvnLQwCTeDv8uzluRSnm71I3QCLdhrqE2e9YkxvuxdBfpT_PI7Yz-FOKnu1R6HsJeDCjn12Sk3vmAktV2zb34MCdy7cpdTh_YVr7tss2u6vneTwrA86rZtu5Mbr1C1XsmvkxHQAdYo0", "dq": "h_96-mK1R_7glhsum81dZxjTnYynPbZpHziZjeeHcXYsXaaMwkOlODsWa7I9xXDoRwbKgB719rrmI2oKr6N3Do9U0ajaHF-NKJnwgjMd2w9cjz3_-kyNlxAr2v4IKhGNpmM5iIgOS1VZnOZ68m6_pbLBSp3nssTdlqvd0tIiTHU", "qi": "IYd7DHOhrWvxkwPQsRM2tOgrjbcrfvtQJipd-DlcxyVuuM9sQLdgjVk2oy26F0EmpScGLq2MowX7fhd_QJQ3ydy5cY7YIBi87w93IKLEdfnbJtoOPLUW0ITrJReOgo1cq9SbsxYawBgfp_gh6A5603k2-ZQwVK0JKSHuLFkuQ3U"}`)
astRSAKeyObj := astRSAKeyTerm.Value.(ast.Object)
params := []struct {
note string
input1 ast.Object
input2 ast.Object
input3 ast.Object
result string
err string
}{
{
"https://tools.ietf.org/html/rfc7515#appendix-A.1",
astHeaderHS256Obj,
astPayloadObj,
astSymmetricKeyObj,
`"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJhdWQiOiBbImJvYiIsICJzYXVsIl0sICJleHAiOiAxMzAwODE5MzgwLCAiaHR0cDovL2V4YW1wbGUuY29tL2lzX3Jvb3QiOiB0cnVlLCAiaXNzIjogImpvZSIsICJwcml2YXRlUGFyYW1zIjogeyJwcml2YXRlX29uZSI6ICJvbmUiLCAicHJpdmF0ZV90d28iOiAidHdvIn19.M10TcaFADr_JYAx7qJ71wktdyuN4IAnhWvVbgrZ5j_4"`,
"",
},
{
"Empty JSON payload",
astHeaderHS256Obj,
ast.NewObject(),
astSymmetricKeyObj,
`"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.e30.Odp4A0Fj6NoKsV4Gyoy1NAmSs6KVZiC15S9VRGZyR20"`,
"",
},
{
"https://tools.ietf.org/html/rfc7515#appendix-A.2",
astHeaderRS256Obj,
astPayloadObj,
astRSAKeyObj,
`"eyJhbGciOiAiUlMyNTYifQ.eyJhdWQiOiBbImJvYiIsICJzYXVsIl0sICJleHAiOiAxMzAwODE5MzgwLCAiaHR0cDovL2V4YW1wbGUuY29tL2lzX3Jvb3QiOiB0cnVlLCAiaXNzIjogImpvZSIsICJwcml2YXRlUGFyYW1zIjogeyJwcml2YXRlX29uZSI6ICJvbmUiLCAicHJpdmF0ZV90d28iOiAidHdvIn19.ITpfhDICCeVV__1nHRN2CvUFni0yyYESvhNlt4ET0yiySMzJ5iySGynrsM3kgzAv7mVmx5uEtSCs_xPHyLVfVnADKmDFtkZfuvJ8jHfcOe8TUqR1f7j1Zf_kDkdqJAsuGuqkJoFJ3S_gxWcZNwtDXV56O3k_7Mq03Ixuuxtip2oF0X3fB7QtUzjzB8mWPTJDFG2TtLLOYCcobPHmn36aAgesHMzJZj8U8sRLmqPXsIc-Lo_btt8gIUc9zZSgRiy7NOSHxw5mYcIMlKl93qvLXu7AaAcVLvzlIOCGWEnFpGGcRFgSOLnShQX6hDylWavKLQG-VOUJKmtXH99KBK-OYQ"`,
"",
},
}
type test struct {
note string
rules []string
expected interface{}
}
var tests []test
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%s`, p.result)
if p.err != "" {
exp = errors.New(p.err)
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign(%v, %v, %v, x) }`, p.input1, p.input2, p.input3)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
| [
"\"OPA_TRACE_TEST\"",
"\"OPA_TRACE_TEST\""
]
| []
| [
"OPA_TRACE_TEST"
]
| [] | ["OPA_TRACE_TEST"] | go | 1 | 0 | |
source/control_plane/python/lambda/cancel_tasks/cancel_tasks.py | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 https://aws.amazon.com/apache-2-0/
import json
import boto3
import base64
import os
import traceback
import utils.grid_error_logger as errlog
from utils.state_table_common import TASK_STATE_PENDING, TASK_STATE_PROCESSING, TASK_STATE_RETRYING, StateTableException
client = boto3.client('dynamodb')
dynamodb = boto3.resource('dynamodb')
from api.state_table_manager import state_table_manager
state_table = state_table_manager(
os.environ['STATE_TABLE_SERVICE'],
os.environ['STATE_TABLE_CONFIG'],
os.environ['STATE_TABLE_NAME'])
task_states_to_cancel = [TASK_STATE_RETRYING, TASK_STATE_PENDING, TASK_STATE_PROCESSING]
def cancel_tasks_by_status(session_id, task_state):
"""
    Cancel tasks in the specified state within a session.
Args:
string: session_id
string: task_state
Returns:
dict: results
"""
response = state_table.get_tasks_by_state(session_id, task_state)
print(f"state_table.get_tasks_by_state: {response}")
try:
for row in response['Items']:
state_table.update_task_status_to_cancelled(row['task_id'])
except StateTableException as e:
errlog.log("StateTableException error in setting task's status to cancelled {} [{}]".format(
e, traceback.format_exc()))
raise e
except Exception as e:
errlog.log("Unexpected error in in setting task's status to cancelled {} [{}]".format(
e, traceback.format_exc()))
raise e
return response['Items']
def cancel_session(session_id):
"""
Cancel all tasks within a session
Args:
string: session_id
Returns:
dict: results
"""
lambda_response = {}
all_cancelled_tasks = []
for state in task_states_to_cancel:
res = cancel_tasks_by_status(session_id, state)
print("Cancelling session: {} status: {} result: {}".format(
session_id, state, res))
lambda_response["cancelled_{}".format(state)] = len(res)
all_cancelled_tasks += res
lambda_response["total_cancelled_tasks"] = len(all_cancelled_tasks)
    return lambda_response
def get_session_id_from_event(event):
"""
Args:
lambda's invocation event
Returns:
str: session id encoded in the event
"""
    # If the lambda is called through an ALB, extract the actual event
if event.get('queryStringParameters') is not None:
all_params = event.get('queryStringParameters')
encoded_json_tasks = all_params.get('submission_content')
if encoded_json_tasks is None:
raise Exception('Invalid submission format, expect submission_content parameter')
decoded_json_tasks = base64.urlsafe_b64decode(encoded_json_tasks).decode('utf-8')
event = json.loads(decoded_json_tasks)
return event['session_ids_to_cancel']
else:
errlog.log("Unimplemented path, exiting")
assert(False)
def lambda_handler(event, context):
try:
lambda_response = {}
session_ids_to_cancel = get_session_id_from_event(event)
for session2cancel in session_ids_to_cancel:
lambda_sub_response = cancel_session(session2cancel)
lambda_response[session2cancel] = lambda_sub_response
return {
'statusCode': 200,
'body': json.dumps(lambda_response)
}
except Exception as e:
errlog.log('Lambda cancel_tasks error: {} trace: {}'.format(e, traceback.format_exc()))
return {
'statusCode': 542,
'body': "{}".format(e)
}
| []
| []
| [
"STATE_TABLE_NAME",
"STATE_TABLE_SERVICE",
"STATE_TABLE_CONFIG"
]
| [] | ["STATE_TABLE_NAME", "STATE_TABLE_SERVICE", "STATE_TABLE_CONFIG"] | python | 3 | 0 | |
tests/keras/test_keras_model_export.py | # pep8: disable=E501
import h5py
import os
import json
import pytest
import shutil
import importlib
import random
from packaging import version
import tensorflow as tf
from tensorflow.keras.models import Sequential as TfSequential
from tensorflow.keras.layers import Dense as TfDense
from tensorflow.keras.optimizers import SGD as TfSGD
from keras.models import Sequential
from keras.layers import Layer, Dense
from keras import backend as K
from keras.optimizers import SGD
import sklearn.datasets as datasets
import pandas as pd
import numpy as np
import yaml
from unittest import mock
import mlflow
import mlflow.keras
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, infer_signature
from mlflow.models.utils import _read_example
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from tests.helper_functions import pyfunc_serve_and_score_model
from tests.helper_functions import score_model_in_sagemaker_docker_container
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import
from tests.pyfunc.test_spark import score_model_as_udf
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
@pytest.fixture(scope="module", autouse=True)
def fix_random_seed():
SEED = 0
os.environ["PYTHONHASHSEED"] = str(SEED)
random.seed(SEED)
np.random.seed(SEED)
if version.parse(tf.__version__) >= version.parse("2.0.0"):
tf.random.set_seed(SEED)
else:
tf.set_random_seed(SEED)
@pytest.fixture(scope="module")
def data():
iris = datasets.load_iris()
data = pd.DataFrame(
data=np.c_[iris["data"], iris["target"]], columns=iris["feature_names"] + ["target"]
)
y = data["target"]
x = data.drop("target", axis=1)
return x, y
@pytest.fixture(scope="module")
def model(data):
x, y = data
model = Sequential()
model.add(Dense(3, input_dim=4))
model.add(Dense(1))
# Use a small learning rate to prevent exploding gradients which may produce
# infinite prediction values
model.compile(loss="mean_squared_error", optimizer=SGD(learning_rate=0.001))
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def tf_keras_model(data):
x, y = data
model = TfSequential()
model.add(TfDense(3, input_dim=4))
model.add(TfDense(1))
model.compile(loss="mean_squared_error", optimizer=TfSGD(learning_rate=0.001))
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def predicted(model, data):
return model.predict(data[0])
@pytest.fixture(scope="module")
def custom_layer():
class MyDense(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyDense, self).__init__(**kwargs)
def build(self, input_shape):
# pylint: disable=attribute-defined-outside-init
self.kernel = self.add_weight(
name="kernel",
shape=(input_shape[1], self.output_dim),
initializer="uniform",
trainable=True,
)
super(MyDense, self).build(input_shape)
def call(self, x):
# pylint: disable=arguments-differ
return K.dot(x, self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
def get_config(self):
return {"output_dim": self.output_dim}
return MyDense
@pytest.fixture(scope="module")
def custom_model(data, custom_layer):
x, y = data
x, y = x.values, y.values
model = Sequential()
model.add(custom_layer(6))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer="SGD")
model.fit(x, y, epochs=1)
return model
@pytest.fixture(scope="module")
def custom_predicted(custom_model, data):
return custom_model.predict(data[0])
@pytest.fixture
def model_path(tmpdir):
return os.path.join(tmpdir.strpath, "model")
@pytest.fixture
def keras_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_conda_deps=["keras", "tensorflow", "pytest"])
return conda_env
def test_that_keras_module_arg_works(model_path):
class MyModel(object):
def __init__(self, x):
self._x = x
def __eq__(self, other):
return self._x == other._x
def save(self, path, **kwargs):
# pylint: disable=unused-argument
with h5py.File(path, "w") as f:
f.create_dataset(name="x", data=self._x)
class FakeKerasModule(object):
__name__ = "some.test.keras.module"
__version__ = "42.42.42"
@staticmethod
def load_model(file, **kwargs):
# pylint: disable=unused-argument
return MyModel(file.get("x").value)
original_import = importlib.import_module
def _import_module(name, **kwargs):
if name.startswith(FakeKerasModule.__name__):
return FakeKerasModule
else:
return original_import(name, **kwargs)
with mock.patch("importlib.import_module") as import_module_mock:
import_module_mock.side_effect = _import_module
x = MyModel("x123")
path0 = os.path.join(model_path, "0")
with pytest.raises(MlflowException):
mlflow.keras.save_model(x, path0)
mlflow.keras.save_model(x, path0, keras_module=FakeKerasModule)
y = mlflow.keras.load_model(path0)
assert x == y
path1 = os.path.join(model_path, "1")
mlflow.keras.save_model(x, path1, keras_module=FakeKerasModule.__name__)
z = mlflow.keras.load_model(path1)
assert x == z
# Tests model log
with mlflow.start_run() as active_run:
with pytest.raises(MlflowException):
mlflow.keras.log_model(x, "model0")
mlflow.keras.log_model(x, "model0", keras_module=FakeKerasModule)
a = mlflow.keras.load_model("runs:/{}/model0".format(active_run.info.run_id))
assert x == a
mlflow.keras.log_model(x, "model1", keras_module=FakeKerasModule.__name__)
b = mlflow.keras.load_model("runs:/{}/model1".format(active_run.info.run_id))
assert x == b
@pytest.mark.parametrize("build_model", [model, tf_keras_model])
@pytest.mark.large
def test_model_save_load(build_model, model_path, data):
x, _ = data
keras_model = build_model(data)
if build_model == tf_keras_model:
model_path = os.path.join(model_path, "tf")
else:
model_path = os.path.join(model_path, "plain")
expected = keras_model.predict(x)
mlflow.keras.save_model(keras_model, model_path)
# Loading Keras model
model_loaded = mlflow.keras.load_model(model_path)
assert type(keras_model) == type(model_loaded)
assert all(expected == model_loaded.predict(x))
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
assert all(pyfunc_loaded.predict(x).values == expected)
# pyfunc serve
scoring_response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pd.DataFrame(x),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
)
print(scoring_response.content)
assert all(
pd.read_json(scoring_response.content, orient="records", encoding="utf8").values.astype(
np.float32
)
== expected
)
# test spark udf
spark_udf_preds = score_model_as_udf(
model_uri=os.path.abspath(model_path), pandas_df=pd.DataFrame(x), result_type="float"
)
np.allclose(np.array(spark_udf_preds), expected.reshape(len(spark_udf_preds)))
@pytest.mark.large
def test_signature_and_examples_are_saved_correctly(model, data):
signature_ = infer_signature(*data)
example_ = data[0].head(3)
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.keras.save_model(
model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert all((_read_example(mlflow_model, path) == example).all())
@pytest.mark.large
def test_custom_model_save_load(custom_model, custom_layer, data, custom_predicted, model_path):
x, _ = data
custom_objects = {"MyDense": custom_layer}
mlflow.keras.save_model(custom_model, model_path, custom_objects=custom_objects)
# Loading Keras model
model_loaded = mlflow.keras.load_model(model_path)
assert all(model_loaded.predict(x) == custom_predicted)
# pyfunc serve
scoring_response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pd.DataFrame(x),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
)
assert np.allclose(
pd.read_json(scoring_response.content, orient="records", encoding="utf8").values.astype(
np.float32
),
custom_predicted,
rtol=1e-5,
atol=1e-9,
)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
assert all(pyfunc_loaded.predict(x).values == custom_predicted)
# test spark udf
spark_udf_preds = score_model_as_udf(
model_uri=os.path.abspath(model_path), pandas_df=pd.DataFrame(x), result_type="float"
)
np.allclose(np.array(spark_udf_preds), custom_predicted.reshape(len(spark_udf_preds)))
def test_custom_model_save_respects_user_custom_objects(custom_model, custom_layer, model_path):
class DifferentCustomLayer:
def __init__(self):
pass
def __call__(self):
pass
incorrect_custom_objects = {"MyDense": DifferentCustomLayer()}
correct_custom_objects = {"MyDense": custom_layer}
mlflow.keras.save_model(custom_model, model_path, custom_objects=incorrect_custom_objects)
model_loaded = mlflow.keras.load_model(model_path, custom_objects=correct_custom_objects)
assert model_loaded is not None
with pytest.raises(TypeError):
model_loaded = mlflow.keras.load_model(model_path)
@pytest.mark.large
def test_model_load_from_remote_uri_succeeds(model, model_path, mock_s3_bucket, data, predicted):
x, _ = data
mlflow.keras.save_model(model, model_path)
artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket)
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
model_loaded = mlflow.keras.load_model(model_uri=model_uri)
assert all(model_loaded.predict(x) == predicted)
@pytest.mark.large
def test_model_log(model, data, predicted):
x, _ = data
# should_start_run tests whether or not calling log_model() automatically starts a run.
for should_start_run in [False, True]:
try:
if should_start_run:
mlflow.start_run()
artifact_path = "keras_model"
mlflow.keras.log_model(model, artifact_path=artifact_path)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
# Load model
model_loaded = mlflow.keras.load_model(model_uri=model_uri)
assert all(model_loaded.predict(x) == predicted)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_uri=model_uri)
assert all(pyfunc_loaded.predict(x).values == predicted)
finally:
mlflow.end_run()
def test_log_model_calls_register_model(model):
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch:
mlflow.keras.log_model(
model, artifact_path=artifact_path, registered_model_name="AdsModel1"
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
mlflow.register_model.assert_called_once_with(
model_uri, "AdsModel1", await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS
)
def test_log_model_no_registered_model_name(model):
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch:
mlflow.keras.log_model(model, artifact_path=artifact_path)
mlflow.register_model.assert_not_called()
@pytest.mark.large
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
model, model_path, keras_custom_env
):
mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=keras_custom_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != keras_custom_env
with open(keras_custom_env, "r") as f:
keras_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == keras_custom_env_parsed
@pytest.mark.large
def test_model_save_accepts_conda_env_as_dict(model, model_path):
conda_env = dict(mlflow.keras.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
@pytest.mark.large
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(model, keras_custom_env):
artifact_path = "model"
with mlflow.start_run():
mlflow.keras.log_model(
keras_model=model, artifact_path=artifact_path, conda_env=keras_custom_env
)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != keras_custom_env
with open(keras_custom_env, "r") as f:
keras_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == keras_custom_env_parsed
@pytest.mark.large
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
model, model_path
):
mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=None)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.keras.get_default_conda_env()
@pytest.mark.large
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(model):
artifact_path = "model"
with mlflow.start_run():
mlflow.keras.log_model(keras_model=model, artifact_path=artifact_path, conda_env=None)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.keras.get_default_conda_env()
@pytest.mark.large
def test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path(
model, model_path, data, predicted
):
"""
This is a backwards compatibility test to ensure that models saved in MLflow version <= 0.8.0
can be loaded successfully. These models are missing the `data` flavor configuration key.
"""
mlflow.keras.save_model(keras_model=model, path=model_path)
shutil.move(os.path.join(model_path, "data", "model.h5"), os.path.join(model_path, "model.h5"))
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
flavor_conf = model_conf.flavors.get(mlflow.keras.FLAVOR_NAME, None)
assert flavor_conf is not None
del flavor_conf["data"]
model_conf.save(model_conf_path)
model_loaded = mlflow.keras.load_model(model_path)
assert all(model_loaded.predict(data[0]) == predicted)
@pytest.mark.release
def test_sagemaker_docker_model_scoring_with_default_conda_env(model, model_path, data, predicted):
mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=None)
scoring_response = score_model_in_sagemaker_docker_container(
model_uri=model_path,
data=data[0],
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
flavor=mlflow.pyfunc.FLAVOR_NAME,
activity_polling_timeout_seconds=500,
)
deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content))
np.testing.assert_array_almost_equal(deployed_model_preds.values, predicted, decimal=4)
| []
| []
| [
"PYTHONHASHSEED"
]
| [] | ["PYTHONHASHSEED"] | python | 1 | 0 | |
controllers/default.go | package controllers
import (
"bytes"
"fmt"
"github.com/astaxie/beego"
"net"
"os"
"strings"
)
type MainController struct {
beego.Controller
}
func (this *MainController) GetForwarded() {
if len(this.Ctx.Request.Header["X-Forwarded-For"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["X-Forwarded-For"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetHost() {
ip := this.Ctx.Input.IP()
names, err := net.LookupAddr(ip)
if err != nil || len(names) == 0 {
this.Data["Value"] = ""
} else {
var value string
for _, v := range names {
value += fmt.Sprintf("%s\n", v)
}
this.Data["Value"] = value
}
this.TplName = "value.tpl"
}
func (this *MainController) GetIP() {
this.Data["Value"] = this.Ctx.Input.IP()
this.TplName = "value.tpl"
}
func (this *MainController) GetPort() {
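	// RemoteAddr is assumed to be "host:port"; splitting on the first ':' works for
	// IPv4 but would break for IPv6 literals such as "[::1]:8080", where
	// net.SplitHostPort would be more robust.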
remote_addr := []byte(this.Ctx.Request.RemoteAddr)
pos := bytes.IndexByte(remote_addr, ':')
this.Data["Value"] = string(remote_addr[pos+1:])
this.TplName = "value.tpl"
}
func (this *MainController) GetVia() {
if len(this.Ctx.Request.Header["Via"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["Via"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetMime() {
if len(this.Ctx.Request.Header["Accept"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["Accept"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetLang() {
if len(this.Ctx.Request.Header["Accept-Language"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["Accept-Language"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetCharset() {
if len(this.Ctx.Request.Header["Charset"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["Charset"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetEncoding() {
if len(this.Ctx.Request.Header["Accept-Encoding"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["Accept-Encoding"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetUserAgent() {
this.Data["Value"] = this.Ctx.Request.UserAgent()
this.TplName = "value.tpl"
}
func (this *MainController) GetConnection() {
if len(this.Ctx.Request.Header["Connection"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["Connection"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetKeepAlive() {
if len(this.Ctx.Request.Header["KeepAlive"]) > 0 {
this.Data["Value"] = this.Ctx.Request.Header["KeepAlive"][0]
}
this.TplName = "value.tpl"
}
func (this *MainController) GetAll() {
this.Data["Email"] = "[email protected]"
this.Data["UserAgent"] = this.Ctx.Request.UserAgent()
ip := this.Ctx.Input.IP()
names, err := net.LookupAddr(ip)
if err != nil || len(names) == 0 {
this.Data["Host"] = ""
} else {
var value string
for _, v := range names {
value += fmt.Sprintf("%s\n", v)
}
this.Data["Host"] = value
}
this.Data["IP"] = this.Ctx.Input.IP()
remote_addr := []byte(this.Ctx.Request.RemoteAddr)
pos := bytes.IndexByte(remote_addr, ':')
this.Data["Port"] = string(remote_addr[pos+1:])
this.Data["Method"] = this.Ctx.Request.Method
if len(this.Ctx.Request.Header["Accept-Encoding"]) > 0 {
this.Data["Encoding"] = this.Ctx.Request.Header["Accept-Encoding"][0]
}
if len(this.Ctx.Request.Header["Accept"]) > 0 {
this.Data["Mime"] = this.Ctx.Request.Header["Accept"][0]
}
if len(this.Ctx.Request.Header["Connection"]) > 0 {
this.Data["Connection"] = this.Ctx.Request.Header["Connection"][0]
}
if len(this.Ctx.Request.Header["Via"]) > 0 {
this.Data["Via"] = this.Ctx.Request.Header["Via"][0]
}
if len(this.Ctx.Request.Header["Charset"]) > 0 {
this.Data["Charset"] = this.Ctx.Request.Header["Charset"][0]
}
if len(this.Ctx.Request.Header["KeepAlive"]) > 0 {
this.Data["Keepalive"] = this.Ctx.Request.Header["KeepAlive"][0]
}
if len(this.Ctx.Request.Header["X-Forwarded-For"]) > 0 {
this.Data["Forwarded"] = this.Ctx.Request.Header["X-Forwarded-For"][0]
}
if len(this.Ctx.Request.Header["Accept-Language"]) > 0 {
this.Data["Lang"] = this.Ctx.Request.Header["Accept-Language"][0]
}
this.Data["Referer"] = this.Ctx.Input.Refer()
this.TplName = "all.tpl"
}
type ifconfig struct {
Email string
UserAgent string
Host string
IP string
Port string
Method string
Encoding string
Mime string
Connection string
Via string
Charset string
Keepalive string
Forwarded string
Lang string
Referer string
}
func (this *MainController) GetAllXML() {
thisData := ifconfig{}
thisData.Email = "[email protected]"
thisData.UserAgent = this.Ctx.Request.UserAgent()
ip := this.Ctx.Input.IP()
names, err := net.LookupAddr(ip)
if err != nil || len(names) == 0 {
thisData.Host = ""
} else {
var value string
for _, v := range names {
value += fmt.Sprintf("%s\n", v)
}
thisData.Host = value
}
thisData.IP = this.Ctx.Input.IP()
remote_addr := []byte(this.Ctx.Request.RemoteAddr)
pos := bytes.IndexByte(remote_addr, ':')
thisData.Port = string(remote_addr[pos+1:])
thisData.Method = this.Ctx.Request.Method
if len(this.Ctx.Request.Header["Accept-Encoding"]) > 0 {
thisData.Encoding = this.Ctx.Request.Header["Accept-Encoding"][0]
}
if len(this.Ctx.Request.Header["Accept"]) > 0 {
thisData.Mime = this.Ctx.Request.Header["Accept"][0]
}
if len(this.Ctx.Request.Header["Connection"]) > 0 {
thisData.Connection = this.Ctx.Request.Header["Connection"][0]
}
if len(this.Ctx.Request.Header["Via"]) > 0 {
thisData.Via = this.Ctx.Request.Header["Via"][0]
}
if len(this.Ctx.Request.Header["Charset"]) > 0 {
thisData.Charset = this.Ctx.Request.Header["Charset"][0]
}
if len(this.Ctx.Request.Header["KeepAlive"]) > 0 {
thisData.Keepalive = this.Ctx.Request.Header["KeepAlive"][0]
}
if len(this.Ctx.Request.Header["X-Forwarded-For"]) > 0 {
thisData.Forwarded = this.Ctx.Request.Header["X-Forwarded-For"][0]
}
if len(this.Ctx.Request.Header["Accept-Language"]) > 0 {
thisData.Lang = this.Ctx.Request.Header["Accept-Language"][0]
}
thisData.Referer = this.Ctx.Input.Refer()
this.Data["xml"] = thisData
this.ServeXML()
}
func (this *MainController) GetAllJSON() {
thisData := make(map[string]interface{})
thisData["Email"] = "[email protected]"
thisData["UserAgent"] = this.Ctx.Request.UserAgent()
ip := this.Ctx.Input.IP()
names, err := net.LookupAddr(ip)
if err != nil || len(names) == 0 {
thisData["Host"] = ""
} else {
var value string
for _, v := range names {
value += fmt.Sprintf("%s\n", v)
}
thisData["Host"] = value
}
thisData["IP"] = this.Ctx.Input.IP()
remote_addr := []byte(this.Ctx.Request.RemoteAddr)
pos := bytes.IndexByte(remote_addr, ':')
thisData["Port"] = string(remote_addr[pos+1:])
thisData["Method"] = this.Ctx.Request.Method
if len(this.Ctx.Request.Header["Accept-Encoding"]) > 0 {
thisData["Encoding"] = this.Ctx.Request.Header["Accept-Encoding"][0]
}
if len(this.Ctx.Request.Header["Accept"]) > 0 {
thisData["Mime"] = this.Ctx.Request.Header["Accept"][0]
}
if len(this.Ctx.Request.Header["Connection"]) > 0 {
thisData["Connection"] = this.Ctx.Request.Header["Connection"][0]
}
if len(this.Ctx.Request.Header["Via"]) > 0 {
thisData["Via"] = this.Ctx.Request.Header["Via"][0]
}
if len(this.Ctx.Request.Header["Charset"]) > 0 {
thisData["Charset"] = this.Ctx.Request.Header["Charset"][0]
}
if len(this.Ctx.Request.Header["KeepAlive"]) > 0 {
thisData["Keepalive"] = this.Ctx.Request.Header["KeepAlive"][0]
}
if len(this.Ctx.Request.Header["X-Forwarded-For"]) > 0 {
thisData["Forwarded"] = this.Ctx.Request.Header["X-Forwarded-For"][0]
}
if len(this.Ctx.Request.Header["Accept-Language"]) > 0 {
thisData["Lang"] = this.Ctx.Request.Header["Accept-Language"][0]
}
thisData["Referer"] = this.Ctx.Input.Refer()
this.Data["json"] = thisData
this.ServeJSON()
}
func (this *MainController) Get() {
if noweb := os.Getenv("NOWEB"); noweb == "1" {
this.Abort("404")
return
}
this.Data["Email"] = "[email protected]"
this.Data["UserAgent"] = this.Ctx.Request.UserAgent()
ip := this.Ctx.Input.IP()
names, err := net.LookupAddr(ip)
if err != nil || len(names) == 0 {
this.Data["Host"] = ""
} else {
var value string
for _, v := range names {
value += fmt.Sprintf("%s\n", v)
}
this.Data["Host"] = value
}
this.Data["IP"] = this.Ctx.Input.IP()
remote_addr := []byte(this.Ctx.Request.RemoteAddr)
pos := bytes.IndexByte(remote_addr, ':')
this.Data["Port"] = string(remote_addr[pos+1:])
this.Data["Method"] = this.Ctx.Request.Method
if len(this.Ctx.Request.Header["Accept-Encoding"]) > 0 {
this.Data["Encoding"] = this.Ctx.Request.Header["Accept-Encoding"][0]
}
if len(this.Ctx.Request.Header["Accept"]) > 0 {
this.Data["Mime"] = this.Ctx.Request.Header["Accept"][0]
}
if len(this.Ctx.Request.Header["Connection"]) > 0 {
this.Data["Connection"] = this.Ctx.Request.Header["Connection"][0]
}
if len(this.Ctx.Request.Header["Via"]) > 0 {
this.Data["Via"] = this.Ctx.Request.Header["Via"][0]
}
if len(this.Ctx.Request.Header["Charset"]) > 0 {
this.Data["Charset"] = this.Ctx.Request.Header["Charset"][0]
}
if len(this.Ctx.Request.Header["KeepAlive"]) > 0 {
this.Data["Keepalive"] = this.Ctx.Request.Header["KeepAlive"][0]
}
if len(this.Ctx.Request.Header["X-Forwarded-For"]) > 0 {
this.Data["Forwarded"] = this.Ctx.Request.Header["X-Forwarded-For"][0]
}
if len(this.Ctx.Request.Header["Accept-Language"]) > 0 {
this.Data["Lang"] = this.Ctx.Request.Header["Accept-Language"][0]
}
this.Data["Referer"] = this.Ctx.Input.Refer()
if strings.Contains(this.Ctx.Request.UserAgent(), "curl") {
this.TplName = "iponly.tpl"
} else {
this.TplName = "index.tpl"
}
}
| [
"\"NOWEB\""
]
| []
| [
"NOWEB"
]
| [] | ["NOWEB"] | go | 1 | 0 | |
v2/ies/trace-reference.go | // Copyright 2019-2020 go-gtp authors. All rights reserved.
// Use of this source code is governed by a MIT-style license that can be
// found in the LICENSE file.
package ies
import (
"io"
"github.com/wmnsk/go-gtp/utils"
)
// NewTraceReference creates a new TraceReference IE.
func NewTraceReference(mcc, mnc string, traceID uint32) *IE {
i := New(TraceReference, 0x00, make([]byte, 6))
plmn, err := utils.EncodePLMN(mcc, mnc)
if err != nil {
return nil
}
copy(i.Payload[0:3], plmn)
copy(i.Payload[3:6], utils.Uint32To24(traceID))
return i
}
// TraceID returns TraceID in uint32 if the type of IE matches.
func (i *IE) TraceID() (uint32, error) {
switch i.Type {
case TraceReference, TraceInformation:
if len(i.Payload) < 6 {
return 0, io.ErrUnexpectedEOF
}
return utils.Uint24To32(i.Payload[3:6]), nil
default:
return 0, &InvalidTypeError{Type: i.Type}
}
}
// MustTraceID returns TraceID in uint32, ignoring errors.
// This should only be used if it is assured to have the value.
func (i *IE) MustTraceID() uint32 {
v, _ := i.TraceID()
return v
}
| []
| []
| []
| [] | [] | go | null | null | null |
main.py | import os
import batch
import preprocessing
import train
import predict
import pandas as pd
from matplotlib import pyplot as plt
from covidDataset import CovidDataset
from torch.utils.data import DataLoader
from transformers import BertForSequenceClassification, BertTokenizer
from sklearn.model_selection import train_test_split
from torch import device, cuda, save
from collections import defaultdict
os.environ['CUDA_VISIBLE_DEVICE'] = '0'
# Preprocess training data
train_file = os.path.join('./', 'TwitterPost', 'train.csv')
df_train, map_en = preprocessing.preprocess(train_file)
# Preprocess testing data
test_file = os.path.join('./', 'TwitterPost', 'test.csv')
df_test, map_en = preprocessing.preprocess(test_file)
# Load bert model and tokenizer
PRETRAINED_MODEL_NAME = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME, do_lower_case=True)
# Self-defined Covid training Dataset; check the first sample,
# i.e., converted (tokens_tensor, segments_tensor, label_tensor)
train_set = CovidDataset(df_train, tokenizer=tokenizer)
label, text = train_set.df.iloc[0].values
tokens_tensor, segments_tensor, label_tensor = train_set[0]
# Decode back to the original text
tokens = tokenizer.convert_ids_to_tokens(tokens_tensor)
combined_text = ' '.join(tokens)
print(f"""[Original]\n
Text: {text}
Label: {label}
--------------------
[Coverted tensors]\n
tokens_tensor :{tokens_tensor}
segments_tensor:{segments_tensor}
label_tensor :{label_tensor}
#--------------------
#
#[Original tokens_tensors]\n
#{combined_text}
#\n""")
# DataLoader returns 64 samples at a time;
# the "collate_fn" parameter defines how each batch is assembled
BATCH_SIZE = 64
train_set, val_set = train_test_split(train_set, test_size=0.1, random_state=2000)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, collate_fn=batch.create_mini_batch)
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, collate_fn=batch.create_mini_batch)
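# Note: batch.create_mini_batch (defined in batch.py, not shown here) is assumed
# to pad the token/segment tensors to equal length and to build the attention
# masks that are unpacked below as masks_tensors.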
data = next(iter(train_loader))
tokens_tensors, segments_tensors, masks_tensors, label_ids = data
print(f"""
tokens_tensors.shape = {tokens_tensors.shape}
{tokens_tensors}
------------------------
segments_tensors.shape = {segments_tensors.shape}
{segments_tensors}
------------------------
masks_tensors.shape = {masks_tensors.shape}
{masks_tensors}
------------------------
label_ids.shape = {label_ids.shape}
{label_ids}
""")
# Fine-tune task is "BertForSequenceClassification"
model = BertForSequenceClassification.from_pretrained(
PRETRAINED_MODEL_NAME, num_labels=4)
# Numbers of parameters
model_params = [p for p in model.parameters() if p.requires_grad]
clf_params = [p for p in model.classifier.parameters() if p.requires_grad]
print(f"""
Parameters of total classifier(Bert + Linear):{sum(p.numel() for p in model_params)}
Parameters of linear classifier:{sum(p.numel() for p in clf_params)}
""")
## Let's begin to train and fine-tune
device = device('cuda:0' if cuda.is_available() else 'cpu')
print('device:', device)
model = model.to(device)
print('\n###Start training###\n')
print(f"{'Epoch':^7} | {'Train loss':^12} | {'Train accuracy':^9} |{'Val loss':^12} | {'Val accuracy':^9} |")
print("-" * 70)
EPOCHS = 4
history = defaultdict(list)
for epoch in range(EPOCHS):
best_accuracy = 0
# Training
train_acc, train_loss = train.train_epoch(model, train_loader, device)
print(f"{epoch + 1:^7} | {train_loss:^12.6f} | {train_acc:^15.2f}", end='')
# Evaluating
val_acc, val_loss = train.eval_epoch(model, val_loader, device)
print(f"| {val_loss:^11.6f} | {val_acc:^14.2f}")
print()
history['train_acc'].append(train_acc)
history['train_loss'].append(train_loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
# Save the best model
if val_acc > best_accuracy:
save(model.state_dict(), 'best_model_state.bin')
print('Training complete!')
# Plot the result
plt.plot(history['train_acc'], label='train acc')
plt.plot(history['val_acc'], label='val acc')
plt.title('Accuracy history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.ylim([0, 1])
plt.grid()
plt.savefig('acc_history.png')
plt.clf()
plt.plot(history['train_loss'], label='train loss')
plt.plot(history['val_loss'], label='val loss')
plt.title('Loss history')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.grid()
plt.savefig('loss_history.png')
# Inference with test set
test_set = CovidDataset(df_test, tokenizer=tokenizer)
test_loader = DataLoader(test_set, batch_size=256,
collate_fn=batch.create_mini_batch)
predictions = predict.get_predictions(model, test_loader, device)  ### Known issue: why does get_predictions return a tuple with 2 identical prediction results?
# Concat predition to .csv
df_pred = df_test
df_pred['prediction'] = predictions[0].tolist()
df_pred.to_csv('predict.csv', index=False)
| []
| []
| [
"CUDA_VISIBLE_DEVICE"
]
| [] | ["CUDA_VISIBLE_DEVICE"] | python | 1 | 0 | |
server/main/src/database/db.go | package database
import (
"bufio"
"bytes"
"database/sql"
"encoding/json"
"fmt"
"os"
"path"
"strings"
"time"
"strconv"
_ "github.com/lib/pq"
"github.com/mr-tron/base58/base58"
"github.com/pkg/errors"
"github.com/schollz/find3/server/main/src/models"
"github.com/schollz/sqlite3dump"
"github.com/schollz/stringsizer"
)
// MakeTables creates two tables, a `keystore` table:
//
// KEY (TEXT) VALUE (TEXT)
//
// and also a `sensors` table for the sensor data:
//
// TIMESTAMP (INTEGER) DEVICE(TEXT) LOCATION(TEXT)
//
// the sensor table will dynamically create more columns as new types
// of sensor data are inserted. The LOCATION column is optional and
// only used for learning/classification.
func (d *Database) MakeTables() (err error) {
sqlStmt := `create table keystore (key text not null primary key, value text);`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `create index keystore_idx on keystore(key);`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `create table sensors (timestamp numeric(20,0) not null primary key, deviceid text, locationid text, unique(timestamp));`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `create table Crowdsourcing (timestamp numeric(20,0) not null primary key, deviceid text, locationid text, unique(timestamp));`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `CREATE TABLE location_predictions (timestamp numeric(20,0) NOT NULL PRIMARY KEY, prediction TEXT, UNIQUE(timestamp));`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `CREATE TABLE devices (id TEXT PRIMARY KEY, name TEXT);`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `CREATE TABLE locations (id TEXT PRIMARY KEY, name TEXT);`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `CREATE TABLE gps (id INTEGER PRIMARY KEY, timestamp numeric(20,0), mac TEXT, loc TEXT, lat REAL, lon REAL, alt REAL);`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `create index devices_name on devices (name);`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sqlStmt = `CREATE INDEX sensors_devices ON sensors (deviceid);`
_, err = d.db.Exec(sqlStmt)
if err != nil {
err = errors.Wrap(err, "MakeTables")
logger.Log.Error(err)
return
}
sensorDataSS, _ := stringsizer.New()
err = d.Set("sensorDataStringSizer", sensorDataSS.Save())
if err != nil {
return
}
return
}
// Columns will list the columns
func (d *Database) Columns() (columns []string, err error) {
rows, err := d.db.Query("SELECT * FROM sensors LIMIT 1")
if err != nil {
err = errors.Wrap(err, "Columns")
return
}
columns, err = rows.Columns()
rows.Close()
if err != nil {
err = errors.Wrap(err, "Columns")
return
}
return
}
// Get will retrieve the value associated with a key.
func (d *Database) Get(key string, v interface{}) (err error) {
stmt, err := d.db.Prepare("select value from keystore where key = $1")
if err != nil {
return errors.Wrap(err, "problem preparing SQL")
}
defer stmt.Close()
var result string
err = stmt.QueryRow(key).Scan(&result)
if err != nil {
logger.Log.Debugf("error looking for %s", key)
return errors.Wrap(err, "problem getting key")
}
err = json.Unmarshal([]byte(result), &v)
if err != nil {
return
}
// logger.Log.Debugf("got %s from '%s'", string(result), key)
return
}
// Set will set a value in the database, when using it like a keystore.
func (d *Database) Set(key string, value interface{}) (err error) {
var b []byte
b, err = json.Marshal(value)
if err != nil {
logger.Log.Debugf("json error key = " + key)
return err
}
tx, err := d.db.Begin()
if err != nil {
return errors.Wrap(err, "Set")
}
stmt, err := tx.Prepare("insert into keystore(key,value) values ($1, $2) on conflict (key) do update set value = $2")
if err != nil {
return errors.Wrap(err, "Set")
}
defer stmt.Close()
_, err = stmt.Exec(key, string(b))
if err != nil {
return errors.Wrap(err, "Set")
}
err = tx.Commit()
if err != nil {
return errors.Wrap(err, "Set")
}
// logger.Log.Debugf("set '%s' to '%s'", key, string(b))
return
}
// Dump will output the string version of the database
func (d *Database) Dump() (dumped string, err error) {
var b bytes.Buffer
out := bufio.NewWriter(&b)
err = sqlite3dump.Dump(d.name, out)
if err != nil {
return
}
out.Flush()
dumped = string(b.Bytes())
return
}
// AddPrediction will insert or update a prediction in the database
func (d *Database) AddPrediction(timestamp int64, aidata []models.LocationPrediction) (err error) {
// truncate to two digits
for i := range aidata {
aidata[i].Probability = float64(int64(float64(aidata[i].Probability)*100)) / 100
}
var b []byte
b, err = json.Marshal(aidata)
if err != nil {
return err
}
tx, err := d.db.Begin()
if err != nil {
return errors.Wrap(err, "begin AddPrediction")
}
stmt, err := tx.Prepare("insert into location_predictions (timestamp,prediction) values ($1, $2) on conflict (timestamp) do update set prediction = $3")
if err != nil {
return errors.Wrap(err, "stmt AddPrediction")
}
defer stmt.Close()
logger.Log.Debugf("prediccionbasedatos",string(b))
_, err = stmt.Exec(timestamp, string(b), string(b))
if err != nil {
return errors.Wrap(err, "exec AddPrediction")
}
err = tx.Commit()
if err != nil {
return errors.Wrap(err, "commit AddPrediction")
}
return
}
// GetPrediction will retrieve models.LocationAnalysis associated with that timestamp
func (d *Database) GetPrediction(timestamp int64) (aidata []models.LocationPrediction, err error) {
stmt, err := d.db.Prepare("SELECT prediction FROM location_predictions WHERE timestamp = $1")
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
return
}
defer stmt.Close()
var result string
err = stmt.QueryRow(timestamp).Scan(&result)
if err != nil {
err = errors.Wrap(err, "problem getting key")
return
}
err = json.Unmarshal([]byte(result), &aidata)
if err != nil {
return
}
// logger.Log.Debugf("got %s from '%s'", string(result), key)
return
}
// AddSensor will insert sensor data into the database
// TODO: AddSensor should be a special case of AddSensors
func (d *Database) AddSensor(s models.SensorData) (err error) {
startTime := time.Now()
	// determine the current table columns
oldColumns := make(map[string]struct{})
columnList, err := d.Columns()
if err != nil {
return
}
for _, column := range columnList {
oldColumns[column] = struct{}{}
}
// get string sizer
var sensorDataStringSizerString string
err = d.Get("sensorDataStringSizer", &sensorDataStringSizerString)
if err != nil {
return errors.Wrap(err, "get sensor data")
}
sensorDataSS, err := stringsizer.New(sensorDataStringSizerString)
if err != nil {
return errors.Wrap(err, "stringsizer")
}
previousCurrent := sensorDataSS.Current
// setup the database
tx, err := d.db.Begin()
if err != nil {
return errors.Wrap(err, "AddSensor")
}
// first add new columns in the sensor data
deviceID, err := d.AddName("devices", s.Device)
if err != nil {
return errors.Wrap(err, "problem getting device ID")
}
locationID := ""
if len(s.Location) > 0 {
locationID, err = d.AddName("locations", s.Location)
if err != nil {
return errors.Wrap(err, "problem getting location ID")
}
}
args := make([]interface{}, 3)
args[0] = s.Timestamp
args[1] = deviceID
args[2] = locationID
argsQ := []string{"$1", "$2", "$3"}
cantArgs := 3
for sensor := range s.Sensors {
if _, ok := oldColumns[sensor]; !ok {
stmt, err := tx.Prepare("alter table sensors add column " + sensor + " text")
if err != nil {
return errors.Wrap(err, "AddSensor, adding column")
}
_, err = stmt.Exec()
if err != nil {
return errors.Wrap(err, "AddSensor, adding column")
}
logger.Log.Debugf("adding column %s", sensor)
columnList = append(columnList, sensor)
stmt.Close()
}
}
// organize arguments in the correct order
for _, sensor := range columnList {
if _, ok := s.Sensors[sensor]; !ok {
continue
}
cantArgs ++
sqlVarNumber := "$" + fmt.Sprint(cantArgs)
argsQ = append(argsQ, sqlVarNumber)
args = append(args, sensorDataSS.ShrinkMapToString(s.Sensors[sensor]))
}
// only use the columns that are in the payload
newColumnList := make([]string, len(columnList))
j := 0
for i, c := range columnList {
if i >= 3 {
if _, ok := s.Sensors[c]; !ok {
continue
}
}
newColumnList[j] = c
j++
}
newColumnList = newColumnList[:j]
// FIXME: fix this
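	// Build the SET clause of the upsert: pair each payload column with its
	// positional parameter, skipping the timestamp primary key.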
updateStatement := ""
for index := range newColumnList {
if index > 1 {
updateStatement += ","
}
if newColumnList[index] == "timestamp" {
continue
}
updateStatement += newColumnList[index] + "=" + argsQ[index]
}
sqlStatement := "insert into sensors(" + strings.Join(newColumnList, ",") + ") values (" + strings.Join(argsQ, ",") + ") on conflict(timestamp) do update set " + updateStatement
stmt, err := tx.Prepare(sqlStatement)
logger.Log.Debug("query!", sqlStatement)
// logger.Log.Debug("args", args)
if err != nil {
return errors.Wrap(err, "AddSensor, prepare "+sqlStatement)
}
defer stmt.Close()
_, err = stmt.Exec(args...)
if err != nil {
return errors.Wrap(err, "AddSensor, execute")
}
err = tx.Commit()
if err != nil {
return errors.Wrap(err, "AddSensor")
}
// update the map key slimmer
if previousCurrent != sensorDataSS.Current {
err = d.Set("sensorDataStringSizer", sensorDataSS.Save())
if err != nil {
return
}
}
logger.Log.Debugf("[%s] inserted sensor data, %s", s.Family, time.Since(startTime))
return
}
// GetSensorFromTime will return the sensor data for a given timestamp
func (d *Database) GetSensorFromTime(timestamp interface{}) (s models.SensorData, err error) {
sensors, err := d.GetAllFromPreparedQuery("SELECT * FROM sensors WHERE timestamp = $1", timestamp)
if err != nil {
err = errors.Wrap(err, "GetSensorFromTime")
} else {
s = sensors[0]
}
return
}
// GetLastSensorTimestamp returns the timestamp of the most recent sensor entry.
func (d *Database) GetLastSensorTimestamp() (timestamp int64, err error) {
stmt, err := d.db.Prepare("SELECT timestamp FROM sensors ORDER BY timestamp DESC LIMIT 1")
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
return
}
defer stmt.Close()
err = stmt.QueryRow().Scan(×tamp)
if err != nil {
err = errors.Wrap(err, "problem getting key")
}
return
}
// GetLatest15 will return the latest 15 sensor data entries for a device
func (d *Database) GetLatest15(device string) (s []models.SensorData, err error) {
deviceID, err := d.GetID("devices", device)
if err != nil {
return
}
var sensors []models.SensorData
sensors, err = d.GetAllFromPreparedQuery("SELECT * FROM sensors WHERE deviceID=$1 ORDER BY timestamp DESC LIMIT 15", deviceID)
if err != nil {
//logger.Log.Debugf("sensores hay erroro ",s, len(s))
return
}
s = sensors
// logger.Log.Debugf("sensores ",s, len(s)) //if len(sensors) > 0 {
// s = sensors[0]
//} else {
// err = errors.New("no rows found")
//}
return
}
// TotalLearnedCount returns the number of sensor entries that have a location assigned.
func (d *Database) TotalLearnedCount() (count int64, err error) {
stmt, err := d.db.Prepare("SELECT count(timestamp) FROM sensors WHERE locationid != ''")
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
return
}
defer stmt.Close()
err = stmt.QueryRow().Scan(&count)
if err != nil {
err = errors.Wrap(err, "problem getting key")
}
return
}
// GetSensorFromGreaterTime will return sensor data for a given timeframe
func (d *Database) GetSensorFromGreaterTime(timeBlockInMilliseconds int64) (sensors []models.SensorData, err error) {
latestTime, err := d.GetLastSensorTimestamp()
if err != nil {
return
}
minimumTimestamp := latestTime - timeBlockInMilliseconds
sensors, err = d.GetAllFromPreparedQuery("SELECT * FROM sensors WHERE timestamp > $1 GROUP BY deviceid, timestamp ORDER BY timestamp DESC;", minimumTimestamp)
return
}
func (d *Database) NumDevices() (num int, err error) {
stmt, err := d.db.Prepare("select count(id) from devices")
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
return
}
defer stmt.Close()
err = stmt.QueryRow().Scan(&num)
if err != nil {
err = errors.Wrap(err, "problem getting key")
}
return
}
func (d *Database) GetDeviceFirstTimeFromDevices(devices []string) (firstTime map[string]time.Time, err error) {
firstTime = make(map[string]time.Time)
query := fmt.Sprintf("select n,t from (select devices.name as n,sensors.timestamp as t from sensors inner join devices on sensors.deviceid=devices.id WHERE devices.name IN ('%s') order by timestamp desc) as q group by n, t", strings.Join(devices, "','"))
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
for rows.Next() {
var name string
var ts int64
err = rows.Scan(&name, &ts)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
// if _, ok := firstTime[name]; !ok {
firstTime[name] = time.Unix(0, ts*1000000).UTC()
// }
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
func (d *Database) GetDeviceFirstTime() (firstTime map[string]time.Time, err error) {
firstTime = make(map[string]time.Time)
query := "select n,t from (select devices.name as n,sensors.timestamp as t from sensors inner join devices on sensors.deviceid=devices.id order by timestamp desc) group by n"
// query := "select devices.name,sensors.timestamp from sensors inner join devices on sensors.deviceid=devices.id order by timestamp desc"
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
for rows.Next() {
var name string
var ts int64
err = rows.Scan(&name, &ts)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
// if _, ok := firstTime[name]; !ok {
firstTime[name] = time.Unix(0, ts*1000000).UTC()
// }
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
func (d *Database) GetDeviceCountsFromDevices(devices []string) (counts map[string]int, err error) {
counts = make(map[string]int)
query := fmt.Sprintf("select devices.name,count(sensors.timestamp) as num from sensors inner join devices on sensors.deviceid=devices.id WHERE devices.name in ('%s') group by sensors.deviceid, devices.name", strings.Join(devices, "','"))
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
for rows.Next() {
var name string
var count int
err = rows.Scan(&name, &count)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
counts[name] = count
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
func (d *Database) GetDeviceCounts() (counts map[string]int, err error) {
counts = make(map[string]int)
query := "select devices.name,count(sensors.timestamp) as num from sensors inner join devices on sensors.deviceid=devices.id group by sensors.deviceid"
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
for rows.Next() {
var name string
var count int
err = rows.Scan(&name, &count)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
counts[name] = count
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
func (d *Database) GetLocationCounts() (counts map[string]int, err error) {
counts = make(map[string]int)
query := "SELECT locations.name,count(sensors.timestamp) as num from sensors inner join locations on sensors.locationid=locations.id group by sensors.locationid, locations.name"
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
for rows.Next() {
var name string
var count int
err = rows.Scan(&name, &count)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
counts[name] = count
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
// GetAnalysisFromGreaterTime will return the analysis for a given timeframe
// func (d *Database) GetAnalysisFromGreaterTime(timestamp interface{}) {
// select sensors.timestamp, devices.name, location_predictions.prediction from sensors inner join location_predictions on location_predictions.timestamp=sensors.timestamp inner join devices on sensors.deviceid=devices.id WHERE sensors.timestamp > 0 GROUP BY devices.name ORDER BY sensors.timestamp DESC;
// }
// GetAllForClassification will return sensor data for classifying
func (d *Database) GetAllForClassification() (s []models.SensorData, err error) {
return d.GetAllFromQuery("SELECT * FROM sensors WHERE sensors.locationid !='' ORDER BY timestamp")
}
// GetAllNotForClassification will return sensor data that has no location and is not used for classifying
func (d *Database) GetAllNotForClassification() (s []models.SensorData, err error) {
return d.GetAllFromQuery("SELECT * FROM sensors WHERE sensors.locationid = '' ORDER BY timestamp")
}
// GetLatest will return the most recent sensor data for a device
func (d *Database) GetLatest(device string) (s models.SensorData, err error) {
deviceID, err := d.GetID("devices", device)
if err != nil {
return
}
var sensors []models.SensorData
sensors, err = d.GetAllFromPreparedQuery("SELECT * FROM sensors WHERE deviceID=$1 ORDER BY timestamp DESC LIMIT 1", deviceID)
if err != nil {
return
}
if len(sensors) > 0 {
s = sensors[0]
} else {
err = errors.New("no rows found")
}
return
}
func (d *Database) GetKeys(keylike string) (keys []string, err error) {
query := "SELECT key FROM keystore WHERE key LIKE $1"
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query(keylike)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
keys = []string{}
for rows.Next() {
var key string
err = rows.Scan(&key)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
keys = append(keys, key)
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
func (d *Database) GetDevices() (devices []string, err error) {
query := "SELECT devicename FROM (SELECT devices.name as devicename,COUNT(devices.name) as counts FROM sensors INNER JOIN devices ON sensors.deviceid = devices.id GROUP by devices.name ORDER BY counts DESC) as subquery"
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
devices = []string{}
for rows.Next() {
var name string
err = rows.Scan(&name)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
devices = append(devices, name)
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("problem scanning rows, only got %d devices", len(devices)))
}
return
}
func (d *Database) GetLocations() (locations []string, err error) {
query := "SELECT name FROM locations"
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
locations = []string{}
for rows.Next() {
var name string
err = rows.Scan(&name)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
locations = append(locations, name)
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
func (d *Database) GetIDToName(table string) (idToName map[string]string, err error) {
idToName = make(map[string]string)
query := "SELECT id,name FROM " + table
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
err = errors.Wrap(err, query)
return
}
defer rows.Close()
for rows.Next() {
var name, id string
err = rows.Scan(&id, &name)
if err != nil {
err = errors.Wrap(err, "scanning")
return
}
idToName[id] = name
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "rows")
}
return
}
func GetFamilies() (families []string) {
families = []string{"No families"}
return
}
func (d *Database) DeleteLocation(locationName string) (err error) {
id, err := d.GetID("locations", locationName)
if err != nil {
return
}
stmt, err := d.db.Prepare("DELETE FROM sensors WHERE locationid = $1")
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
return
}
defer stmt.Close()
_, err = stmt.Exec(id)
return
}
// GetID will get the ID of an element in a table (devices/locations) and return an error if it doesn't exist
func (d *Database) GetID(table string, name string) (id string, err error) {
// first check to see if it has already been added
stmt, err := d.db.Prepare("SELECT id FROM " + table + " WHERE name = $1")
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
return
}
defer stmt.Close()
err = stmt.QueryRow(name).Scan(&id)
return
}
// AddName will add a name to a table (devices/locations) and return the ID. If the device already exists it will just return it.
func (d *Database) AddName(table string, name string) (deviceID string, err error) {
// first check to see if it has already been added
deviceID, err = d.GetID(table, name)
if err == nil {
return
}
// logger.Log.Debugf("creating new name for %s in %s", name, table)
// get the current count
stmt, err := d.db.Prepare("SELECT COUNT(id) FROM " + table)
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
stmt.Close()
return
}
var currentCount int
err = stmt.QueryRow().Scan(¤tCount)
stmt.Close()
if err != nil {
err = errors.Wrap(err, "problem getting device count")
return
}
// transform the device name into an ID with the current count
currentCount++
deviceID = stringsizer.Transform(currentCount)
// logger.Log.Debugf("transformed (%d) %s -> %s", currentCount, name, deviceID)
// add the device name and ID
tx, err := d.db.Begin()
if err != nil {
err = errors.Wrap(err, "AddName")
return
}
query := "insert into " + table + "(id,name) values ($1, $2)"
// logger.Log.Debugf("running query: '%s'", query)
stmt, err = tx.Prepare(query)
if err != nil {
err = errors.Wrap(err, "AddName")
return
}
defer stmt.Close()
_, err = stmt.Exec(deviceID, name)
if err != nil {
err = errors.Wrap(err, "AddName")
}
err = tx.Commit()
if err != nil {
err = errors.Wrap(err, "AddName")
return
}
return
}
func Exists(name string) (err error) {
name = strings.TrimSpace(name)
name = path.Join(DataFolder, base58.FastBase58Encoding([]byte(name))+".sqlite3.db")
if _, err = os.Stat(name); err != nil {
err = errors.New("database '" + name + "' does not exist")
}
return
}
func (d *Database) Delete() (err error) {
logger.Log.Debugf("deleting %s", d.family)
return
}
// Open will open the database for transactions by first aquiring a filelock.
func Open(family string, readOnly ...bool) (d *Database, err error) {
d = new(Database)
d.family = "posifi"
//connStr := os.Getenv("POSTGRESQL_CONN_STRING")
connStr := "postgres://locindoor:[email protected]/locindoor"
// open postgres database
d.db, err = sql.Open("postgres", connStr)
if err != nil {
return
}
// logger.Log.Debug("opened sqlite3 database")
stmt, err := d.db.Prepare("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE tablename = 'keystore')")
if err != nil {
err = errors.Wrap(err, "problem preparing SQL")
return
}
defer stmt.Close()
var dbExist bool
err = stmt.QueryRow().Scan(&dbExist)
// create new database tables if needed
if dbExist == false {
err = d.MakeTables()
if err != nil {
return
}
logger.Log.Debug("made tables")
}
return
}
func (d *Database) Debug(debugMode bool) {
if debugMode {
logger.SetLevel("debug")
} else {
logger.SetLevel("info")
}
}
// Close will close the database connection and remove the filelock.
func (d *Database) Close() (err error) {
if d.isClosed {
return
}
// close database
err2 := d.db.Close()
if err2 != nil {
err = err2
logger.Log.Error(err)
}
d.isClosed = true
return
}
func (d *Database) GetAllFromQuery(query string) (s []models.SensorData, err error) {
// logger.Log.Debug(query)
rows, err := d.db.Query(query)
if err != nil {
err = errors.Wrap(err, "GetAllFromQuery")
return
}
defer rows.Close()
// parse rows
s, err = d.getRows(rows)
if err != nil {
err = errors.Wrap(err, query)
}
return
}
// GetAllFromPreparedQuery
func (d *Database) GetAllFromPreparedQuery(query string, args ...interface{}) (s []models.SensorData, err error) {
// prepare statement
// startQuery := time.Now()
stmt, err := d.db.Prepare(query)
if err != nil {
err = errors.Wrap(err, query)
return
}
defer stmt.Close()
rows, err := stmt.Query(args...)
if err != nil {
err = errors.Wrap(err, query)
return
}
// logger.Log.Debugf("%s: %s", query, time.Since(startQuery))
// startQuery = time.Now()
defer rows.Close()
s, err = d.getRows(rows)
if err != nil {
err = errors.Wrap(err, query)
}
// logger.Log.Debugf("getRows %s: %s", query, time.Since(startQuery))
return
}
func (d *Database) getRows(rows *sql.Rows) (s []models.SensorData, err error) {
// first get the columns
columnList, err := d.Columns()
if err != nil {
return
}
// get the string sizer for the sensor data
var sensorDataStringSizerString string
err = d.Get("sensorDataStringSizer", &sensorDataStringSizerString)
if err != nil {
return
}
sensorDataSS, err := stringsizer.New(sensorDataStringSizerString)
if err != nil {
return
}
deviceIDToName, err := d.GetIDToName("devices")
if err != nil {
return
}
locationIDToName, err := d.GetIDToName("locations")
if err != nil {
return
}
s = []models.SensorData{}
// loop through rows
for rows.Next() {
var arr []interface{}
for i := 0; i < len(columnList); i++ {
arr = append(arr, new(interface{}))
}
err = rows.Scan(arr...)
if err != nil {
err = errors.Wrap(err, "getRows")
return
}
// dereference each scanned *interface{} and assert the driver's concrete type:
// the timestamp arrives as []uint8, the device and location ids as strings
str_timestamp := string((*arr[0].(*interface{})).([]uint8))
int_timestamp, _ := strconv.ParseInt(str_timestamp, 10, 64)
s0 := models.SensorData{
Timestamp: int_timestamp,
Family: d.family,
Device: deviceIDToName[string((*arr[1].(*interface{})).(string))],
Location: locationIDToName[string((*arr[2].(*interface{})).(string))],
Sensors: make(map[string]map[string]interface{}),
}
// add in the sensor data
for i, colName := range columnList {
if i < 3 {
continue
}
if *arr[i].(*interface{}) == nil {
continue
}
shortenedJSON := string((*arr[i].(*interface{})).(string))
s0.Sensors[colName], err = sensorDataSS.ExpandMapFromString(shortenedJSON)
if err != nil {
err = errors.Wrap(err, "getRows")
return
}
}
s = append(s, s0)
}
err = rows.Err()
if err != nil {
err = errors.Wrap(err, "getRows")
}
return
}
// SetGPS will set a GPS value in the GPS database
func (d *Database) SetGPS(p models.SensorData) (err error) {
tx, err := d.db.Begin()
if err != nil {
return errors.Wrap(err, "SetGPS")
}
stmt, err := tx.Prepare("insert into gps(timestamp ,mac, loc, lat, lon, alt) values ($1, $2, $3, $4, $5, $6)")
if err != nil {
return errors.Wrap(err, "SetGPS")
}
defer stmt.Close()
for sensorType := range p.Sensors {
for mac := range p.Sensors[sensorType] {
_, err = stmt.Exec(p.Timestamp, sensorType+"-"+mac, p.Location, p.GPS.Latitude, p.GPS.Longitude, p.GPS.Altitude)
if err != nil {
return errors.Wrap(err, "SetGPS")
}
}
}
err = tx.Commit()
if err != nil {
return errors.Wrap(err, "SetGPS")
}
return
}
// // GetGPS will return a GPS for a given mac, if it exists
// // if it doesn't exist it will return an error
// func (d *Database) GetGPS(mac string) (gps models.GPS, err error) {
// query := "SELECT mac,lat,lon,alt,timestamp FROM gps WHERE mac == $1"
// stmt, err := d.db.Prepare(query)
// if err != nil {
// err = errors.Wrap(err, query)
// return
// }
// defer stmt.Close()
// rows, err := stmt.Query(mac)
// if err != nil {
// err = errors.Wrap(err, query)
// return
// }
// defer rows.Close()
// for rows.Next() {
// err = rows.Scan(&gps.Mac, &gps.Latitude, &gps.Longitude, &gps.Altitude, &gps.Timestamp)
// if err != nil {
// err = errors.Wrap(err, "scanning")
// return
// }
// }
// err = rows.Err()
// if err != nil {
// err = errors.Wrap(err, "rows")
// }
// if gps.Mac == "" {
// err = errors.New(mac + " does not exist in gps table")
// }
// return
// }
| ["\"POSTGRESQL_CONN_STRING\""] | [] | ["POSTGRESQL_CONN_STRING"] | [] | ["POSTGRESQL_CONN_STRING"] | go | 1 | 0 | |
services/mailservice/inbucket.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package mailservice
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
)
const (
InbucketAPI = "/api/v1/mailbox/"
)
// JSONMessageHeaderInbucket holds the received message headers used to test sending emails (Inbucket)
type JSONMessageHeaderInbucket []struct {
Mailbox string
ID string `json:"Id"`
From, Subject, Date string
To []string
Size int
}
// JSONMessageInbucket holds the received message used to test sending emails (Inbucket)
type JSONMessageInbucket struct {
Mailbox string
ID string `json:"Id"`
From, Subject, Date string
Size int
Header map[string][]string
Body struct {
Text string
HTML string `json:"Html"`
}
Attachments []struct {
Filename string
ContentType string `json:"content-type"`
DownloadLink string `json:"download-link"`
Bytes []byte `json:"-"`
}
}
func ParseEmail(email string) string {
pos := strings.Index(email, "@")
parsedEmail := email[0:pos]
return parsedEmail
}
func GetMailBox(email string) (results JSONMessageHeaderInbucket, err error) {
parsedEmail := ParseEmail(email)
url := fmt.Sprintf("%s%s%s", getInbucketHost(), InbucketAPI, parsedEmail)
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if resp.Body == nil {
return nil, fmt.Errorf("No Mailbox")
}
var record JSONMessageHeaderInbucket
err = json.NewDecoder(resp.Body).Decode(&record)
switch {
case err == io.EOF:
return nil, fmt.Errorf("Error: %s", err)
case err != nil:
return nil, fmt.Errorf("Error: %s", err)
}
if len(record) == 0 {
return nil, fmt.Errorf("No mailbox")
}
return record, nil
}
func GetMessageFromMailbox(email, id string) (JSONMessageInbucket, error) {
parsedEmail := ParseEmail(email)
var record JSONMessageInbucket
url := fmt.Sprintf("%s%s%s/%s", getInbucketHost(), InbucketAPI, parsedEmail, id)
emailResponse, err := http.Get(url)
if err != nil {
return record, err
}
defer func() {
io.Copy(ioutil.Discard, emailResponse.Body)
emailResponse.Body.Close()
}()
if err = json.NewDecoder(emailResponse.Body).Decode(&record); err != nil {
return record, err
}
// download attachments
if len(record.Attachments) > 0 {
for i := range record.Attachments {
var bytes []byte
bytes, err = downloadAttachment(record.Attachments[i].DownloadLink)
if err != nil {
return record, err
}
record.Attachments[i].Bytes = make([]byte, len(bytes))
copy(record.Attachments[i].Bytes, bytes)
}
}
return record, err
}
func downloadAttachment(url string) ([]byte, error) {
attachmentResponse, err := http.Get(url)
if err != nil {
return nil, err
}
defer attachmentResponse.Body.Close()
buf := new(bytes.Buffer)
io.Copy(buf, attachmentResponse.Body)
return buf.Bytes(), nil
}
func DeleteMailBox(email string) (err error) {
parsedEmail := ParseEmail(email)
url := fmt.Sprintf("%s%s%s", getInbucketHost(), InbucketAPI, parsedEmail)
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return err
}
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
return nil
}
func RetryInbucket(attempts int, callback func() error) (err error) {
for i := 0; ; i++ {
err = callback()
if err == nil {
return nil
}
if i >= (attempts - 1) {
break
}
time.Sleep(5 * time.Second)
fmt.Println("retrying...")
}
return fmt.Errorf("After %d attempts, last error: %s", attempts, err)
}
func getInbucketHost() (host string) {
inbucket_host := os.Getenv("CI_INBUCKET_HOST")
if inbucket_host == "" {
inbucket_host = "localhost"
}
inbucket_port := os.Getenv("CI_INBUCKET_PORT")
if inbucket_port == "" {
inbucket_port = "10080"
}
return fmt.Sprintf("http://%s:%s", inbucket_host, inbucket_port)
}
| ["\"CI_INBUCKET_HOST\"", "\"CI_INBUCKET_PORT\""] | [] | ["CI_INBUCKET_PORT", "CI_INBUCKET_HOST"] | [] | ["CI_INBUCKET_PORT", "CI_INBUCKET_HOST"] | go | 2 | 0 | |
client-tools/main.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Note: the example only works with the code within the same release/branch.
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
func main() {
var kubeconfig *string
if home := homeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.Parse()
// use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
panic(err.Error())
}
// create the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}
for {
pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
if err != nil {
panic(err.Error())
}
fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
// Examples for error handling:
// - Use helper functions like e.g. errors.IsNotFound()
// - And/or cast to StatusError and use its properties like e.g. ErrStatus.Message
namespace := "default"
pod := "example-xxxxx"
_, err = clientset.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{})
if errors.IsNotFound(err) {
fmt.Printf("Pod %s in namespace %s not found\n", pod, namespace)
} else if statusError, isStatus := err.(*errors.StatusError); isStatus {
fmt.Printf("Error getting pod %s in namespace %s: %v\n",
pod, namespace, statusError.ErrStatus.Message)
} else if err != nil {
panic(err.Error())
} else {
fmt.Printf("Found pod %s in namespace %s\n", pod, namespace)
}
time.Sleep(10 * time.Second)
}
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
| ["\"HOME\"", "\"USERPROFILE\""] | [] | ["HOME", "USERPROFILE"] | [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
app/admin/main/videoup/dao/task/dao_test.go | package task
import (
"flag"
. "github.com/smartystreets/goconvey/convey"
"go-common/app/admin/main/videoup/conf"
"os"
"testing"
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.archive.videoup-admin")
flag.Set("conf_token", "gRSfeavV7kJdY9875Gf29pbd2wrdKZ1a")
flag.Set("tree_id", "2307")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../../cmd/videoup-admin.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}
func WithDao(f func(d *Dao)) func() {
return func() {
Reset(func() {})
f(d)
}
}
| ["\"DEPLOY_ENV\""] | [] | ["DEPLOY_ENV"] | [] | ["DEPLOY_ENV"] | go | 1 | 0 | |
Lib/site-packages/web/wsgi.py | """
WSGI Utilities
(from web.py)
"""
import os
import sys
from . import webapi as web
from .utils import listget, intget
from .net import validaddr
from . import httpserver
def runfcgi(func, addr=("localhost", 8000)):
"""Runs a WSGI function as a FastCGI server."""
import flup.server.fcgi as flups
return flups.WSGIServer(func, multiplexed=True, bindAddress=addr, debug=False).run()
def runscgi(func, addr=("localhost", 4000)):
"""Runs a WSGI function as an SCGI server."""
import flup.server.scgi as flups
return flups.WSGIServer(func, bindAddress=addr, debug=False).run()
def runwsgi(func):
"""
Runs a WSGI-compatible `func` using FCGI, SCGI, or a simple web server,
as appropriate based on context and `sys.argv`.
"""
if "SERVER_SOFTWARE" in os.environ: # cgi
os.environ["FCGI_FORCE_CGI"] = "Y"
# PHP_FCGI_CHILDREN is used by lighttpd fastcgi
if "PHP_FCGI_CHILDREN" in os.environ or "SERVER_SOFTWARE" in os.environ:
return runfcgi(func, None)
if "fcgi" in sys.argv or "fastcgi" in sys.argv:
args = sys.argv[1:]
if "fastcgi" in args:
args.remove("fastcgi")
elif "fcgi" in args:
args.remove("fcgi")
if args:
return runfcgi(func, validaddr(args[0]))
else:
return runfcgi(func, None)
if "scgi" in sys.argv:
args = sys.argv[1:]
args.remove("scgi")
if args:
return runscgi(func, validaddr(args[0]))
else:
return runscgi(func)
server_addr = validaddr(listget(sys.argv, 1, ""))
if "PORT" in os.environ: # e.g. Heroku
server_addr = ("0.0.0.0", intget(os.environ["PORT"]))
return httpserver.runsimple(func, server_addr)
def _is_dev_mode():
# Some embedded python interpreters won't have sys.argv
# For details, see https://github.com/webpy/webpy/issues/87
argv = getattr(sys, "argv", [])
# quick hack to check if the program is running in dev mode.
if (
"SERVER_SOFTWARE" in os.environ
or "PHP_FCGI_CHILDREN" in os.environ
or "fcgi" in argv
or "fastcgi" in argv
or "mod_wsgi" in argv
):
return False
return True
# When running the builtin-server, enable debug mode if not already set.
web.config.setdefault("debug", _is_dev_mode())
| [] | [] | ["PORT", "FCGI_FORCE_CGI"] | [] | ["PORT", "FCGI_FORCE_CGI"] | python | 2 | 0 | |
AppServer/google/appengine/ext/remote_api/remote_api_stub.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An apiproxy stub that calls a remote handler via HTTP.
This allows easy remote access to the App Engine datastore, and potentially any
of the other App Engine APIs, using the same interface you use when accessing
the service locally.
An example Python script:
---
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from myapp import models
import getpass
def auth_func():
return (raw_input('Username:'), getpass.getpass('Password:'))
remote_api_stub.ConfigureRemoteApi(None, '/_ah/remote_api', auth_func,
'my-app.appspot.com')
# Now you can access the remote datastore just as if your code was running on
# App Engine!
houses = models.House.all().fetch(100)
for a_house in houses:
a_house.doors += 1
db.put(houses)
---
A few caveats:
- Where possible, avoid iterating over queries. Fetching as many results as you
will need is faster and more efficient. If you don't know how many results
you need, or you need 'all of them', iterating is fine.
- Likewise, it's a good idea to put entities in batches. Instead of calling put
for each individual entity, accumulate them and put them in batches using
db.put(), if you can.
- Requests and responses are still limited to 1MB each, so if you have large
entities or try and fetch or put many of them at once, your requests may fail.
"""
import google
import imp
import os
import pickle
import random
import sys
import thread
import threading
import yaml
import hashlib
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
else:
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import appengine_rpc
_REQUEST_ID_HEADER = 'HTTP_X_APPENGINE_REQUEST_ID'
class Error(Exception):
"""Base class for exceptions in this module."""
class ConfigurationError(Error):
"""Exception for configuration errors."""
class UnknownJavaServerError(Error):
"""Exception for exceptions returned from a Java remote_api handler."""
def GetUserAgent():
"""Determines the value of the 'User-agent' header to use for HTTP requests.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
"""
product_tokens = []
product_tokens.append("Google-remote_api/1.0")
product_tokens.append(appengine_rpc.GetPlatformToken())
python_version = ".".join(str(i) for i in sys.version_info)
product_tokens.append("Python/%s" % python_version)
return " ".join(product_tokens)
def GetSourceName():
return "Google-remote_api-1.0"
def HashEntity(entity):
"""Return a very-likely-unique hash of an entity."""
return hashlib.sha1(entity.Encode()).digest()
class TransactionData(object):
"""Encapsulates data about an individual transaction."""
def __init__(self, thread_id, is_xg):
self.thread_id = thread_id
self.preconditions = {}
self.entities = {}
self.is_xg = is_xg
class RemoteStub(object):
"""A stub for calling services on a remote server over HTTP.
You can use this to stub out any service that the remote server supports.
"""
_local = threading.local()
def __init__(self, server, path, _test_stub_map=None):
"""Constructs a new RemoteStub that communicates with the specified server.
Args:
server: An instance of a subclass of
google.appengine.tools.appengine_rpc.AbstractRpcServer.
path: The path to the handler this stub should send requests to.
"""
self._server = server
self._path = path
self._test_stub_map = _test_stub_map
def _PreHookHandler(self, service, call, request, response):
pass
def _PostHookHandler(self, service, call, request, response):
pass
def MakeSyncCall(self, service, call, request, response):
self._PreHookHandler(service, call, request, response)
try:
test_stub = self._test_stub_map and self._test_stub_map.GetStub(service)
if test_stub:
test_stub.MakeSyncCall(service, call, request, response)
else:
self._MakeRealSyncCall(service, call, request, response)
finally:
self._PostHookHandler(service, call, request, response)
@classmethod
def _GetRequestId(cls):
"""Returns the id of the request associated with the current thread."""
try:
return cls._local.request_id
except AttributeError:
return None
@classmethod
def _SetRequestId(cls, request_id):
"""Set the id of the request associated with the current thread."""
cls._local.request_id = request_id
def _MakeRealSyncCall(self, service, call, request, response):
request_pb = remote_api_pb.Request()
request_pb.set_service_name(service)
request_pb.set_method(call)
request_pb.set_request(request.Encode())
if hasattr(self._local, 'request_id'):
request_pb.set_request_id(self._local.request_id)
response_pb = remote_api_pb.Response()
encoded_request = request_pb.Encode()
encoded_response = self._server.Send(self._path, encoded_request)
response_pb.ParseFromString(encoded_response)
if response_pb.has_application_error():
error_pb = response_pb.application_error()
raise apiproxy_errors.ApplicationError(error_pb.code(),
error_pb.detail())
elif response_pb.has_exception():
raise pickle.loads(response_pb.exception())
elif response_pb.has_java_exception():
raise UnknownJavaServerError("An unknown error has occurred in the "
"Java remote_api handler for this call.")
else:
response.ParseFromString(response_pb.response())
def CreateRPC(self):
return apiproxy_rpc.RPC(stub=self)
# AppScale: If the runtime is importing a module, fall back to the
# non-threaded RPC. This prevents a deadlock in cases when the RealRPC thread
# tries to acquire the import lock.
class RuntimeRemoteStub(RemoteStub):
""" A RemoteStub that uses a separate thread for RPCs. """
def CreateRPC(self):
""" Create an RPC that can be used asynchronously. """
if imp.lock_held():
return apiproxy_rpc.RPC(stub=self)
else:
return apiproxy_rpc.RealRPC(stub=self)
class RemoteDatastoreStub(RemoteStub):
"""A specialised stub for accessing the App Engine datastore remotely.
A specialised stub is required because there are some datastore operations
that preserve state between calls. This stub makes queries possible.
Transactions on the remote datastore are unfortunately still impossible.
"""
def __init__(self, server, path, default_result_count=20,
_test_stub_map=None):
"""Constructor.
Args:
server: The server name to connect to.
path: The URI path on the server.
default_result_count: The number of items to fetch, by default, in a
datastore Query or Next operation. This affects the batch size of
query iterators.
"""
super(RemoteDatastoreStub, self).__init__(server, path, _test_stub_map)
self.default_result_count = default_result_count
self.__queries = {}
self.__transactions = {}
self.__next_local_cursor = 1
self.__local_cursor_lock = threading.Lock()
self.__next_local_tx = 1
self.__local_tx_lock = threading.Lock()
def MakeSyncCall(self, service, call, request, response):
assert service == 'datastore_v3'
explanation = []
assert request.IsInitialized(explanation), explanation
handler = getattr(self, '_Dynamic_' + call, None)
if handler:
handler(request, response)
else:
super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
response)
assert response.IsInitialized(explanation), explanation
def _Dynamic_RunQuery(self, query, query_result, cursor_id = None):
# AppScale: Removed previous check because we remotely handle transactions.
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'RunQuery', query, query_result)
if cursor_id is None:
self.__local_cursor_lock.acquire()
try:
cursor_id = self.__next_local_cursor
self.__next_local_cursor += 1
finally:
self.__local_cursor_lock.release()
if query_result.more_results():
query.set_offset(query.offset() + query_result.result_size())
if query.has_limit():
query.set_limit(query.limit() - query_result.result_size())
self.__queries[cursor_id] = query
else:
self.__queries[cursor_id] = None
query_result.mutable_cursor().set_cursor(cursor_id)
def _Dynamic_Next(self, next_request, query_result):
assert next_request.offset() == 0
cursor_id = next_request.cursor().cursor()
if cursor_id not in self.__queries:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Cursor %d not found' % cursor_id)
query = self.__queries[cursor_id]
if query is None:
query_result.set_more_results(False)
return
else:
if next_request.has_count():
query.set_count(next_request.count())
else:
query.clear_count()
self._Dynamic_RunQuery(query, query_result, cursor_id)
query_result.set_skipped_results(0)
def _Dynamic_Get(self, get_request, get_response):
txid = None
if get_request.has_transaction():
txid = get_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
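# Inside a transaction, only fetch keys that have not already been read or written locally; cached keys are answered from the transaction buffer below.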
keys = [(k, k.Encode()) for k in get_request.key_list()]
new_request = datastore_pb.GetRequest()
for key, enckey in keys:
if enckey not in txdata.entities:
new_request.add_key().CopyFrom(key)
else:
new_request = get_request
if new_request.key_size() > 0:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Get', new_request, get_response)
if txid is not None:
newkeys = new_request.key_list()
entities = get_response.entity_list()
for key, entity in zip(newkeys, entities):
entity_hash = None
if entity.has_entity():
entity_hash = HashEntity(entity.entity())
txdata.preconditions[key.Encode()] = (key, entity_hash)
new_response = datastore_pb.GetResponse()
it = iter(get_response.entity_list())
for key, enckey in keys:
if enckey in txdata.entities:
cached_entity = txdata.entities[enckey][1]
if cached_entity:
new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
else:
new_response.add_entity()
else:
new_entity = it.next()
if new_entity.has_entity():
assert new_entity.entity().key() == key
new_response.add_entity().CopyFrom(new_entity)
else:
new_response.add_entity()
get_response.CopyFrom(new_response)
def _Dynamic_Put(self, put_request, put_response):
if put_request.has_transaction():
entities = put_request.entity_list()
requires_id = lambda x: x.id() == 0 and not x.has_name()
new_ents = [e for e in entities
if requires_id(e.key().path().element_list()[-1])]
id_request = datastore_pb.PutRequest()
txid = put_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
if new_ents:
for ent in new_ents:
e = id_request.add_entity()
e.mutable_key().CopyFrom(ent.key())
e.mutable_entity_group()
id_response = datastore_pb.PutResponse()
if txdata.is_xg:
rpc_name = 'GetIDsXG'
else:
rpc_name = 'GetIDs'
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', rpc_name, id_request, id_response)
assert id_request.entity_size() == id_response.key_size()
for key, ent in zip(id_response.key_list(), new_ents):
ent.mutable_key().CopyFrom(key)
ent.mutable_entity_group().add_element().CopyFrom(
key.path().element(0))
for entity in entities:
txdata.entities[entity.key().Encode()] = (entity.key(), entity)
put_response.add_key().CopyFrom(entity.key())
else:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Put', put_request, put_response)
def _Dynamic_Delete(self, delete_request, response):
if delete_request.has_transaction():
txid = delete_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
for key in delete_request.key_list():
txdata.entities[key.Encode()] = (key, None)
else:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Delete', delete_request, response)
def _Dynamic_BeginTransaction(self, request, transaction):
self.__local_tx_lock.acquire()
try:
txid = self.__next_local_tx
self.__transactions[txid] = TransactionData(thread.get_ident(),
request.allow_multiple_eg())
self.__next_local_tx += 1
finally:
self.__local_tx_lock.release()
transaction.set_handle(txid)
transaction.set_app(request.app())
def _Dynamic_Commit(self, transaction, transaction_response):
txid = transaction.handle()
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
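# Replay the whole transaction as one Transaction RPC: preconditions carry the keys (and entity hashes) read during the transaction, while puts/deletes carry the locally buffered writes.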
tx = remote_api_pb.TransactionRequest()
tx.set_allow_multiple_eg(txdata.is_xg)
for key, hash in txdata.preconditions.values():
precond = tx.add_precondition()
precond.mutable_key().CopyFrom(key)
if hash:
precond.set_hash(hash)
puts = tx.mutable_puts()
deletes = tx.mutable_deletes()
for key, entity in txdata.entities.values():
if entity:
puts.add_entity().CopyFrom(entity)
else:
deletes.add_key().CopyFrom(key)
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', 'Transaction',
tx, datastore_pb.PutResponse())
def _Dynamic_Rollback(self, transaction, transaction_response):
txid = transaction.handle()
self.__local_tx_lock.acquire()
try:
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
finally:
self.__local_tx_lock.release()
def _Dynamic_CreateIndex(self, index, id_response):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
def _Dynamic_UpdateIndex(self, index, void):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
def _Dynamic_DeleteIndex(self, index, void):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
# AppScale: If the runtime is importing a module, fall back to the
# non-threaded RPC. This prevents a deadlock in cases when the RealRPC thread
# tries to acquire the import lock.
class RuntimeDatastoreStub(RemoteDatastoreStub):
""" A datastore stub that uses a separate thread for RPCs. """
def CreateRPC(self):
""" Create an RPC that can be used asynchronously. """
if imp.lock_held():
return apiproxy_rpc.RPC(stub=self)
else:
return apiproxy_rpc.RealRPC(stub=self)
ALL_SERVICES = set(remote_api_services.SERVICE_PB_MAP)
def GetRemoteAppIdFromServer(server, path, remote_token=None):
"""Return the app id from a connection to an existing server.
Args:
server: An appengine_rpc.AbstractRpcServer
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
remote_token: Token to validate that the response was to this request.
Returns:
App ID as reported by the remote server.
Raises:
ConfigurationError: The server returned an invalid response.
"""
if not remote_token:
random.seed()
remote_token = str(random.random())[2:]
remote_token = str(remote_token)
urlargs = {'rtok': remote_token}
response = server.Send(path, payload=None, **urlargs)
if not response.startswith('{'):
raise ConfigurationError(
'Invalid response received from server: %s' % response)
app_info = yaml.safe_load(response)
if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
raise ConfigurationError('Error parsing app_id lookup response')
if str(app_info['rtok']) != remote_token:
raise ConfigurationError('Token validation failed during app_id lookup. '
'(sent %s, got %s)' % (repr(remote_token),
repr(app_info['rtok'])))
return app_info['app_id']
def ConfigureRemoteApiFromServer(server, path, app_id, services=None,
default_auth_domain=None,
use_remote_datastore=True,
use_async_rpc=False):
"""Does necessary setup to allow easy remote access to App Engine APIs.
Args:
server: An AbstractRpcServer
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
app_id: The app_id of your app, as declared in app.yaml.
services: A list of services to set up stubs for. If specified, only those
services are configured; by default all supported services are configured.
default_auth_domain: The authentication domain to use by default.
use_remote_datastore: Whether to use RemoteDatastoreStub instead of passing
through datastore requests. RemoteDatastoreStub batches transactional
datastore requests since, in production, datastore transactions are scoped to
a single request.
use_async_rpc: A boolean indicating whether or not to make RPC calls in a
separate thread.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
ConfigurationError: if there is an error configuring the Remote API.
"""
if services is None:
services = set(ALL_SERVICES)
else:
services = set(services)
unsupported = services.difference(ALL_SERVICES)
if unsupported:
raise ConfigurationError('Unsupported service(s): %s'
% (', '.join(unsupported),))
os.environ['APPLICATION_ID'] = app_id
os.environ.setdefault('AUTH_DOMAIN', default_auth_domain or 'gmail.com')
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
if 'datastore_v3' in services and use_remote_datastore:
services.remove('datastore_v3')
if use_async_rpc:
datastore_stub = RuntimeDatastoreStub(server, path)
else:
datastore_stub = RemoteDatastoreStub(server, path)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
if use_async_rpc:
stub = RuntimeRemoteStub(server, path)
else:
stub = RemoteStub(server, path)
for service in services:
apiproxy_stub_map.apiproxy.RegisterStub(service, stub)
def GetRemoteAppId(servername,
path,
auth_func,
rpc_server_factory=appengine_rpc.HttpRpcServer,
rtok=None,
secure=False,
save_cookies=False):
"""Get the remote appid as reported at servername/path.
This will also return an AbstractRpcServer server, which can be used with
ConfigureRemoteApiFromServer.
Args:
servername: The hostname your app is deployed on.
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
auth_func: A function that takes no arguments and returns a
(username, password) tuple. This will be called if your application
requires authentication to access the remote_api handler (it should!)
and you do not already have a valid auth cookie.
<app_id>.appspot.com.
rpc_server_factory: A factory to construct the rpc server for the datastore.
rtok: The validation token to sent with app_id lookups. If None, a random
token is used.
secure: Use SSL when communicating with the server.
save_cookies: Forwarded to rpc_server_factory function.
Returns:
(app_id, server): The application ID and an AbstractRpcServer.
"""
server = rpc_server_factory(servername, auth_func, GetUserAgent(),
GetSourceName(), save_cookies=save_cookies,
debug_data=False, secure=secure)
app_id = GetRemoteAppIdFromServer(server, path, rtok)
return app_id, server
# AppScale: Add support for async RPCs.
def ConfigureRemoteApi(app_id,
path,
auth_func,
servername=None,
rpc_server_factory=appengine_rpc.HttpRpcServer,
rtok=None,
secure=False,
services=None,
default_auth_domain=None,
save_cookies=False,
use_remote_datastore=True,
use_async_rpc=False):
"""Does necessary setup to allow easy remote access to App Engine APIs.
Either servername must be provided or app_id must not be None. If app_id
is None and a servername is provided, this function will send a request
to the server to retrieve the app_id.
Note that if the app_id is specified, the internal appid must be used;
this may include a partition and a domain. It is often easier to let
remote_api_stub retrieve the app_id automatically.
Args:
app_id: The app_id of your app, as declared in app.yaml, or None.
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
auth_func: A function that takes no arguments and returns a
(username, password) tuple. This will be called if your application
requires authentication to access the remote_api handler (it should!)
and you do not already have a valid auth cookie.
servername: The hostname your app is deployed on. Defaults to
<app_id>.appspot.com.
rpc_server_factory: A factory to construct the rpc server for the datastore.
rtok: The validation token to sent with app_id lookups. If None, a random
token is used.
secure: Use SSL when communicating with the server.
services: A list of services to set up stubs for. If specified, only those
services are configured; by default all supported services are configured.
default_auth_domain: The authentication domain to use by default.
save_cookies: Forwarded to rpc_server_factory function.
use_remote_datastore: Whether to use RemoteDatastoreStub instead of passing
through datastore requests. RemoteDatastoreStub batches transactional
datastore requests since, in production, datastore transactions are scoped to
a single request.
use_async_rpc: A boolean indicating whether or not to make RPC calls in a
separate thread.
Returns:
server, the server created by rpc_server_factory, which may be useful for
calling the application directly.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
ConfigurationError: if there is an error configuring the DatastoreFileStub.
"""
if not servername and not app_id:
raise ConfigurationError('app_id or servername required')
if not servername:
servername = '%s.appspot.com' % (app_id,)
server = rpc_server_factory(servername, auth_func, GetUserAgent(),
GetSourceName(), save_cookies=save_cookies,
debug_data=False, secure=secure)
if not app_id:
app_id = GetRemoteAppIdFromServer(server, path, rtok)
ConfigureRemoteApiFromServer(server, path, app_id, services,
default_auth_domain, use_remote_datastore,
use_async_rpc)
return server
def MaybeInvokeAuthentication():
"""Sends an empty request through to the configured end-point.
If authentication is necessary, this will cause the rpc_server to invoke
interactive authentication.
"""
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
if isinstance(datastore_stub, RemoteStub):
datastore_stub._server.Send(datastore_stub._path, payload=None)
else:
raise ConfigurationError('remote_api is not configured.')
ConfigureRemoteDatastore = ConfigureRemoteApi
| [] | [] | ["APPENGINE_RUNTIME", "APPLICATION_ID"] | [] | ["APPENGINE_RUNTIME", "APPLICATION_ID"] | python | 2 | 0 | |
dashboard/tests/conftest.py | import os
import pytest
from ray.tests.conftest import * # noqa
@pytest.fixture
def enable_test_module():
os.environ["RAY_DASHBOARD_MODULE_TEST"] = "true"
yield
os.environ.pop("RAY_DASHBOARD_MODULE_TEST", None)
| [] | [] | ["RAY_DASHBOARD_MODULE_TEST"] | [] | ["RAY_DASHBOARD_MODULE_TEST"] | python | 1 | 0 | |
Godeps/_workspace/src/github.com/cloudfoundry/loggregator_consumer/integration_test/loggregator_consumer_smoke_test.go | package integration_test
import (
"crypto/tls"
"encoding/json"
consumer "github.com/cloudfoundry/loggregator_consumer"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"os"
"io/ioutil"
"strings"
)
var _ = Describe("LoggregatorConsumer:", func() {
var appGuid, authToken string
var connection consumer.LoggregatorConsumer
BeforeEach(func() {
var err error
appGuid = os.Getenv("TEST_APP_GUID")
loggregatorEndpoint := os.Getenv("LOGGREGATOR_ENDPOINT")
connection = consumer.New(loggregatorEndpoint, &tls.Config{InsecureSkipVerify: true}, nil)
authToken, err = getAuthToken()
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
connection.Close()
})
It("should return data for recent", func() {
messages, err := connection.Recent(appGuid, authToken)
Expect(err).NotTo(HaveOccurred())
Expect(messages).To(ContainElement(ContainSubstring("Tick")))
})
It("should return data for tail", func(done Done) {
messagesChan, err := connection.Tail(appGuid, authToken)
Expect(err).NotTo(HaveOccurred())
for m := range messagesChan {
if strings.Contains(string(m.GetMessage()), "Tick") {
break
}
}
close(done)
}, 2)
})
type Config struct {
AccessToken string
}
func getAuthToken() (string, error) {
bytes, err := ioutil.ReadFile(os.ExpandEnv("$HOME/.cf/config.json"))
if err != nil {
return "", err
}
var config Config
err = json.Unmarshal(bytes, &config)
if err != nil {
return "", err
}
return config.AccessToken, nil
}
| ["\"TEST_APP_GUID\"", "\"LOGGREGATOR_ENDPOINT\""] | [] | ["LOGGREGATOR_ENDPOINT", "TEST_APP_GUID"] | [] | ["LOGGREGATOR_ENDPOINT", "TEST_APP_GUID"] | go | 2 | 0 | |
main.go | // Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Usage:
//
// goproxy [-listen [host]:port] [-cacheDir /tmp]
//
// goproxy serves the Go module proxy HTTP protocol at the given address (default 0.0.0.0:8081).
// It invokes the local go command to answer requests and therefore reuses
// the current GOPATH's module download cache and configuration (GOPROXY, GOSUMDB, and so on).
//
// While the proxy is running, setting GOPROXY=http://host:port will instruct the go command to use it.
// Note that the module proxy cannot share a GOPATH with its own clients or else fetches will deadlock.
// (The client will lock the entry as “being downloaded” before sending the request to the proxy,
// which will then wait for the apparently-in-progress download to finish.)
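//
// A minimal example, assuming the default listen address above:
//
//   goproxy -cacheDir /tmp
//   GOPROXY=http://localhost:8081 go build ./...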
package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"github.com/goproxyio/goproxy/proxy"
"golang.org/x/mod/module"
)
var downloadRoot string
const listExpire = proxy.ListExpire
var listen string
var cacheDir string
var proxyHost string
var excludeHost string
func init() {
flag.StringVar(&excludeHost, "exclude", "", "exclude host pattern")
flag.StringVar(&proxyHost, "proxy", "", "next hop proxy for go modules")
flag.StringVar(&cacheDir, "cacheDir", "", "go modules cache dir")
flag.StringVar(&listen, "listen", "0.0.0.0:8081", "service listen address")
flag.Parse()
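// Keep git non-interactive for background fetches: never prompt for credentials and avoid ssh connection multiplexing, so a failing fetch errors out instead of hanging.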
if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
os.Setenv("GIT_TERMINAL_PROMPT", "0")
}
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")
}
if excludeHost != "" {
os.Setenv("GOPRIVATE", excludeHost)
}
}
func main() {
log.SetPrefix("goproxy.io: ")
log.SetFlags(0)
// TODO flags
var env struct {
GOPATH string
}
if cacheDir != "" {
downloadRoot = filepath.Join(cacheDir, "pkg/mod/cache/download")
os.Setenv("GOPATH", cacheDir)
}
if err := goJSON(&env, "go", "env", "-json", "GOPATH"); err != nil {
log.Fatal(err)
}
list := filepath.SplitList(env.GOPATH)
if len(list) == 0 || list[0] == "" {
log.Fatalf("missing $GOPATH")
}
downloadRoot = filepath.Join(list[0], "pkg/mod/cache/download")
var handle http.Handler
if proxyHost != "" {
log.Printf("ProxyHost %s\n", proxyHost)
if excludeHost != "" {
log.Printf("ExcludeHost %s\n", excludeHost)
}
handle = &logger{proxy.NewRouter(proxy.NewServer(new(ops)), &proxy.RouterOptions{
Pattern: excludeHost,
Proxy: proxyHost,
DownloadRoot: downloadRoot,
})}
} else {
handle = &logger{proxy.NewServer(new(ops))}
}
log.Fatal(http.ListenAndServe(listen, handle))
}
// goJSON runs the go command and parses its JSON output into dst.
func goJSON(dst interface{}, command ...string) error {
cmd := exec.Command(command[0], command[1:]...)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("%s:\n%s%s", strings.Join(command, " "), stderr.String(), stdout.String())
}
if err := json.Unmarshal(stdout.Bytes(), dst); err != nil {
return fmt.Errorf("%s: reading json: %v", strings.Join(command, " "), err)
}
return nil
}
// A logger is an http.Handler that logs traffic to standard error.
type logger struct {
h http.Handler
}
type responseLogger struct {
code int
http.ResponseWriter
}
func (r *responseLogger) WriteHeader(code int) {
r.code = code
r.ResponseWriter.WriteHeader(code)
}
func (l *logger) ServeHTTP(w http.ResponseWriter, r *http.Request) {
start := time.Now()
rl := &responseLogger{code: 200, ResponseWriter: w}
l.h.ServeHTTP(rl, r)
log.Printf("%.3fs %d %s\n", time.Since(start).Seconds(), rl.code, r.URL)
}
// An ops is a proxy.ServerOps implementation.
type ops struct{}
func (*ops) NewContext(r *http.Request) (context.Context, error) {
return context.Background(), nil
}
func (*ops) List(ctx context.Context, mpath string) (proxy.File, error) {
escMod, err := module.EscapePath(mpath)
if err != nil {
return nil, err
}
file := filepath.Join(downloadRoot, escMod+"/@v/listproxy")
if info, err := os.Stat(file); err == nil && time.Since(info.ModTime()) < listExpire {
return os.Open(file)
}
var list struct {
Path string
Versions []string
}
if err := goJSON(&list, "go", "list", "-m", "-json", "-versions", mpath+"@latest"); err != nil {
return nil, err
}
if list.Path != mpath {
return nil, fmt.Errorf("go list -m: asked for %s but got %s", mpath, list.Path)
}
data := []byte(strings.Join(list.Versions, "\n") + "\n")
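// an empty version list serializes to a single newline; cache an empty file in that case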
if len(data) == 1 {
data = nil
}
os.MkdirAll(path.Dir(file), 0755)
if err := ioutil.WriteFile(file, data, 0666); err != nil {
return nil, err
}
return os.Open(file)
}
func (*ops) Latest(ctx context.Context, path string) (proxy.File, error) {
d, err := download(module.Version{Path: path, Version: "latest"})
if err != nil {
return nil, err
}
return os.Open(d.Info)
}
func (*ops) Info(ctx context.Context, m module.Version) (proxy.File, error) {
d, err := download(m)
if err != nil {
return nil, err
}
return os.Open(d.Info)
}
func (*ops) GoMod(ctx context.Context, m module.Version) (proxy.File, error) {
d, err := download(m)
if err != nil {
return nil, err
}
return os.Open(d.GoMod)
}
func (*ops) Zip(ctx context.Context, m module.Version) (proxy.File, error) {
d, err := download(m)
if err != nil {
return nil, err
}
return os.Open(d.Zip)
}
type downloadInfo struct {
Path string
Version string
Info string
GoMod string
Zip string
Dir string
Sum string
GoModSum string
}
func download(m module.Version) (*downloadInfo, error) {
d := new(downloadInfo)
return d, goJSON(d, "go", "mod", "download", "-json", m.String())
}
| ["\"GIT_TERMINAL_PROMPT\"", "\"GIT_SSH\"", "\"GIT_SSH_COMMAND\""] | [] | ["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"] | [] | ["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"] | go | 3 | 0 | |
eureka/server/app.py | # -*- coding: utf-8 -*-
__author__ = "Daniel1147 ([email protected])"
__license__ = "Apache 2.0"
# pypi/conda library
import uvicorn
from fastapi import FastAPI, HTTPException, Response, status
# scip plugin
from eureka.model.application_model import ApplicationModel
from eureka.model.applications_model import ApplicationsModel
from eureka.model.instance_info_model import InstanceInfoModel
from eureka.server.registry.instance_registry import InstanceRegistry
DEFAULT_DURATION = 30
app = FastAPI()
sole_registry = InstanceRegistry()
@app.post("/eureka/v2/apps/{app_id}")
def register_instance(request: InstanceInfoModel, app_id: str):
instance_info = request.to_entity()
if instance_info.app_name != app_id:
message = "Application name in the url and the request body should be the same."
raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=message)
sole_registry.register(instance_info, DEFAULT_DURATION)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@app.delete("/eureka/v2/apps/{app_id}/{instance_id}")
def cancel_instance(app_id: str, instance_id: str):
result = sole_registry.cancel(app_id, instance_id)
if not result:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Cancellation failed.")
return Response(status_code=status.HTTP_200_OK)
@app.get("/eureka/v2/apps/{app_id}")
def get_application(app_id: str) -> ApplicationModel:
application = sole_registry.get_presenter().query_application(app_id)
if application is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
return application
@app.get("/eureka/v2/apps")
def get_applications() -> ApplicationsModel:
return sole_registry.get_presenter().query_applications()
if __name__ == "__main__":
# standard library
import os
port = int(os.getenv("port"))
uvicorn.run(app, host="0.0.0.0", port=port)
| [] | [] | ["port"] | [] | ["port"] | python | 1 | 0 | |
main_one_thread.py | from thread import JaccardCalculator
from edit_distance import editDistDP
import time
start_time = time.time()
query = "conigl"
coef = [0, 0, 0]
words = ["", "", ""]
distance = [0, 0, 0]
thread = JaccardCalculator("thread_1", "th1", query, "660000_parole_italiane.txt")
thread.run()
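# Pick the three best Jaccard matches: repeatedly take the current maximum, record its coefficient and word, then zero it out so the next pass finds the runner-up.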
maximum = max(thread.top3)
coef[0] = maximum
words[0] = thread.similar_words[thread.top3.index(maximum)]
thread.top3[thread.top3.index(maximum)] = 0
maximum = max(thread.top3)
coef[1] = maximum
words[1] = thread.similar_words[thread.top3.index(maximum)]
thread.top3[thread.top3.index(maximum)] = 0
maximum = max(thread.top3)
coef[2] = maximum
words[2] = thread.similar_words[thread.top3.index(maximum)]
thread.top3[thread.top3.index(maximum)] = 0
distance[0] = editDistDP(query, words[0], len(query), len(words[0]))
distance[1] = editDistDP(query, words[1], len(query), len(words[1]))
distance[2] = editDistDP(query, words[2], len(query), len(words[2]))
spent_time = time.time() - start_time
print(f'''
Calculations Done !!
Word : {words[0]}
with Jaccard Coefficient : --- {coef[0]} --- has the minimum distance : {distance[0]}
Word : {words[1]}
with Jaccard Coefficient : --- {coef[1]} --- has the distance : {distance[1]}
Word : {words[2]}
with Jaccard Coefficient : --- {coef[2]} --- has the distance : {distance[2]}
Spent time : {spent_time} seconds ! main 222222 ''')
| [] | [] | [] | [] | [] | python | null | null | null |
pype/plugins/maya/create/create_render.py | import os
import json
import appdirs
import requests
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
import pype.maya.lib as lib
import avalon.maya
class CreateRender(avalon.maya.Creator):
"""Create render layer for export"""
label = "Render"
family = "rendering"
icon = "eye"
defaults = ["Main"]
_token = None
_user = None
_password = None
# renderSetup instance
_rs = None
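# per-renderer render-globals attribute that holds the image file prefix, and the prefix template this creator writes into it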
_image_prefix_nodes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'defaultRenderGlobals.imageFilePrefix',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
_image_prefixes = {
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'vray': 'maya/<scene>/<Layer>/<Layer>',
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'renderman': 'maya/<Scene>/<layer>/<layer>_<aov>',
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>'
}
def __init__(self, *args, **kwargs):
super(CreateRender, self).__init__(*args, **kwargs)
def process(self):
exists = cmds.ls(self.name)
if exists:
return cmds.warning("%s already exists." % exists[0])
use_selection = self.options.get("useSelection")
with lib.undo_chunk():
self._create_render_settings()
instance = super(CreateRender, self).process()
cmds.setAttr("{}.machineList".format(instance), lock=True)
self._rs = renderSetup.instance()
layers = self._rs.getRenderLayers()
if use_selection:
print(">>> processing existing layers")
sets = []
for layer in layers:
print(" - creating set for {}".format(layer.name()))
render_set = cmds.sets(n="LAYER_{}".format(layer.name()))
sets.append(render_set)
cmds.sets(sets, forceElement=instance)
# if no render layers are present, create default one with
# asterix selector
if not layers:
rl = self._rs.createRenderLayer('Main')
cl = rl.createCollection("defaultCollection")
cl.getSelector().setPattern('*')
renderer = cmds.getAttr(
'defaultRenderGlobals.currentRenderer').lower()
# handle various renderman names
if renderer.startswith('renderman'):
renderer = 'renderman'
cmds.setAttr(self._image_prefix_nodes[renderer],
self._image_prefixes[renderer],
type="string")
def _create_render_settings(self):
# get pools
pools = []
deadline_url = os.environ.get("DEADLINE_REST_URL", None)
muster_url = os.environ.get("MUSTER_REST_URL", None)
if deadline_url and muster_url:
self.log.error(
"Both Deadline and Muster are enabled. " "Cannot support both."
)
raise RuntimeError("Both Deadline and Muster are enabled")
if deadline_url is None:
self.log.warning("Deadline REST API url not found.")
else:
argument = "{}/api/pools?NamesOnly=true".format(deadline_url)
try:
response = self._requests_get(argument)
except requests.exceptions.ConnectionError as e:
msg = 'Cannot connect to deadline web service'
self.log.error(msg)
raise RuntimeError('{} - {}'.format(msg, e))
if not response.ok:
self.log.warning("No pools retrieved")
else:
pools = response.json()
self.data["primaryPool"] = pools
# We add a string "-" to allow the user to not
# set any secondary pools
self.data["secondaryPool"] = ["-"] + pools
if muster_url is None:
self.log.warning("Muster REST API URL not found.")
else:
self.log.info(">>> Loading Muster credentials ...")
self._load_credentials()
self.log.info(">>> Getting pools ...")
try:
pools = self._get_muster_pools()
except requests.exceptions.HTTPError as e:
if str(e).startswith("401"):
self.log.warning("access token expired")
self._show_login()
raise RuntimeError("Access token expired")
except requests.exceptions.ConnectionError:
self.log.error("Cannot connect to Muster API endpoint.")
raise RuntimeError("Cannot connect to {}".format(muster_url))
pool_names = []
for pool in pools:
self.log.info(" - pool: {}".format(pool["name"]))
pool_names.append(pool["name"])
self.data["primaryPool"] = pool_names
self.data["suspendPublishJob"] = False
self.data["extendFrames"] = False
self.data["overrideExistingFrame"] = True
# self.data["useLegacyRenderLayers"] = True
self.data["priority"] = 50
self.data["framesPerTask"] = 1
self.data["whitelist"] = False
self.data["machineList"] = ""
self.data["useMayaBatch"] = True
self.options = {"useSelection": False} # Force no content
def _load_credentials(self):
"""
Load Muster credentials from file and set `MUSTER_USER` and
`MUSTER_PASSWORD`. `MUSTER_REST_URL` is loaded from presets.
.. todo::
Show login dialog if access token is invalid or missing.
"""
app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
file_name = "muster_cred.json"
fpath = os.path.join(app_dir, file_name)
file = open(fpath, "r")
muster_json = json.load(file)
self._token = muster_json.get("token", None)
if not self._token:
self._show_login()
raise RuntimeError("Invalid access token for Muster")
file.close()
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if not self.MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
def _get_muster_pools(self):
"""
Get render pools from muster
"""
params = {"authToken": self._token}
api_entry = "/api/pools/list"
response = self._requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
if response.status_code != 200:
if response.status_code == 401:
self.log.warning("Authentication token expired.")
self._show_login()
else:
self.log.error(
("Cannot get pools from "
"Muster: {}").format(response.status_code)
)
raise Exception("Cannot get pools from Muster")
try:
pools = response.json()["ResponseData"]["pools"]
except ValueError as e:
self.log.error("Invalid response from Muster server {}".format(e))
raise Exception("Invalid response from Muster server")
return pools
def _show_login(self):
# authentication token expired so we need to login to Muster
# again to get it. We use Pype API call to show login window.
api_url = "{}/muster/show_login".format(
os.environ["PYPE_REST_API_URL"])
self.log.debug(api_url)
login_response = self._requests_post(api_url, timeout=1)
if login_response.status_code != 200:
self.log.error("Cannot show login form to Muster")
raise Exception("Cannot show login form to Muster")
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
) # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
) # noqa
return requests.get(*args, **kwargs)
| []
| []
| [
"DEADLINE_REST_URL",
"PYPE_REST_API_URL",
"PYPE_DONT_VERIFY_SSL",
"MUSTER_REST_URL"
]
| [] | ["DEADLINE_REST_URL", "PYPE_REST_API_URL", "PYPE_DONT_VERIFY_SSL", "MUSTER_REST_URL"] | python | 4 | 0 | |
dgraphql/resolvers/resolver_test.go | // Copyright 2019 dfuse Platform Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resolvers
import (
"context"
"fmt"
"os"
"testing"
"github.com/dfuse-io/dfuse-eosio/trxdb"
pbcodec "github.com/dfuse-io/dfuse-eosio/pb/dfuse/eosio/codec/v1"
pbsearcheos "github.com/dfuse-io/dfuse-eosio/pb/dfuse/eosio/search/v1"
"github.com/dfuse-io/dgraphql"
"github.com/dfuse-io/dtracing"
"github.com/dfuse-io/logging"
pbsearch "github.com/dfuse-io/pbgo/dfuse/search/v1"
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func init() {
if os.Getenv("TEST_LOG") != "" {
zlog = logging.MustCreateLoggerWithLevel("test", zap.NewAtomicLevelAt(zap.DebugLevel))
logging.Set(zlog)
}
}
func newSearchMatchArchive(trxID string) *pbsearch.SearchMatch {
cs, err := ptypes.MarshalAny(&pbsearcheos.Match{})
if err != nil {
panic(err)
}
return &pbsearch.SearchMatch{
TrxIdPrefix: trxID,
BlockNum: 0,
Index: 0,
Cursor: "",
ChainSpecific: cs,
Undo: false,
IrrBlockNum: 0,
}
}
func newSearchMatchLive(trxID string, idx int) *pbsearch.SearchMatch {
cs, err := ptypes.MarshalAny(&pbsearcheos.Match{
Block: &pbsearcheos.BlockTrxPayload{
Trace: &pbcodec.TransactionTrace{Index: uint64(idx)},
},
})
if err != nil {
panic(err)
}
return &pbsearch.SearchMatch{
TrxIdPrefix: trxID,
ChainSpecific: cs,
}
}
func newDgraphqlResponse(trxID string, idx int) *SearchTransactionForwardResponse {
return &SearchTransactionForwardResponse{
SearchTransactionBackwardResponse: SearchTransactionBackwardResponse{
trxIDPrefix: trxID,
trxTrace: &pbcodec.TransactionTrace{
Index: uint64(idx),
},
},
}
}
func TestSubscriptionSearchForward(t *testing.T) {
ctx := dtracing.NewFixedTraceIDInContext(context.Background(), "00000000000000000000000000000000")
tests := []struct {
name string
fromRouter []interface{}
fromDB map[string][]*pbcodec.TransactionEvent
expect []*SearchTransactionForwardResponse
expectError error
}{
{
name: "simple",
fromRouter: []interface{}{
newSearchMatchArchive("trx123"),
fmt.Errorf("failed"),
},
fromDB: map[string][]*pbcodec.TransactionEvent{
"trx123": {
{Id: "trx12399999999999999999", Event: pbcodec.NewSimpleTestExecEvent(5)},
},
},
expect: []*SearchTransactionForwardResponse{
newDgraphqlResponse("trx123", 5),
{
err: dgraphql.Errorf(ctx, "hammer search result: failed"),
},
},
expectError: nil,
},
{
name: "hammered",
fromRouter: []interface{}{
newSearchMatchArchive("trx000"),
newSearchMatchArchive("trx001"),
newSearchMatchArchive("trx002"),
newSearchMatchArchive("trx022"),
newSearchMatchLive("trx003", 8),
newSearchMatchLive("trx004", 9),
newSearchMatchLive("trx005", 10),
},
fromDB: map[string][]*pbcodec.TransactionEvent{
"trx000": {
{Id: "trx000boo", Event: pbcodec.NewSimpleTestExecEvent(5)},
},
"trx001": {
{Id: "trx001boo", Event: pbcodec.NewSimpleTestExecEvent(6)},
},
"trx002": {
{Id: "trx002boo", Event: pbcodec.NewSimpleTestExecEvent(7)},
},
"trx022": {
{Id: "trx022boo", Event: pbcodec.NewSimpleTestExecEvent(11)},
},
},
expect: []*SearchTransactionForwardResponse{
newDgraphqlResponse("trx000", 5),
newDgraphqlResponse("trx001", 6),
newDgraphqlResponse("trx002", 7),
newDgraphqlResponse("trx022", 11),
newDgraphqlResponse("trx003", 8),
newDgraphqlResponse("trx004", 9),
newDgraphqlResponse("trx005", 10),
},
expectError: nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
root := &Root{
searchClient: pbsearch.NewTestRouterClient(test.fromRouter),
trxsReader: trxdb.NewTestTransactionsReader(test.fromDB),
}
res, err := root.streamSearchTracesBoth(true, ctx, StreamSearchArgs{})
if test.expectError != nil {
require.Error(t, err)
} else {
require.NoError(t, err)
				var actual []*SearchTransactionForwardResponse
				for el := range res {
					actual = append(actual, el)
				}
				assert.Equal(t, test.expect, actual)
}
})
}
}
//
//import (
// "context"
// "fmt"
// "testing"
//
// "github.com/sergi/go-diff/diffmatchpatch"
//
// "github.com/graph-gophers/graphql-go"
// "github.com/tidwall/gjson"
//
// "github.com/stretchr/testify/require"
//
// test_schema "github.com/dfuse-io/dgraphql/schema/test"
//
// "github.com/stretchr/testify/assert"
//)
//
//type TestRoot struct {
// Root
//}
//
//func (r *TestRoot) QueryTestSearch() (*SearchTransactionForwardResponse, error) {
// resp := &SearchTransactionForwardResponse{
// SearchTransactionBackwardResponse: SearchTransactionBackwardResponse{
// cursor: "cursor.1",
// irreversibleBlockNum: 99,
// matchingActionIndexes: []uint32{0},
// dbops: []byte(`[{"op":"INS","action_idx":0,"npayer":"laulaulau123","path":"eosio/laulaulau123/userres/laulaulau123","new":"3044d0266a13b589000000000000000004454f5300000000000000000000000004454f53000000000000000000000000"},{"op":"UPD","action_idx":1,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/rammarket/cpd4ykuhc5d.4","old":"00407a10f35a00000452414d434f5245fe2d1cac0c0000000052414d00000000000000000000e03f6a5495f00200000004454f5300000000000000000000e03f","new":"00407a10f35a00000452414d434f5245fe2d1cac0c0000000052414d00000000000000000000e03f6a5495f00200000004454f5300000000000000000000e03f"},{"op":"UPD","action_idx":1,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/rexpool/","old":"00e03273470300000004454f5300000000b83f447b0c00000004454f53000000001bbb204b0000000004454f53000000009872b7c20f00000004454f5300000000c8c58b3ce1e800000452455800000000000000000000000004454f53000000002702000000000000","new":"00e03273470300000004454f5300000000bd3f447b0c00000004454f53000000001bbb204b0000000004454f53000000009d72b7c20f00000004454f5300000000c8c58b3ce1e800000452455800000000000000000000000004454f53000000002702000000000000"},{"op":"UPD","action_idx":1,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/rammarket/cpd4ykuhc5d.4","old":"00407a10f35a00000452414d434f5245fe2d1cac0c0000000052414d00000000000000000000e03f6a5495f00200000004454f5300000000000000000000e03f","new":"00407a10f35a00000452414d434f5245151e1cac0c0000000052414d00000000000000000000e03f1b5895f00200000004454f5300000000000000000000e03f"},{"op":"UPD","action_idx":1,"opayer":"laulaulau123","npayer":"laulaulau123","path":"eosio/laulaulau123/userres/laulaulau123","old":"3044d0266a13b589000000000000000004454f5300000000000000000000000004454f53000000000000000000000000","new":"3044d0266a13b589000000000000000004454f5300000000000000000000000004454f5300000000e90f000000000000"},{"op":"UPD","action_idx":1,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/global/global","old":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b000010000004000600000000001000000002d2e353030000002a63869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000009a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48","new":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b0000100000040006000000000010000000ebe1e35303000000db66869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000009a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48"},{"op":"UPD","action_idx":1,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/global2/global2","old":"00009a86cd480087cd48ecc60b5659d7e24401","new":"00000187cd480087cd48ecc60b5659d7e24401"},{"op":"UPD","action_idx":1,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/global3/global3","old":"809029437288050014705804f791cd43","new":"809029437288050014705804f791cd43"},{"op":"UPD","action_idx":2,"opayer":"junglefaucet","npayer":"junglefaucet","path":"eosio.token/junglefaucet/accounts/........ehbo5","old":"f5b41d9f1109000004454f5300000000","new":"44b11d9f1109000004454f5300000000"},{"op":"UPD","action_idx":2,"opayer":"eosio.ram","npayer":"eosio.ram","path":"eosio.token/eosio.ram/accounts/........ehbo5","old":"2a63869c0000000004454f5300000000","new":"db66869c0000000004454f5300000000"},{"op":"UPD","action_idx":5,"opayer":"junglefaucet","npayer":"junglefaucet","path":"eosio.token/junglefaucet/accounts/........ehbo5","old":"44b11d9f1109000004454f5300000000","new
":"3fb11d9f1109000004454f5300000000"},{"op":"UPD","action_idx":5,"opayer":"eosio.ramfee","npayer":"eosio.ramfee","path":"eosio.token/eosio.ramfee/accounts/........ehbo5","old":"24abd0000000000004454f5300000000","new":"29abd0000000000004454f5300000000"},{"op":"UPD","action_idx":8,"opayer":"eosio.ramfee","npayer":"eosio.ramfee","path":"eosio.token/eosio.ramfee/accounts/........ehbo5","old":"29abd0000000000004454f5300000000","new":"24abd0000000000004454f5300000000"},{"op":"UPD","action_idx":8,"opayer":"eosio.rex","npayer":"eosio.rex","path":"eosio.token/eosio.rex/accounts/........ehbo5","old":"aa7e0d1f1b00000004454f5300000000","new":"af7e0d1f1b00000004454f5300000000"},{"op":"INS","action_idx":11,"npayer":"laulaulau123","path":"eosio/laulaulau123/delband/laulaulau123","new":"3044d0266a13b5893044d0266a13b589102700000000000004454f5300000000102700000000000004454f5300000000"},{"op":"UPD","action_idx":11,"opayer":"laulaulau123","npayer":"laulaulau123","path":"eosio/laulaulau123/userres/laulaulau123","old":"3044d0266a13b589000000000000000004454f5300000000000000000000000004454f5300000000e90f000000000000","new":"3044d0266a13b589102700000000000004454f5300000000102700000000000004454f5300000000e90f000000000000"},{"op":"INS","action_idx":11,"npayer":"laulaulau123","path":"eosio/eosio/voters/laulaulau123","new":"3044d0266a13b589000000000000000000204e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},{"op":"UPD","action_idx":11,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/global/global","old":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b0000100000040006000000000010000000ebe1e35303000000db66869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000009a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48","new":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b0000100000040006000000000010000000ebe1e35303000000db66869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000009a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48"},{"op":"UPD","action_idx":11,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/global2/global2","old":"00000187cd480087cd48ecc60b5659d7e24401","new":"00000187cd480087cd48ecc60b5659d7e24401"},{"op":"UPD","action_idx":11,"opayer":"eosio","npayer":"eosio","path":"eosio/eosio/global3/global3","old":"809029437288050014705804f791cd43","new":"809029437288050014705804f791cd43"},{"op":"UPD","action_idx":12,"opayer":"junglefaucet","npayer":"junglefaucet","path":"eosio.token/junglefaucet/accounts/........ehbo5","old":"3fb11d9f1109000004454f5300000000","new":"1f631d9f1109000004454f5300000000"},{"op":"UPD","action_idx":12,"opayer":"eosio.stake","npayer":"eosio.stake","path":"eosio.token/eosio.stake/accounts/........ehbo5","old":"c59eb750920c000004454f5300000000","new":"e5ecb750920c000004454f5300000000"}]`),
// blockHeader: []byte(`{"timestamp":"2019-05-09T10:54:56.500","producer":"eosdacserval","confirmed":0,"previous":"01a7dc74fe39f798f33e3ab8b1382c8fa2b79cea7a828bb33aee8387b9cbe85f","transaction_mroot":"ce1ef6dc2f0bb511a8b20b5cde4b9091c6c975efefa805511dfdf9e1cb9792ed","action_mroot":"1b639c974b0f4fba0ef36a9644e41b2ef24bc126b42aef8140838c2ad9b45e7a","schedule_version":178,"header_extensions":[],"producer_signature":"SIG_K1_KhmxyeAgYEUriXYNGaKoK8d8nHMmEpGN5xNg2xZTzFXNZb3eyTuAJkohkhBAuCBD3GBUvWSRTpVeCBQXXoVojCyFF4GsL6"}`),
// trxTraces: []byte(`{"id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","receipt":{"status":"executed","cpu_usage_us":1360,"net_usage_words":42},"elapsed":35605,"net_usage":336,"scheduled":false,"action_traces":[{"receipt":{"receiver":"eosio","act_digest":"e519e7da08910c3127fa8347a3cd128afca1fb2c6ec871f832eb97cf7fc57246","global_sequence":398248105,"recv_sequence":31715389,"auth_sequence":[["junglefaucet",322466]],"code_sequence":7,"abi_sequence":7},"act":{"account":"eosio","name":"newaccount","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"creator":"junglefaucet","name":"laulaulau123","owner":{"threshold":1,"keys":[{"key":"EOS7RdKLHvWkS6y46UxLWEn6jzzkUwiCn8rpHyNutG6qpTe3dF3ga","weight":1}],"accounts":[],"waits":[]},"active":{"threshold":1,"keys":[{"key":"EOS7j3SCLpSpq1pPXajb71L4nzj1KUPnMmMJ3hzPhcAu8ViDRuUHh","weight":1}],"accounts":[],"waits":[]}},"hex_data":"9015d266a9c8a67e3044d0266a13b589010000000100034e17de2b351f0c853e2ed02a68e37f858c2896da7c5fb96b17b1700703c3d8bf010000000100000001000375a354dc4cfbb457e078e01b7f2fc8b2a58d4f4f2e3373c9ae7f069b5467b50301000000"},"context_free":false,"elapsed":27233,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[{"account":"laulaulau123","delta":2996}],"except":null,"inline_traces":[]},{"receipt":{"receiver":"eosio","act_digest":"dd2e40946e93b51725f983992f58f064cf05d8266c77b3d634536225e4985bd9","global_sequence":398248106,"recv_sequence":31715390,"auth_sequence":[["junglefaucet",322467]],"code_sequence":7,"abi_sequence":7},"act":{"account":"eosio","name":"buyrambytes","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"payer":"junglefaucet","receiver":"laulaulau123","bytes":4096},"hex_data":"9015d266a9c8a67e3044d0266a13b58900100000"},"context_free":false,"elapsed":6651,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[{"receipt":{"receiver":"eosio.token","act_digest":"0ea67fd9c19d29f0907423ad20169f7ba7a0affc0ee27bd5fcf65dc7f97fa3ca","global_sequence":398248107,"recv_sequence":72508439,"auth_sequence":[["eosio.ram",153681],["junglefaucet",322468]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"},{"actor":"eosio.ram","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.ram","quantity":"0.0945 EOS","memo":"buy 
ram"},"hex_data":"9015d266a9c8a67e000090e602ea3055b10300000000000004454f5300000000076275792072616d"},"context_free":false,"elapsed":251,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[{"receipt":{"receiver":"junglefaucet","act_digest":"0ea67fd9c19d29f0907423ad20169f7ba7a0affc0ee27bd5fcf65dc7f97fa3ca","global_sequence":398248108,"recv_sequence":95312,"auth_sequence":[["eosio.ram",153682],["junglefaucet",322469]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"},{"actor":"eosio.ram","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.ram","quantity":"0.0945 EOS","memo":"buy ram"},"hex_data":"9015d266a9c8a67e000090e602ea3055b10300000000000004454f5300000000076275792072616d"},"context_free":false,"elapsed":18,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]},{"receipt":{"receiver":"eosio.ram","act_digest":"0ea67fd9c19d29f0907423ad20169f7ba7a0affc0ee27bd5fcf65dc7f97fa3ca","global_sequence":398248109,"recv_sequence":215189,"auth_sequence":[["eosio.ram",153683],["junglefaucet",322470]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"},{"actor":"eosio.ram","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.ram","quantity":"0.0945 EOS","memo":"buy ram"},"hex_data":"9015d266a9c8a67e000090e602ea3055b10300000000000004454f5300000000076275792072616d"},"context_free":false,"elapsed":14,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]}]},{"receipt":{"receiver":"eosio.token","act_digest":"5ab1c77b4f08b230b0b256790d35f2453afd2f51d8d9e5a7214d9ac07d5a9986","global_sequence":398248110,"recv_sequence":72508440,"auth_sequence":[["junglefaucet",322471]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.ramfee","quantity":"0.0005 EOS","memo":"ram 
fee"},"hex_data":"9015d266a9c8a67ea0d492e602ea3055050000000000000004454f53000000000772616d20666565"},"context_free":false,"elapsed":97,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[{"receipt":{"receiver":"junglefaucet","act_digest":"5ab1c77b4f08b230b0b256790d35f2453afd2f51d8d9e5a7214d9ac07d5a9986","global_sequence":398248111,"recv_sequence":95313,"auth_sequence":[["junglefaucet",322472]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.ramfee","quantity":"0.0005 EOS","memo":"ram fee"},"hex_data":"9015d266a9c8a67ea0d492e602ea3055050000000000000004454f53000000000772616d20666565"},"context_free":false,"elapsed":34,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]},{"receipt":{"receiver":"eosio.ramfee","act_digest":"5ab1c77b4f08b230b0b256790d35f2453afd2f51d8d9e5a7214d9ac07d5a9986","global_sequence":398248112,"recv_sequence":258268,"auth_sequence":[["junglefaucet",322473]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.ramfee","quantity":"0.0005 EOS","memo":"ram fee"},"hex_data":"9015d266a9c8a67ea0d492e602ea3055050000000000000004454f53000000000772616d20666565"},"context_free":false,"elapsed":12,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]}]},{"receipt":{"receiver":"eosio.token","act_digest":"ebe2e402b101a8c73d118ef7fa86fabdb0900ff61631b8869d92f9c313d92a4e","global_sequence":398248113,"recv_sequence":72508441,"auth_sequence":[["eosio.ramfee",129243]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"eosio.ramfee","permission":"active"}],"data":{"from":"eosio.ramfee","to":"eosio.rex","quantity":"0.0005 EOS","memo":"transfer from eosio.ramfee to 
eosio.rex"},"hex_data":"a0d492e602ea30550000e8ea02ea3055050000000000000004454f5300000000277472616e736665722066726f6d20656f73696f2e72616d66656520746f20656f73696f2e726578"},"context_free":false,"elapsed":113,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[{"receipt":{"receiver":"eosio.ramfee","act_digest":"ebe2e402b101a8c73d118ef7fa86fabdb0900ff61631b8869d92f9c313d92a4e","global_sequence":398248114,"recv_sequence":258269,"auth_sequence":[["eosio.ramfee",129244]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"eosio.ramfee","permission":"active"}],"data":{"from":"eosio.ramfee","to":"eosio.rex","quantity":"0.0005 EOS","memo":"transfer from eosio.ramfee to eosio.rex"},"hex_data":"a0d492e602ea30550000e8ea02ea3055050000000000000004454f5300000000277472616e736665722066726f6d20656f73696f2e72616d66656520746f20656f73696f2e726578"},"context_free":false,"elapsed":5,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]},{"receipt":{"receiver":"eosio.rex","act_digest":"ebe2e402b101a8c73d118ef7fa86fabdb0900ff61631b8869d92f9c313d92a4e","global_sequence":398248115,"recv_sequence":46240,"auth_sequence":[["eosio.ramfee",129245]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"eosio.ramfee","permission":"active"}],"data":{"from":"eosio.ramfee","to":"eosio.rex","quantity":"0.0005 EOS","memo":"transfer from eosio.ramfee to eosio.rex"},"hex_data":"a0d492e602ea30550000e8ea02ea3055050000000000000004454f5300000000277472616e736665722066726f6d20656f73696f2e72616d66656520746f20656f73696f2e726578"},"context_free":false,"elapsed":11,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]}]}]},{"receipt":{"receiver":"eosio","act_digest":"277b27049902dfa6804c311c618ccdd49c9db418556b86a600c69f00928d8e21","global_sequence":398248116,"recv_sequence":31715391,"auth_sequence":[["junglefaucet",322474]],"code_sequence":7,"abi_sequence":7},"act":{"account":"eosio","name":"delegatebw","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"from":"junglefaucet","receiver":"laulaulau123","stake_net_quantity":"1.0000 EOS","stake_cpu_quantity":"1.0000 
EOS","transfer":1},"hex_data":"9015d266a9c8a67e3044d0266a13b589102700000000000004454f5300000000102700000000000004454f530000000001"},"context_free":false,"elapsed":583,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[{"account":"laulaulau123","delta":450}],"except":null,"inline_traces":[{"receipt":{"receiver":"eosio.token","act_digest":"8d3cea2c340b4db23b96b79cec7bf9c3f2bb4ff13be31116014f585b0ea73e84","global_sequence":398248117,"recv_sequence":72508442,"auth_sequence":[["junglefaucet",322475]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.stake","quantity":"2.0000 EOS","memo":"stake bandwidth"},"hex_data":"9015d266a9c8a67e0014341903ea3055204e00000000000004454f53000000000f7374616b652062616e647769647468"},"context_free":false,"elapsed":120,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[{"receipt":{"receiver":"junglefaucet","act_digest":"8d3cea2c340b4db23b96b79cec7bf9c3f2bb4ff13be31116014f585b0ea73e84","global_sequence":398248118,"recv_sequence":95314,"auth_sequence":[["junglefaucet",322476]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.stake","quantity":"2.0000 EOS","memo":"stake bandwidth"},"hex_data":"9015d266a9c8a67e0014341903ea3055204e00000000000004454f53000000000f7374616b652062616e647769647468"},"context_free":false,"elapsed":6,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]},{"receipt":{"receiver":"eosio.stake","act_digest":"8d3cea2c340b4db23b96b79cec7bf9c3f2bb4ff13be31116014f585b0ea73e84","global_sequence":398248119,"recv_sequence":278094,"auth_sequence":[["junglefaucet",322477]],"code_sequence":3,"abi_sequence":2},"act":{"account":"eosio.token","name":"transfer","authorization":[{"actor":"junglefaucet","permission":"active"}],"data":{"from":"junglefaucet","to":"eosio.stake","quantity":"2.0000 EOS","memo":"stake bandwidth"},"hex_data":"9015d266a9c8a67e0014341903ea3055204e00000000000004454f53000000000f7374616b652062616e647769647468"},"context_free":false,"elapsed":72,"console":"","trx_id":"fb611c6e6be3282a5a1d4b7f0f62e2c078d5e0a55bdb944bb400da0f118f1c6c","block_num":27778165,"block_time":"2019-05-09T10:54:56.500","producer_block_id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","account_ram_deltas":[],"except":null,"inline_traces":[]}]}]}],"except":null}`),
// tableops: []byte(`[{"op":"INS","action_idx":0,"payer":"laulaulau123","path":"eosio/laulaulau123/userres"},{"op":"INS","action_idx":11,"payer":"laulaulau123","path":"eosio/laulaulau123/delband"}]`),
// ramops: []byte(`[{"op":"newaccount","action_idx":0,"payer":"laulaulau123","delta":2724,"usage":2724},{"op":"create_table","action_idx":0,"payer":"laulaulau123","delta":112,"usage":2836},{"op":"primary_index_add","action_idx":0,"payer":"laulaulau123","delta":160,"usage":2996},{"op":"create_table","action_idx":11,"payer":"laulaulau123","delta":112,"usage":3108},{"op":"primary_index_add","action_idx":11,"payer":"laulaulau123","delta":160,"usage":3268},{"op":"primary_index_add","action_idx":11,"payer":"laulaulau123","delta":178,"usage":3446}]`),
// dtrxops: []byte(``),
// creationTree: []byte(`[[0,-1,0],[1,-1,1],[2,1,2],[3,2,3],[4,2,4],[5,1,5],[6,5,6],[7,5,7],[8,1,8],[9,8,9],[10,8,10],[11,-1,11],[12,11,12],[13,12,13],[14,12,14]]`),
// },
// undo: true,
// }
//
// return resp, nil
//}
//
//func TestResolverV1Payload(t *testing.T) {
//
// s, err := graphql.ParseSchema(
// test_schema.String(),
// &TestRoot{Root: Root{}},
// graphql.PrefixRootFunctions(),
// graphql.UseStringDescriptions(), graphql.UseFieldResolvers(),
// graphql.MaxDepth(24), // this is good for at least 6 levels of `inlineTraces`, fetching its data, etc..
// )
//
// assert.NoError(t, err)
//
// q := `
//{
// TestSearch {
// cursor
// undo
// isIrreversible
// irreversibleBlockNum
// trace {
// id
// block {
// ...block
// }
// status
// receipt {
// status
// cpuUsageMicroSeconds
// netUsageWords
// }
// elapsed
// netUsage
// scheduled
// executedActions {
// ...actionTrace
// }
// matchingActionIndexes{
// ...actionTrace
// }
// topLevelActions{
// ...actionTrace
// }
// exceptJSON
// }
// }
// }
//
//fragment block on BlockHeader {
// id
// num
// timestamp
// producer
// confirmed
// previous
// transactionMRoot
// actionMRoot
// scheduleVersion
// newProducers {
// version
// producers {
// producerName
// blockSigningKey
// }
// }
//}
//
//fragment transaction on SignedTransaction {
// expiration
// refBlockNum
// refBlockPrefix
// maxNetUsageWords
// maxCPUUsageMS
// delaySec
// contextFreeActions {
// account
// name
// authorization {
// actor
// permission
// }
// json
// data
// hexData
// }
// actions {
// ...action
// }
//}
//
//fragment action on Action {
// account
// name
// authorization {
// actor
// permission
// }
// json
// data
// hexData
//}
//
//fragment actionReceipt on ActionReceipt {
// receiver
// digest
// globalSequence
// codeSequence
// abiSequence
//}
//
//fragment authorization on PermissionLevel {
// actor
// permission
//}
//
//fragment ramOps on RAMOp {
// operation
// payer
// delta
// usage
//}
//
//fragment dtrxOps on DTrxOp {
// operation
// sender
// senderID
// payer
// publishedAt
// delayUntil
// expirationAt
// trxID
// transaction {
// ...transaction
// }
//}
//
//fragment tableOps on TableOp {
//
// operation
// table {
// code
// table
// scope
// }
//
//}
//
//fragment dbOps on DBOp {
// operation
// oldPayer
// newPayer
// key {
// code
// table
// scope
// key
// }
// oldData
// newData
// oldJSON
// newJSON
//}
//
//fragment baseActionTrace on ActionTrace {
// seq
// receiver
// account
// name
// data
// json
// hexData
// receipt {
// ...actionReceipt
// }
// authorization {
// ...authorization
// }
// ramOps {
// ...ramOps
// }
// dtrxOps {
// ...dtrxOps
// }
// tableOps {
// ...tableOps
// }
// dbOps {
// ...dbOps
// }
// console
// contextFree
// elapsed
// exceptJSON
// isNotify
// isMatchingQuery
//}
//
//fragment actionTrace on ActionTrace {
// ...baseActionTrace
// createdActions{
// ...baseActionTrace
// }
// creatorAction{
// ...baseActionTrace
// }
// closestUnnotifiedAncestorAction{
// ...baseActionTrace
// }
//}
// `
//
// expected := `{"data":{"searchTransactionsForward":{"results":[{"cursor":"Exu5wgkZmwO_II01DdCfYfe7JpE_AVJuUw7vIBkV0Yrz9yOUj5T3CA==","undo":false,"isIrreversible":true,"irreversibleBlockNum":27778165,"trace":{"id":"8dd157aab9a882c168f29db5b3d46043e84f5a220cce0bba266f8a626a962ab8","block":{"id":"01a7dc756cc6f1397b3efafe3433dd815651212f56cf7e6ab11cebd1b65044f7","num":27778165,"timestamp":"2019-05-09T10:54:56.5Z","producer":"eosdacserval","confirmed":0,"previous":"01a7dc74fe39f798f33e3ab8b1382c8fa2b79cea7a828bb33aee8387b9cbe85f","transactionMRoot":"ce1ef6dc2f0bb511a8b20b5cde4b9091c6c975efefa805511dfdf9e1cb9792ed","actionMRoot":"1b639c974b0f4fba0ef36a9644e41b2ef24bc126b42aef8140838c2ad9b45e7a","scheduleVersion":178,"newProducers":null},"status":"EXECUTED","receipt":{"status":"EXECUTED","cpuUsageMicroSeconds":100,"netUsageWords":0},"elapsed":"407","netUsage":"0","scheduled":false,"executedActions":[{"seq":"398248104","receiver":"eosio","account":"eosio","name":"onblock","data":{"h":{"timestamp":1221428992,"producer":"eosdacserval","confirmed":0,"previous":"01a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f95","transaction_mroot":"0000000000000000000000000000000000000000000000000000000000000000","action_mroot":"ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054","schedule_version":178,"new_producers":null}},"json":{"h":{"timestamp":1221428992,"producer":"eosdacserval","confirmed":0,"previous":"01a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f95","transaction_mroot":"0000000000000000000000000000000000000000000000000000000000000000","action_mroot":"ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054","schedule_version":178,"new_producers":null}},"hexData":"0087cd4810cdbe0a23933055000001a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f950000000000000000000000000000000000000000000000000000000000000000ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054b20000000000","receipt":{"receiver":"eosio","digest":"da84489f49b49d492a7b9d9f453443db039960b7ce3460d9289e024b4bd3b1af","globalSequence":"398248104","codeSequence":"7","abiSequence":"7"},"authorization":[{"actor":"eosio","permission":"active"}],"ramOps":null,"dtrxOps":null,"tableOps":null,"dbOps":[{"operation":"UPD","oldPayer":"eosdacserval","newPayer":"eosdacserval","key":{"code":"eosio","table":"producers","scope":"eosio","key":"eosdacserval"},"oldData":"10cdbe0a23933055048a38058cb18c430002287039ea488ae1398c60a5e66350dfbdadf59faeffce2be7fccfafd5c30bedcf011168747470733a2f2f656f736461632e696f751500004036faa2638805003a03","newData":"10cdbe0a23933055048a38058cb18c430002287039ea488ae1398c60a5e66350dfbdadf59faeffce2be7fccfafd5c30bedcf011168747470733a2f2f656f736461632e696f761500004036faa2638805003a03","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global","scope":"eosio","key":"global"},"oldData":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b000010000004000600000000001000000002d2e353030000002a63869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000008a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48","newData":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b000010000004000600000000001000000002d2e353030000002a63869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000009a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb
48","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global2","scope":"eosio","key":"global2"},"oldData":"00009a86cd48ff86cd48ecc60b5659d7e24401","newData":"00009a86cd480087cd48ecc60b5659d7e24401","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global3","scope":"eosio","key":"global3"},"oldData":"809029437288050014705804f791cd43","newData":"809029437288050014705804f791cd43","oldJSON":null,"newJSON":null}],"console":"","contextFree":false,"elapsed":"281","exceptJSON":null,"isNotify":false,"isMatchingQuery":true,"createdActions":[],"creatorAction":null,"closestUnnotifiedAncestorAction":null}],"matchingActionIndexes":[{"seq":"398248104","receiver":"eosio","account":"eosio","name":"onblock","data":{"h":{"timestamp":1221428992,"producer":"eosdacserval","confirmed":0,"previous":"01a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f95","transaction_mroot":"0000000000000000000000000000000000000000000000000000000000000000","action_mroot":"ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054","schedule_version":178,"new_producers":null}},"json":{"h":{"timestamp":1221428992,"producer":"eosdacserval","confirmed":0,"previous":"01a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f95","transaction_mroot":"0000000000000000000000000000000000000000000000000000000000000000","action_mroot":"ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054","schedule_version":178,"new_producers":null}},"hexData":"0087cd4810cdbe0a23933055000001a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f950000000000000000000000000000000000000000000000000000000000000000ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054b20000000000","receipt":{"receiver":"eosio","digest":"da84489f49b49d492a7b9d9f453443db039960b7ce3460d9289e024b4bd3b1af","globalSequence":"398248104","codeSequence":"7","abiSequence":"7"},"authorization":[{"actor":"eosio","permission":"active"}],"ramOps":null,"dtrxOps":null,"tableOps":null,"dbOps":[{"operation":"UPD","oldPayer":"eosdacserval","newPayer":"eosdacserval","key":{"code":"eosio","table":"producers","scope":"eosio","key":"eosdacserval"},"oldData":"10cdbe0a23933055048a38058cb18c430002287039ea488ae1398c60a5e66350dfbdadf59faeffce2be7fccfafd5c30bedcf011168747470733a2f2f656f736461632e696f751500004036faa2638805003a03","newData":"10cdbe0a23933055048a38058cb18c430002287039ea488ae1398c60a5e66350dfbdadf59faeffce2be7fccfafd5c30bedcf011168747470733a2f2f656f736461632e696f761500004036faa2638805003a03","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global","scope":"eosio","key":"global"},"oldData":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b000010000004000600000000001000000002d2e353030000002a63869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000008a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48","newData":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b000010000004000600000000001000000002d2e353030000002a63869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000009a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global2","scope":"eos
io","key":"global2"},"oldData":"00009a86cd48ff86cd48ecc60b5659d7e24401","newData":"00009a86cd480087cd48ecc60b5659d7e24401","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global3","scope":"eosio","key":"global3"},"oldData":"809029437288050014705804f791cd43","newData":"809029437288050014705804f791cd43","oldJSON":null,"newJSON":null}],"console":"","contextFree":false,"elapsed":"281","exceptJSON":null,"isNotify":false,"isMatchingQuery":true,"createdActions":[],"creatorAction":null,"closestUnnotifiedAncestorAction":null}],"topLevelActions":[{"seq":"398248104","receiver":"eosio","account":"eosio","name":"onblock","data":{"h":{"timestamp":1221428992,"producer":"eosdacserval","confirmed":0,"previous":"01a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f95","transaction_mroot":"0000000000000000000000000000000000000000000000000000000000000000","action_mroot":"ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054","schedule_version":178,"new_producers":null}},"json":{"h":{"timestamp":1221428992,"producer":"eosdacserval","confirmed":0,"previous":"01a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f95","transaction_mroot":"0000000000000000000000000000000000000000000000000000000000000000","action_mroot":"ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054","schedule_version":178,"new_producers":null}},"hexData":"0087cd4810cdbe0a23933055000001a7dc73386679e426a66c50a31702c10e37c1b4215e0a3cd598f23c39c35f950000000000000000000000000000000000000000000000000000000000000000ab4d5a6811722690612b7466d50bd6445afbeee970fb74922bf0a964f5320054b20000000000","receipt":{"receiver":"eosio","digest":"da84489f49b49d492a7b9d9f453443db039960b7ce3460d9289e024b4bd3b1af","globalSequence":"398248104","codeSequence":"7","abiSequence":"7"},"authorization":[{"actor":"eosio","permission":"active"}],"ramOps":null,"dtrxOps":null,"tableOps":null,"dbOps":[{"operation":"UPD","oldPayer":"eosdacserval","newPayer":"eosdacserval","key":{"code":"eosio","table":"producers","scope":"eosio","key":"eosdacserval"},"oldData":"10cdbe0a23933055048a38058cb18c430002287039ea488ae1398c60a5e66350dfbdadf59faeffce2be7fccfafd5c30bedcf011168747470733a2f2f656f736461632e696f751500004036faa2638805003a03","newData":"10cdbe0a23933055048a38058cb18c430002287039ea488ae1398c60a5e66350dfbdadf59faeffce2be7fccfafd5c30bedcf011168747470733a2f2f656f736461632e696f761500004036faa2638805003a03","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global","scope":"eosio","key":"global"},"oldData":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b000010000004000600000000001000000002d2e353030000002a63869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000008a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48","newData":"0000080000000000e8030000ffff07000c000000f40100001400000064000000400d0300c4090000f049020064000000100e00005802000080533b000010000004000600000000001000000002d2e353030000002a63869c00000000e486cd4880eae0bd6d880500e09f04f20000000005ba09850500000009a26f00e20f54aab501000040f90d35587b05001500b637373a4605d5434193cb48","oldJSON":null,"newJSON":null},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global2","scope":"eosio","key":"global2"},"oldData":"00009a86cd48ff86cd48ecc60b5659d7e24401","newData":"00009a86cd480087cd48ecc60b5659d7e24401","oldJSON":null,"newJSON":nu
ll},{"operation":"UPD","oldPayer":"eosio","newPayer":"eosio","key":{"code":"eosio","table":"global3","scope":"eosio","key":"global3"},"oldData":"809029437288050014705804f791cd43","newData":"809029437288050014705804f791cd43","oldJSON":null,"newJSON":null}],"console":"","contextFree":false,"elapsed":"281","exceptJSON":null,"isNotify":false,"isMatchingQuery":true,"createdActions":[],"creatorAction":null,"closestUnnotifiedAncestorAction":null}],"exceptJSON":null}}]}}}`
//
// resp := s.Exec(context.Background(), q, "", make(map[string]interface{}))
// fmt.Println(resp.Errors)
// require.Len(t, resp.Errors, 0)
//
// out := gjson.GetBytes(resp.Data, "TestSearch").Str
//
// dmp := diffmatchpatch.New()
//
// diffs := dmp.DiffMain(out, expected, false)
//
// fmt.Println("diff", dmp.DiffPrettyText(diffs))
//}
| [
"\"TEST_LOG\""
]
| []
| [
"TEST_LOG"
]
| [] | ["TEST_LOG"] | go | 1 | 0 | |
util/config.go | package util
import (
"fmt"
"os"
"os/user"
"time"
"github.com/FactomProject/factomd/common/primitives"
"github.com/FactomProject/factomd/log"
"gopkg.in/gcfg.v1"
)
var _ = fmt.Print
type FactomdConfig struct {
App struct {
PortNumber int
HomeDir string
ControlPanelPort int
ControlPanelFilesPath string
ControlPanelSetting string
DBType string
LdbPath string
BoltDBPath string
DataStorePath string
DirectoryBlockInSeconds int
ExportData bool
ExportDataSubpath string
FastBoot bool
FastBootLocation string
NodeMode string
IdentityChainID string
LocalServerPrivKey string
LocalServerPublicKey string
ExchangeRate uint64
ExchangeRateChainId string
ExchangeRateAuthorityPublicKey string
ExchangeRateAuthorityPublicKeyMainNet string
ExchangeRateAuthorityPublicKeyTestNet string
ExchangeRateAuthorityPublicKeyLocalNet string
// Network Configuration
Network string
MainNetworkPort string
PeersFile string
MainSeedURL string
MainSpecialPeers string
TestNetworkPort string
TestSeedURL string
TestSpecialPeers string
LocalNetworkPort string
LocalSeedURL string
LocalSpecialPeers string
CustomBootstrapIdentity string
CustomBootstrapKey string
FactomdTlsEnabled bool
FactomdTlsPrivateKey string
FactomdTlsPublicCert string
FactomdRpcUser string
FactomdRpcPass string
ChangeAcksHeight uint32
}
Peer struct {
AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"`
ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"`
Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 8108, testnet: 18108)"`
MaxPeers int `long:"maxpeers" description:"Max number of inbound and outbound peers"`
BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
TestNet bool `long:"testnet" description:"Use the test network"`
SimNet bool `long:"simnet" description:"Use the simulation test network"`
}
Log struct {
LogPath string
LogLevel string
ConsoleLogLevel string
}
Wallet struct {
Address string
Port int
DataFile string
RefreshInSeconds string
BoltDBPath string
FactomdAddress string
FactomdPort int
}
Walletd struct {
WalletRpcUser string
WalletRpcPass string
WalletTlsEnabled bool
WalletTlsPrivateKey string
WalletTlsPublicCert string
FactomdLocation string
WalletdLocation string
}
}
// defaultConfig
const defaultConfig = `
; ------------------------------------------------------------------------------
; App settings
; ------------------------------------------------------------------------------
[app]
PortNumber = 8088
HomeDir = ""
; --------------- ControlPanel disabled | readonly | readwrite
ControlPanelSetting = readonly
ControlPanelPort = 8090
; --------------- DBType: LDB | Bolt | Map
DBType = "LDB"
LdbPath = "database/ldb"
BoltDBPath = "database/bolt"
DataStorePath = "data/export"
DirectoryBlockInSeconds = 6
ExportData = false
ExportDataSubpath = "database/export/"
FastBoot = true
FastBootLocation = ""
; --------------- Network: MAIN | TEST | LOCAL
Network = MAIN
PeersFile = "peers.json"
MainNetworkPort = 8108
MainSeedURL = "https://raw.githubusercontent.com/FactomProject/factomproject.github.io/master/seed/mainseed.txt"
MainSpecialPeers = ""
TestNetworkPort = 8109
TestSeedURL = "https://raw.githubusercontent.com/FactomProject/factomproject.github.io/master/seed/testseed.txt"
TestSpecialPeers = ""
LocalNetworkPort = 8110
LocalSeedURL = "https://raw.githubusercontent.com/FactomProject/factomproject.github.io/master/seed/localseed.txt"
LocalSpecialPeers = ""
CustomBootstrapIdentity = 38bab1455b7bd7e5efd15c53c777c79d0c988e9210f1da49a99d95b3a6417be9
CustomBootstrapKey = cc1985cdfae4e32b5a454dfda8ce5e1361558482684f3367649c3ad852c8e31a
; --------------- NodeMode: FULL | SERVER ----------------
NodeMode = FULL
LocalServerPrivKey = 4c38c72fc5cdad68f13b74674d3ffb1f3d63a112710868c9b08946553448d26d
LocalServerPublicKey = cc1985cdfae4e32b5a454dfda8ce5e1361558482684f3367649c3ad852c8e31a
ExchangeRateChainId = 111111118d918a8be684e0dac725493a75862ef96d2d3f43f84b26969329bf03
ExchangeRateAuthorityPublicKeyMainNet = daf5815c2de603dbfa3e1e64f88a5cf06083307cf40da4a9b539c41832135b4a
ExchangeRateAuthorityPublicKeyTestNet = 1d75de249c2fc0384fb6701b30dc86b39dc72e5a47ba4f79ef250d39e21e7a4f
; Private key all zeroes:
ExchangeRateAuthorityPublicKeyLocalNet = 3b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29
; These define if the RPC and Control Panel connection to factomd should be encrypted, and if it is, what files
; are the secret key and the public certificate. factom-cli and factom-walletd uses the certificate specified here if TLS is enabled.
; To use default files and paths leave /full/path/to/... in place.
FactomdTlsEnabled = false
FactomdTlsPrivateKey = "/full/path/to/factomdAPIpriv.key"
FactomdTlsPublicCert = "/full/path/to/factomdAPIpub.cert"
; These are the username and password that factomd requires for the RPC API and the Control Panel
; This file is also used by factom-cli and factom-walletd to determine what login to use
FactomdRpcUser = ""
FactomdRpcPass = ""
; Specifying when to change ACKs for switching leader servers
ChangeAcksHeight = 0
; ------------------------------------------------------------------------------
; logLevel - allowed values are: debug, info, notice, warning, error, critical, alert, emergency and none
; ConsoleLogLevel - allowed values are: debug, standard
; ------------------------------------------------------------------------------
[log]
logLevel = error
LogPath = "database/Log"
ConsoleLogLevel = standard
; ------------------------------------------------------------------------------
; Configurations for factom-walletd
; ------------------------------------------------------------------------------
[Walletd]
; These are the username and password that factom-walletd requires
; This file is also used by factom-cli to determine what login to use
WalletRpcUser = ""
WalletRpcPass = ""
; These define if the connection to the wallet should be encrypted, and if it is, what files
; are the secret key and the public certificate. factom-cli uses the certificate specified here if TLS is enabled.
; To use default files and paths leave /full/path/to/... in place.
WalletTlsEnabled = false
WalletTlsPrivateKey = "/full/path/to/walletAPIpriv.key"
WalletTlsPublicCert = "/full/path/to/walletAPIpub.cert"
; This is where factom-walletd and factom-cli will find factomd to interact with the blockchain
; This value can also be updated to authorize an external ip or domain name when factomd creates a TLS cert
FactomdLocation = "localhost:8088"
; This is where factom-cli will find factom-walletd to create Factoid and Entry Credit transactions
; This value can also be updated to authorize an external ip or domain name when factom-walletd creates a TLS cert
WalletdLocation = "localhost:8089"
`
func (s *FactomdConfig) String() string {
var out primitives.Buffer
out.WriteString(fmt.Sprintf("\nFactomd Config"))
out.WriteString(fmt.Sprintf("\n App"))
out.WriteString(fmt.Sprintf("\n PortNumber %v", s.App.PortNumber))
out.WriteString(fmt.Sprintf("\n HomeDir %v", s.App.HomeDir))
out.WriteString(fmt.Sprintf("\n ControlPanelPort %v", s.App.ControlPanelPort))
out.WriteString(fmt.Sprintf("\n ControlPanelFilesPath %v", s.App.ControlPanelFilesPath))
out.WriteString(fmt.Sprintf("\n ControlPanelSetting %v", s.App.ControlPanelSetting))
out.WriteString(fmt.Sprintf("\n DBType %v", s.App.DBType))
out.WriteString(fmt.Sprintf("\n LdbPath %v", s.App.LdbPath))
out.WriteString(fmt.Sprintf("\n BoltDBPath %v", s.App.BoltDBPath))
out.WriteString(fmt.Sprintf("\n DataStorePath %v", s.App.DataStorePath))
out.WriteString(fmt.Sprintf("\n DirectoryBlockInSeconds %v", s.App.DirectoryBlockInSeconds))
out.WriteString(fmt.Sprintf("\n ExportData %v", s.App.ExportData))
out.WriteString(fmt.Sprintf("\n ExportDataSubpath %v", s.App.ExportDataSubpath))
out.WriteString(fmt.Sprintf("\n Network %v", s.App.Network))
out.WriteString(fmt.Sprintf("\n MainNetworkPort %v", s.App.MainNetworkPort))
out.WriteString(fmt.Sprintf("\n PeersFile %v", s.App.PeersFile))
out.WriteString(fmt.Sprintf("\n MainSeedURL %v", s.App.MainSeedURL))
out.WriteString(fmt.Sprintf("\n MainSpecialPeers %v", s.App.MainSpecialPeers))
out.WriteString(fmt.Sprintf("\n TestNetworkPort %v", s.App.TestNetworkPort))
out.WriteString(fmt.Sprintf("\n TestSeedURL %v", s.App.TestSeedURL))
out.WriteString(fmt.Sprintf("\n TestSpecialPeers %v", s.App.TestSpecialPeers))
out.WriteString(fmt.Sprintf("\n LocalNetworkPort %v", s.App.LocalNetworkPort))
out.WriteString(fmt.Sprintf("\n LocalSeedURL %v", s.App.LocalSeedURL))
out.WriteString(fmt.Sprintf("\n LocalSpecialPeers %v", s.App.LocalSpecialPeers))
out.WriteString(fmt.Sprintf("\n CustomBootstrapIdentity %v", s.App.CustomBootstrapIdentity))
out.WriteString(fmt.Sprintf("\n CustomBootstrapKey %v", s.App.CustomBootstrapKey))
out.WriteString(fmt.Sprintf("\n NodeMode %v", s.App.NodeMode))
out.WriteString(fmt.Sprintf("\n IdentityChainID %v", s.App.IdentityChainID))
out.WriteString(fmt.Sprintf("\n LocalServerPrivKey %v", s.App.LocalServerPrivKey))
out.WriteString(fmt.Sprintf("\n LocalServerPublicKey %v", s.App.LocalServerPublicKey))
out.WriteString(fmt.Sprintf("\n ExchangeRate %v", s.App.ExchangeRate))
out.WriteString(fmt.Sprintf("\n ExchangeRateChainId %v", s.App.ExchangeRateChainId))
out.WriteString(fmt.Sprintf("\n ExchangeRateAuthorityPublicKey %v", s.App.ExchangeRateAuthorityPublicKey))
out.WriteString(fmt.Sprintf("\n FactomdTlsEnabled %v", s.App.FactomdTlsEnabled))
out.WriteString(fmt.Sprintf("\n FactomdTlsPrivateKey %v", s.App.FactomdTlsPrivateKey))
out.WriteString(fmt.Sprintf("\n FactomdTlsPublicCert %v", s.App.FactomdTlsPublicCert))
out.WriteString(fmt.Sprintf("\n FactomdRpcUser %v", s.App.FactomdRpcUser))
out.WriteString(fmt.Sprintf("\n FactomdRpcPass %v", s.App.FactomdRpcPass))
out.WriteString(fmt.Sprintf("\n ChangeAcksHeight %v", s.App.ChangeAcksHeight))
out.WriteString(fmt.Sprintf("\n Log"))
out.WriteString(fmt.Sprintf("\n LogPath %v", s.Log.LogPath))
out.WriteString(fmt.Sprintf("\n LogLevel %v", s.Log.LogLevel))
out.WriteString(fmt.Sprintf("\n ConsoleLogLevel %v", s.Log.ConsoleLogLevel))
out.WriteString(fmt.Sprintf("\n Walletd"))
out.WriteString(fmt.Sprintf("\n WalletRpcUser %v", s.Walletd.WalletRpcUser))
out.WriteString(fmt.Sprintf("\n WalletRpcPass %v", s.Walletd.WalletRpcPass))
out.WriteString(fmt.Sprintf("\n WalletTlsEnabled %v", s.Walletd.WalletTlsEnabled))
out.WriteString(fmt.Sprintf("\n WalletTlsPrivateKey %v", s.Walletd.WalletTlsPrivateKey))
out.WriteString(fmt.Sprintf("\n WalletTlsPublicCert %v", s.Walletd.WalletTlsPublicCert))
out.WriteString(fmt.Sprintf("\n FactomdLocation %v", s.Walletd.FactomdLocation))
out.WriteString(fmt.Sprintf("\n WalletdLocation %v", s.Walletd.WalletdLocation))
return out.String()
}
func ConfigFilename() string {
return GetHomeDir() + "/.factom/m2/factomd.conf"
}
func GetConfigFilename(dir string) string {
return GetHomeDir() + "/.factom/" + dir + "/factomd.conf"
}
func GetChangeAcksHeight(filename string) (change uint32, err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("Error getting acks - %v\n", r)
}
}()
config := ReadConfig(filename)
return config.App.ChangeAcksHeight, nil
}
func ReadConfig(filename string) *FactomdConfig {
if filename == "" {
filename = ConfigFilename()
}
cfg := new(FactomdConfig)
err := gcfg.ReadStringInto(cfg, defaultConfig)
if err != nil {
panic(err)
}
err = gcfg.FatalOnly(gcfg.ReadFileInto(cfg, filename))
if err != nil {
log.Printfln("Reading from '%s'", filename)
log.Printfln("Cannot open custom config file,\nStarting with default settings.\n%v\n", err)
err = gcfg.ReadStringInto(cfg, defaultConfig)
if err != nil {
panic(err)
}
}
// Default to home directory if not set
if len(cfg.App.HomeDir) < 1 {
cfg.App.HomeDir = GetHomeDir() + "/.factom/m2/"
} else {
cfg.App.HomeDir = cfg.App.HomeDir + "/.factom/m2/"
}
if len(cfg.App.FastBootLocation) < 1 {
cfg.App.FastBootLocation = cfg.App.HomeDir
}
	switch cfg.App.Network {
	case "MAIN":
		cfg.App.ExchangeRateAuthorityPublicKey = cfg.App.ExchangeRateAuthorityPublicKeyMainNet
	case "TEST":
		cfg.App.ExchangeRateAuthorityPublicKey = cfg.App.ExchangeRateAuthorityPublicKeyTestNet
	case "LOCAL":
		cfg.App.ExchangeRateAuthorityPublicKey = cfg.App.ExchangeRateAuthorityPublicKeyLocalNet
	}
return cfg
}
func GetHomeDir() string {
factomhome := os.Getenv("FACTOM_HOME")
if factomhome != "" {
return factomhome
}
// Get the OS specific home directory via the Go standard lib.
var homeDir string
usr, err := user.Current()
if err == nil {
homeDir = usr.HomeDir
}
// Fall back to standard HOME environment variable that works
// for most POSIX OSes if the directory from the Go standard
// lib failed.
if err != nil || homeDir == "" {
homeDir = os.Getenv("HOME")
}
return homeDir
}
| [
"\"FACTOM_HOME\"",
"\"HOME\""
]
| []
| [
"HOME",
"FACTOM_HOME"
]
| [] | ["HOME", "FACTOM_HOME"] | go | 2 | 0 | |
web/apps.py | import os
from django.apps import AppConfig
from django.conf import settings
class WebConfig(AppConfig):
name = "web"
def ready(self):
debug = settings.DEBUG
        is_autoreload_child = os.environ.get("RUN_MAIN") == "true"
# When using the development server, `ready` is invoked twice; since
# the call to `start` is not idempotent, we need to prevent it from
# being called a second time.
# HOWEVER, the value of `RUN_MAIN` we need to check depends on whether
# we're running under the development server or in production, which makes
# the condition below easy to get wrong (see the truth table sketched below).
if (debug and not run_main) or (not debug and run_main):
from .jobs import scheduler
scheduler.start()
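# A quick sketch of how the condition above plays out. The RUN_MAIN values are
# the ones Django's autoreloader sets; everything else is illustrative:
#
#   DEBUG=True,  reloader parent (RUN_MAIN unset)    -> run_main=True  -> start() skipped
#   DEBUG=True,  reloader child  (RUN_MAIN="true")   -> run_main=False -> scheduler.start()
#   DEBUG=False, production worker (RUN_MAIN unset)  -> run_main=True  -> scheduler.start()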
| []
| []
| [
"RUN_MAIN"
]
| [] | ["RUN_MAIN"] | python | 1 | 0 | |
common/persistence/sql/sqlplugin/mysql/tls.go | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package mysql
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"github.com/go-sql-driver/mysql"
"go.temporal.io/server/common/auth"
"go.temporal.io/server/common/config"
)
func registerTLSConfig(cfg *config.SQL) error {
if cfg.TLS == nil || !cfg.TLS.Enabled {
return nil
}
// TODO: create a way to set MinVersion and CipherSuites via cfg.
tlsConfig := auth.NewTLSConfigForServer(cfg.TLS.ServerName, cfg.TLS.EnableHostVerification)
if cfg.TLS.CaFile != "" {
rootCertPool := x509.NewCertPool()
pem, err := ioutil.ReadFile(cfg.TLS.CaFile)
if err != nil {
return fmt.Errorf("failed to load CA files: %v", err)
}
if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
return fmt.Errorf("failed to append CA file")
}
tlsConfig.RootCAs = rootCertPool
}
if cfg.TLS.CertFile != "" && cfg.TLS.KeyFile != "" {
clientCert := make([]tls.Certificate, 0, 1)
certs, err := tls.LoadX509KeyPair(
cfg.TLS.CertFile,
cfg.TLS.KeyFile,
)
if err != nil {
return fmt.Errorf("failed to load tls x509 key pair: %v", err)
}
clientCert = append(clientCert, certs)
tlsConfig.Certificates = clientCert
}
// In order to use the TLS configuration you need to register it. Once registered you use it by specifying
// `tls` in the connect attributes.
err := mysql.RegisterTLSConfig(customTLSName, tlsConfig)
if err != nil {
return fmt.Errorf("failed to register tls config: %v", err)
}
if cfg.ConnectAttributes == nil {
cfg.ConnectAttributes = map[string]string{}
}
// If no `tls` connect attribute is provided then we override it to our newly registered tls config automatically.
// This allows users to simply provide a tls config without needing to remember to also set the connect attribute
if cfg.ConnectAttributes["tls"] == "" {
cfg.ConnectAttributes["tls"] = customTLSName
}
return nil
}
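// A hedged sketch of how the registered config is consumed: whatever builds the
// MySQL DSN for this plugin is expected to end up with a `tls` connect attribute,
// roughly as below. Host, credentials and database name are placeholders.
//
//     user:pass@tcp(mysql.example.com:3306)/temporal?tls=<value of customTLSName>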
| []
| []
| []
| [] | [] | go | null | null | null |
AutoOut/wsgi.py | """
WSGI config for AutoOut project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import math
import multiprocessing
import os
from distributed import LocalCluster, Client
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AutoOut.settings')
# Start dask cluster
no_cpus = multiprocessing.cpu_count()
threads_per_worker = 2
no_workers = math.floor((no_cpus-1)/threads_per_worker)
c = LocalCluster(processes=False, n_workers=no_workers, threads_per_worker=threads_per_worker)
dask_client = Client(c)
application = get_wsgi_application()
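# Worked example of the worker sizing above (CPU count assumed): with
# no_cpus = 8 and threads_per_worker = 2, no_workers = floor((8 - 1) / 2) = 3,
# i.e. three 2-thread workers, leaving one core of headroom for the web process.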
| []
| []
| []
| [] | [] | python | 0 | 0 | |
sparse/tests/test_coo.py | import contextlib
import operator
import pickle
import sys
from functools import reduce
import numpy as np
import pytest
import scipy.sparse
import scipy.stats
import sparse
from sparse import COO
from sparse._settings import NEP18_ENABLED
from sparse._utils import assert_eq, random_value_array, html_table
@pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"])
def random_sparse(request):
dtype = request.param
if np.issubdtype(dtype, np.integer):
def data_rvs(n):
return np.random.randint(-1000, 1000, n)
else:
data_rvs = None
return sparse.random((20, 30, 40), density=0.25, data_rvs=data_rvs).astype(dtype)
@pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"])
def random_sparse_small(request):
dtype = request.param
if np.issubdtype(dtype, np.integer):
def data_rvs(n):
return np.random.randint(-10, 10, n)
else:
data_rvs = None
return sparse.random((20, 30, 40), density=0.25, data_rvs=data_rvs).astype(dtype)
@pytest.mark.parametrize(
"reduction, kwargs", [("sum", {}), ("sum", {"dtype": np.float32}), ("prod", {})]
)
@pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)])
@pytest.mark.parametrize("keepdims", [True, False])
def test_reductions_fv(reduction, random_sparse_small, axis, keepdims, kwargs):
x = random_sparse_small + np.random.randint(-1, 1, dtype="i4")
y = x.todense()
xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs)
yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs)
assert_eq(xx, yy)
@pytest.mark.parametrize(
"reduction, kwargs",
[
("sum", {}),
("sum", {"dtype": np.float32}),
("mean", {}),
("mean", {"dtype": np.float32}),
("prod", {}),
("max", {}),
("min", {}),
("std", {}),
("var", {}),
],
)
@pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)])
@pytest.mark.parametrize("keepdims", [True, False])
def test_reductions(reduction, random_sparse, axis, keepdims, kwargs):
x = random_sparse
y = x.todense()
xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs)
yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs)
assert_eq(xx, yy)
@pytest.mark.xfail(
reason=("Setting output dtype=float16 produces results " "inconsistent with numpy")
)
@pytest.mark.filterwarnings("ignore:overflow")
@pytest.mark.parametrize(
"reduction, kwargs",
[("sum", {"dtype": np.float16}), ("mean", {"dtype": np.float16})],
)
@pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2)])
def test_reductions_float16(random_sparse, reduction, kwargs, axis):
x = random_sparse
y = x.todense()
xx = getattr(x, reduction)(axis=axis, **kwargs)
yy = getattr(y, reduction)(axis=axis, **kwargs)
assert_eq(xx, yy, atol=1e-2)
@pytest.mark.parametrize("reduction,kwargs", [("any", {}), ("all", {})])
@pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)])
@pytest.mark.parametrize("keepdims", [True, False])
def test_reductions_bool(random_sparse, reduction, kwargs, axis, keepdims):
y = np.zeros((2, 3, 4), dtype=bool)
y[0] = True
y[1, 1, 1] = True
x = sparse.COO.from_numpy(y)
xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs)
yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs)
assert_eq(xx, yy)
@pytest.mark.parametrize(
"reduction,kwargs",
[
(np.max, {}),
(np.sum, {}),
(np.sum, {"dtype": np.float32}),
(np.mean, {}),
(np.mean, {"dtype": np.float32}),
(np.prod, {}),
(np.min, {}),
],
)
@pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -1, (0, -1)])
@pytest.mark.parametrize("keepdims", [True, False])
def test_ufunc_reductions(random_sparse, reduction, kwargs, axis, keepdims):
x = random_sparse
y = x.todense()
xx = reduction(x, axis=axis, keepdims=keepdims, **kwargs)
yy = reduction(y, axis=axis, keepdims=keepdims, **kwargs)
assert_eq(xx, yy)
# If not a scalar/1 element array, must be a sparse array
if xx.size > 1:
assert isinstance(xx, COO)
@pytest.mark.parametrize(
"reduction,kwargs",
[
(np.max, {}),
(np.sum, {"axis": 0}),
(np.prod, {"keepdims": True}),
(np.add.reduce, {}),
(np.add.reduce, {"keepdims": True}),
(np.minimum.reduce, {"axis": 0}),
],
)
def test_ufunc_reductions_kwargs(reduction, kwargs):
x = sparse.random((2, 3, 4), density=0.5)
y = x.todense()
xx = reduction(x, **kwargs)
yy = reduction(y, **kwargs)
assert_eq(xx, yy)
# If not a scalar/1 element array, must be a sparse array
if xx.size > 1:
assert isinstance(xx, COO)
@pytest.mark.parametrize(
"reduction", ["nansum", "nanmean", "nanprod", "nanmax", "nanmin"]
)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("keepdims", [False])
@pytest.mark.parametrize("fraction", [0.25, 0.5, 0.75, 1.0])
@pytest.mark.filterwarnings("ignore:All-NaN")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
def test_nan_reductions(reduction, axis, keepdims, fraction):
s = sparse.random(
(2, 3, 4), data_rvs=random_value_array(np.nan, fraction), density=0.25
)
x = s.todense()
expected = getattr(np, reduction)(x, axis=axis, keepdims=keepdims)
actual = getattr(sparse, reduction)(s, axis=axis, keepdims=keepdims)
assert_eq(expected, actual)
@pytest.mark.parametrize("reduction", ["nanmax", "nanmin", "nanmean"])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_all_nan_reduction_warning(reduction, axis):
x = random_value_array(np.nan, 1.0)(2 * 3 * 4).reshape(2, 3, 4)
s = COO.from_numpy(x)
with pytest.warns(RuntimeWarning):
getattr(sparse, reduction)(s, axis=axis)
@pytest.mark.parametrize(
"axis",
[None, (1, 2, 0), (2, 1, 0), (0, 1, 2), (0, 1, -1), (0, -2, -1), (-3, -2, -1)],
)
def test_transpose(axis):
x = sparse.random((2, 3, 4), density=0.25)
y = x.todense()
xx = x.transpose(axis)
yy = y.transpose(axis)
assert_eq(xx, yy)
@pytest.mark.parametrize(
"axis",
[
(0, 1), # too few
(0, 1, 2, 3), # too many
(3, 1, 0), # axis 3 illegal
(0, -1, -4), # axis -4 illegal
(0, 0, 1), # duplicate axis 0
(0, -1, 2), # duplicate axis -1 == 2
0.3, # Invalid type in axis
((0, 1, 2),), # Iterable inside iterable
],
)
def test_transpose_error(axis):
x = sparse.random((2, 3, 4), density=0.25)
with pytest.raises(ValueError):
x.transpose(axis)
@pytest.mark.parametrize(
"a,b",
[
[(3, 4), (5, 5)],
[(12,), (3, 4)],
[(12,), (3, 6)],
[(5, 5, 5), (6, 6, 6)],
[(3, 4), (9, 4)],
[(5,), (4,)],
[(2, 3, 4, 5), (2, 3, 4, 5, 6)],
[(100,), (5, 5)],
[(2, 3, 4, 5), (20, 6)],
[(), ()],
],
)
def test_resize(a, b):
s = sparse.random(a, density=0.5)
orig_size = s.size
x = s.todense()
x.resize(b)
s.resize(b)
temp = x.reshape(x.size)
temp[orig_size:] = s.fill_value
assert isinstance(s, sparse.SparseArray)
assert_eq(x, s)
def test_resize_upcast():
s = sparse.random((10, 10, 10), density=0.5, format="coo", idx_dtype=np.uint8)
s.resize(600)
assert s.coords.dtype == np.uint16
@pytest.mark.parametrize("axis1", [-3, -2, -1, 0, 1, 2])
@pytest.mark.parametrize("axis2", [-3, -2, -1, 0, 1, 2])
def test_swapaxes(axis1, axis2):
x = sparse.random((2, 3, 4), density=0.25)
y = x.todense()
xx = x.swapaxes(axis1, axis2)
yy = y.swapaxes(axis1, axis2)
assert_eq(xx, yy)
@pytest.mark.parametrize("axis1", [-4, 3])
@pytest.mark.parametrize("axis2", [-4, 3, 0])
def test_swapaxes_error(axis1, axis2):
x = sparse.random((2, 3, 4), density=0.25)
with pytest.raises(ValueError):
x.swapaxes(axis1, axis2)
@pytest.mark.parametrize(
"source, destination",
[
[0, 1],
[2, 1],
[-2, 1],
[-2, -3],
[(0, 1), (2, 3)],
[(-1, 0), (0, 1)],
[(0, 1, 2), (2, 1, 0)],
[(0, 1, 2), (-2, -3, -1)],
],
)
def test_moveaxis(source, destination):
x = sparse.random((2, 3, 4, 5), density=0.25)
y = x.todense()
xx = sparse.moveaxis(x, source, destination)
yy = np.moveaxis(y, source, destination)
assert_eq(xx, yy)
@pytest.mark.parametrize(
"source, destination", [[0, -4], [(0, 5), (1, 2)], [(0, 1, 2), (2, 1)]]
)
def test_moveaxis_error(source, destination):
x = sparse.random((2, 3, 4), density=0.25)
with pytest.raises(ValueError):
sparse.moveaxis(x, source, destination)
@pytest.mark.parametrize(
"a,b",
[
[(3, 4), (5, 5)],
[(12,), (3, 4)],
[(12,), (3, 6)],
[(5, 5, 5), (6, 6, 6)],
[(3, 4), (9, 4)],
[(5,), (4,)],
[(2, 3, 4, 5), (2, 3, 4, 5, 6)],
[(100,), (5, 5)],
[(2, 3, 4, 5), (20, 6)],
[(), ()],
],
)
# Exercises np.resize on the dense array; renamed so it does not shadow the
# method-based test_resize defined earlier in this module.
def test_resize_function(a, b):
s = sparse.random(a, density=0.5)
orig_size = s.size
x = s.todense()
x = np.resize(x, b)
s.resize(b)
temp = x.reshape(x.size)
temp[orig_size:] = s.fill_value
assert isinstance(s, sparse.SparseArray)
assert_eq(x, s)
@pytest.mark.parametrize(
"a,b",
[
[(3, 4), (3, 4)],
[(12,), (3, 4)],
[(12,), (3, -1)],
[(3, 4), (12,)],
[(3, 4), (-1, 4)],
[(3, 4), (3, -1)],
[(2, 3, 4, 5), (8, 15)],
[(2, 3, 4, 5), (24, 5)],
[(2, 3, 4, 5), (20, 6)],
[(), ()],
],
)
def test_reshape(a, b):
s = sparse.random(a, density=0.5)
x = s.todense()
assert_eq(x.reshape(b), s.reshape(b))
def test_large_reshape():
n = 100
m = 10
row = np.arange(
n, dtype=np.uint16
) # np.random.randint(0, n, size=n, dtype=np.uint16)
col = row % m # np.random.randint(0, m, size=n, dtype=np.uint16)
data = np.ones(n, dtype=np.uint8)
x = COO((data, (row, col)), sorted=True, has_duplicates=False)
assert_eq(x, x.reshape(x.shape))
def test_reshape_same():
s = sparse.random((3, 5), density=0.5)
assert s.reshape(s.shape) is s
def test_reshape_function():
s = sparse.random((5, 3), density=0.5)
x = s.todense()
shape = (3, 5)
s2 = np.reshape(s, shape)
assert isinstance(s2, COO)
assert_eq(s2, x.reshape(shape))
def test_reshape_upcast():
a = sparse.random((10, 10, 10), density=0.5, format="coo", idx_dtype=np.uint8)
assert a.reshape(1000).coords.dtype == np.uint16
def test_to_scipy_sparse():
s = sparse.random((3, 5), density=0.5)
a = s.to_scipy_sparse()
b = scipy.sparse.coo_matrix(s.todense())
assert_eq(a, b)
@pytest.mark.parametrize("a_ndim", [1, 2, 3])
@pytest.mark.parametrize("b_ndim", [1, 2, 3])
def test_kron(a_ndim, b_ndim):
a_shape = (2, 3, 4)[:a_ndim]
b_shape = (5, 6, 7)[:b_ndim]
sa = sparse.random(a_shape, density=0.5)
a = sa.todense()
sb = sparse.random(b_shape, density=0.5)
b = sb.todense()
sol = np.kron(a, b)
assert_eq(sparse.kron(sa, sb), sol)
assert_eq(sparse.kron(sa, b), sol)
assert_eq(sparse.kron(a, sb), sol)
with pytest.raises(ValueError):
assert_eq(sparse.kron(a, b), sol)
@pytest.mark.parametrize(
"a_spmatrix, b_spmatrix", [(True, True), (True, False), (False, True)]
)
def test_kron_spmatrix(a_spmatrix, b_spmatrix):
sa = sparse.random((3, 4), density=0.5)
a = sa.todense()
sb = sparse.random((5, 6), density=0.5)
b = sb.todense()
if a_spmatrix:
sa = sa.tocsr()
if b_spmatrix:
sb = sb.tocsr()
sol = np.kron(a, b)
assert_eq(sparse.kron(sa, sb), sol)
assert_eq(sparse.kron(sa, b), sol)
assert_eq(sparse.kron(a, sb), sol)
with pytest.raises(ValueError):
assert_eq(sparse.kron(a, b), sol)
@pytest.mark.parametrize("ndim", [1, 2, 3])
def test_kron_scalar(ndim):
if ndim:
a_shape = (3, 4, 5)[:ndim]
sa = sparse.random(a_shape, density=0.5)
a = sa.todense()
else:
sa = a = np.array(6)
scalar = np.array(5)
sol = np.kron(a, scalar)
assert_eq(sparse.kron(sa, scalar), sol)
assert_eq(sparse.kron(scalar, sa), sol)
def test_gt():
s = sparse.random((2, 3, 4), density=0.5)
x = s.todense()
m = x.mean()
assert_eq(x > m, s > m)
m = s.data[2]
assert_eq(x > m, s > m)
assert_eq(x >= m, s >= m)
@pytest.mark.parametrize(
"index",
[
# Integer
0,
1,
-1,
(1, 1, 1),
# Pure slices
(slice(0, 2),),
(slice(None, 2), slice(None, 2)),
(slice(1, None), slice(1, None)),
(slice(None, None),),
(slice(None, None, -1),),
(slice(None, 2, -1), slice(None, 2, -1)),
(slice(1, None, 2), slice(1, None, 2)),
(slice(None, None, 2),),
(slice(None, 2, -1), slice(None, 2, -2)),
(slice(1, None, 2), slice(1, None, 1)),
(slice(None, None, -2),),
# Combinations
(0, slice(0, 2)),
(slice(0, 1), 0),
(None, slice(1, 3), 0),
(slice(0, 3), None, 0),
(slice(1, 2), slice(2, 4)),
(slice(1, 2), slice(None, None)),
(slice(1, 2), slice(None, None), 2),
(slice(1, 2, 2), slice(None, None), 2),
(slice(1, 2, None), slice(None, None, 2), 2),
(slice(1, 2, -2), slice(None, None), -2),
(slice(1, 2, None), slice(None, None, -2), 2),
(slice(1, 2, -1), slice(None, None), -1),
(slice(1, 2, None), slice(None, None, -1), 2),
(slice(2, 0, -1), slice(None, None), -1),
(slice(-2, None, None),),
(slice(-1, None, None), slice(-2, None, None)),
# With ellipsis
(Ellipsis, slice(1, 3)),
(1, Ellipsis, slice(1, 3)),
(slice(0, 1), Ellipsis),
(Ellipsis, None),
(None, Ellipsis),
(1, Ellipsis),
(1, Ellipsis, None),
(1, 1, 1, Ellipsis),
(Ellipsis, 1, None),
# With multi-axis advanced indexing
([0, 1],) * 2,
([0, 1], [0, 2]),
([0, 0, 0], [0, 1, 2], [1, 2, 1]),
# Pathological - Slices larger than array
(slice(None, 1000)),
(slice(None), slice(None, 1000)),
(slice(None), slice(1000, -1000, -1)),
(slice(None), slice(1000, -1000, -50)),
# Pathological - Wrong ordering of start/stop
(slice(5, 0),),
(slice(0, 5, -1),),
(slice(0, 0, None),),
],
)
def test_slicing(index):
s = sparse.random((2, 3, 4), density=0.5)
x = s.todense()
assert_eq(x[index], s[index])
@pytest.mark.parametrize(
"index",
[
([1, 0], 0),
(1, [0, 2]),
(0, [1, 0], 0),
(1, [2, 0], 0),
(1, [], 0),
([True, False], slice(1, None), slice(-2, None)),
(slice(1, None), slice(-2, None), [True, False, True, False]),
([1, 0],),
(Ellipsis, [2, 1, 3]),
(slice(None), [2, 1, 2]),
(1, [2, 0, 1]),
],
)
def test_advanced_indexing(index):
s = sparse.random((2, 3, 4), density=0.5)
x = s.todense()
assert_eq(x[index], s[index])
def test_custom_dtype_slicing():
dt = np.dtype(
[("part1", np.float_), ("part2", np.int_, (2,)), ("part3", np.int_, (2, 2))]
)
x = np.zeros((2, 3, 4), dtype=dt)
x[1, 1, 1] = (0.64, [4, 2], [[1, 2], [3, 0]])
s = COO.from_numpy(x)
assert x[1, 1, 1] == s[1, 1, 1]
assert x[0, 1, 2] == s[0, 1, 2]
assert_eq(x["part1"], s["part1"])
assert_eq(x["part2"], s["part2"])
assert_eq(x["part3"], s["part3"])
@pytest.mark.parametrize(
"index",
[
(Ellipsis, Ellipsis),
(1, 1, 1, 1),
(slice(None),) * 4,
5,
-5,
"foo",
[True, False, False],
0.5,
[0.5],
{"potato": "kartoffel"},
([[0, 1]],),
],
)
def test_slicing_errors(index):
s = sparse.random((2, 3, 4), density=0.5)
with pytest.raises(IndexError):
s[index]
def test_concatenate():
xx = sparse.random((2, 3, 4), density=0.5)
x = xx.todense()
yy = sparse.random((5, 3, 4), density=0.5)
y = yy.todense()
zz = sparse.random((4, 3, 4), density=0.5)
z = zz.todense()
assert_eq(
np.concatenate([x, y, z], axis=0), sparse.concatenate([xx, yy, zz], axis=0)
)
xx = sparse.random((5, 3, 1), density=0.5)
x = xx.todense()
yy = sparse.random((5, 3, 3), density=0.5)
y = yy.todense()
zz = sparse.random((5, 3, 2), density=0.5)
z = zz.todense()
assert_eq(
np.concatenate([x, y, z], axis=2), sparse.concatenate([xx, yy, zz], axis=2)
)
assert_eq(
np.concatenate([x, y, z], axis=-1), sparse.concatenate([xx, yy, zz], axis=-1)
)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("func", [sparse.stack, sparse.concatenate])
def test_concatenate_mixed(func, axis):
s = sparse.random((10, 10), density=0.5)
d = s.todense()
with pytest.raises(ValueError):
func([d, s, s], axis=axis)
def test_concatenate_noarrays():
with pytest.raises(ValueError):
sparse.concatenate([])
@pytest.mark.parametrize("shape", [(5,), (2, 3, 4), (5, 2)])
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_stack(shape, axis):
xx = sparse.random(shape, density=0.5)
x = xx.todense()
yy = sparse.random(shape, density=0.5)
y = yy.todense()
zz = sparse.random(shape, density=0.5)
z = zz.todense()
assert_eq(np.stack([x, y, z], axis=axis), sparse.stack([xx, yy, zz], axis=axis))
def test_large_concat_stack():
data = np.array([1], dtype=np.uint8)
coords = np.array([[255]], dtype=np.uint8)
xs = COO(coords, data, shape=(256,), has_duplicates=False, sorted=True)
x = xs.todense()
assert_eq(np.stack([x, x]), sparse.stack([xs, xs]))
assert_eq(np.concatenate((x, x)), sparse.concatenate((xs, xs)))
def test_addition():
a = sparse.random((2, 3, 4), density=0.5)
x = a.todense()
b = sparse.random((2, 3, 4), density=0.5)
y = b.todense()
assert_eq(x + y, a + b)
assert_eq(x - y, a - b)
@pytest.mark.parametrize("scalar", [2, 2.5, np.float32(2.0), np.int8(3)])
def test_scalar_multiplication(scalar):
a = sparse.random((2, 3, 4), density=0.5)
x = a.todense()
assert_eq(x * scalar, a * scalar)
assert (a * scalar).nnz == a.nnz
assert_eq(scalar * x, scalar * a)
assert (scalar * a).nnz == a.nnz
assert_eq(x / scalar, a / scalar)
assert (a / scalar).nnz == a.nnz
assert_eq(x // scalar, a // scalar)
# division may reduce nnz.
@pytest.mark.filterwarnings("ignore:divide by zero")
def test_scalar_exponentiation():
a = sparse.random((2, 3, 4), density=0.5)
x = a.todense()
assert_eq(x ** 2, a ** 2)
assert_eq(x ** 0.5, a ** 0.5)
assert_eq(x ** -1, a ** -1)
def test_create_with_lists_of_tuples():
L = [((0, 0, 0), 1), ((1, 2, 1), 1), ((1, 1, 1), 2), ((1, 3, 2), 3)]
s = COO(L)
x = np.zeros((2, 4, 3), dtype=np.asarray([1, 2, 3]).dtype)
for ind, value in L:
x[ind] = value
assert_eq(s, x)
def test_sizeof():
x = np.eye(100)
y = COO.from_numpy(x)
nb = sys.getsizeof(y)
assert 400 < nb < x.nbytes / 10
def test_scipy_sparse_interface():
n = 100
m = 10
row = np.random.randint(0, n, size=n, dtype=np.uint16)
col = np.random.randint(0, m, size=n, dtype=np.uint16)
data = np.ones(n, dtype=np.uint8)
inp = (data, (row, col))
x = scipy.sparse.coo_matrix(inp)
xx = sparse.COO(inp)
assert_eq(x, xx, check_nnz=False)
assert_eq(x.T, xx.T, check_nnz=False)
assert_eq(xx.to_scipy_sparse(), x, check_nnz=False)
assert_eq(COO.from_scipy_sparse(xx.to_scipy_sparse()), xx, check_nnz=False)
assert_eq(x, xx, check_nnz=False)
assert_eq(x.T.dot(x), xx.T.dot(xx), check_nnz=False)
assert isinstance(x + xx, COO)
assert isinstance(xx + x, COO)
@pytest.mark.parametrize("scipy_format", ["coo", "csr", "dok", "csc"])
def test_scipy_sparse_interaction(scipy_format):
x = sparse.random((10, 20), density=0.2).todense()
sp = getattr(scipy.sparse, scipy_format + "_matrix")(x)
coo = COO(x)
assert isinstance(sp + coo, COO)
assert isinstance(coo + sp, COO)
assert_eq(sp, coo)
@pytest.mark.parametrize(
"func",
[operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne],
)
def test_op_scipy_sparse(func):
xs = sparse.random((3, 4), density=0.5)
y = sparse.random((3, 4), density=0.5).todense()
ys = scipy.sparse.csr_matrix(y)
x = xs.todense()
assert_eq(func(x, y), func(xs, ys))
@pytest.mark.parametrize(
"func",
[
operator.add,
operator.sub,
pytest.param(
operator.mul,
marks=pytest.mark.xfail(reason="Scipy sparse auto-densifies in this case."),
),
pytest.param(
operator.gt,
marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."),
),
pytest.param(
operator.lt,
marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."),
),
pytest.param(
operator.ne,
marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."),
),
],
)
def test_op_scipy_sparse_left(func):
ys = sparse.random((3, 4), density=0.5)
x = sparse.random((3, 4), density=0.5).todense()
xs = scipy.sparse.csr_matrix(x)
y = ys.todense()
assert_eq(func(x, y), func(xs, ys))
def test_cache_csr():
x = sparse.random((10, 5), density=0.5).todense()
s = COO(x, cache=True)
assert isinstance(s.tocsr(), scipy.sparse.csr_matrix)
assert isinstance(s.tocsc(), scipy.sparse.csc_matrix)
assert s.tocsr() is s.tocsr()
assert s.tocsc() is s.tocsc()
def test_empty_shape():
x = COO(np.empty((0, 1), dtype=np.int8), [1.0])
assert x.shape == ()
assert_eq(2 * x, np.float_(2.0))
def test_single_dimension():
x = COO([1, 3], [1.0, 3.0])
assert x.shape == (4,)
assert_eq(x, np.array([0, 1.0, 0, 3.0]))
def test_large_sum():
n = 500000
x = np.random.randint(0, 10000, size=(n,))
y = np.random.randint(0, 1000, size=(n,))
z = np.random.randint(0, 3, size=(n,))
data = np.random.random(n)
a = COO((x, y, z), data)
assert a.shape == (10000, 1000, 3)
b = a.sum(axis=2)
assert b.nnz > 100000
def test_add_many_sparse_arrays():
x = COO({(1, 1): 1})
y = sum([x] * 100)
assert y.nnz < np.prod(y.shape)
def test_caching():
x = COO({(9, 9, 9): 1})
assert (
x[:].reshape((100, 10)).transpose().tocsr()
is not x[:].reshape((100, 10)).transpose().tocsr()
)
x = COO({(9, 9, 9): 1}, cache=True)
assert (
x[:].reshape((100, 10)).transpose().tocsr()
is x[:].reshape((100, 10)).transpose().tocsr()
)
x = COO({(1, 1, 1, 1, 1, 1, 1, 2): 1}, cache=True)
for i in range(x.ndim):
x.reshape(x.size)
assert len(x._cache["reshape"]) < 5
def test_scalar_slicing():
x = np.array([0, 1])
s = COO(x)
assert np.isscalar(s[0])
assert_eq(x[0], s[0])
assert isinstance(s[0, ...], COO)
assert s[0, ...].shape == ()
assert_eq(x[0, ...], s[0, ...])
assert np.isscalar(s[1])
assert_eq(x[1], s[1])
assert isinstance(s[1, ...], COO)
assert s[1, ...].shape == ()
assert_eq(x[1, ...], s[1, ...])
@pytest.mark.parametrize(
"shape, k",
[((3, 4), 0), ((3, 4, 5), 1), ((4, 2), -1), ((2, 4), -2), ((4, 4), 1000)],
)
def test_triul(shape, k):
s = sparse.random(shape, density=0.5)
x = s.todense()
assert_eq(np.triu(x, k), sparse.triu(s, k))
assert_eq(np.tril(x, k), sparse.tril(s, k))
def test_empty_reduction():
x = np.zeros((2, 3, 4), dtype=np.float_)
xs = COO.from_numpy(x)
assert_eq(x.sum(axis=(0, 2)), xs.sum(axis=(0, 2)))
@pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)])
@pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7])
def test_random_shape(shape, density):
s = sparse.random(shape, density)
assert isinstance(s, COO)
assert s.shape == shape
expected_nnz = density * np.prod(shape)
assert np.floor(expected_nnz) <= s.nnz <= np.ceil(expected_nnz)
@pytest.mark.parametrize("shape, nnz", [((1,), 1), ((2,), 0), ((3, 4), 5)])
def test_random_nnz(shape, nnz):
s = sparse.random(shape, nnz=nnz)
assert isinstance(s, COO)
assert s.nnz == nnz
@pytest.mark.parametrize(
"density, nnz", [(1, 1), (1.01, None), (-0.01, None), (None, 2)]
)
def test_random_invalid_density_and_nnz(density, nnz):
with pytest.raises(ValueError):
sparse.random((1,), density, nnz=nnz)
def test_two_random_unequal():
s1 = sparse.random((2, 3, 4), 0.3)
s2 = sparse.random((2, 3, 4), 0.3)
assert not np.allclose(s1.todense(), s2.todense())
def test_two_random_same_seed():
state = np.random.randint(100)
s1 = sparse.random((2, 3, 4), 0.3, random_state=state)
s2 = sparse.random((2, 3, 4), 0.3, random_state=state)
assert_eq(s1, s2)
@pytest.mark.parametrize(
"rvs, dtype",
[
(None, np.float64),
(scipy.stats.poisson(25, loc=10).rvs, np.int_),
(lambda x: np.random.choice([True, False], size=x), np.bool_),
],
)
@pytest.mark.parametrize("shape", [(2, 4, 5), (20, 40, 50)])
@pytest.mark.parametrize("density", [0.0, 0.01, 0.1, 0.2])
def test_random_rvs(rvs, dtype, shape, density):
x = sparse.random(shape, density, data_rvs=rvs)
assert x.shape == shape
assert x.dtype == dtype
@pytest.mark.parametrize("format", ["coo", "dok"])
def test_random_fv(format):
fv = np.random.rand()
s = sparse.random((2, 3, 4), density=0.5, format=format, fill_value=fv)
assert s.fill_value == fv
def test_scalar_shape_construction():
x = np.random.rand(5)
coords = np.arange(5)[None]
s = COO(coords, x, shape=5)
assert_eq(x, s)
def test_len():
s = sparse.random((20, 30, 40))
assert len(s) == 20
def test_density():
s = sparse.random((20, 30, 40), density=0.1)
assert np.isclose(s.density, 0.1)
def test_size():
s = sparse.random((20, 30, 40))
assert s.size == 20 * 30 * 40
def test_np_array():
s = sparse.random((20, 30, 40))
with pytest.raises(RuntimeError):
np.array(s)
@pytest.mark.parametrize(
"shapes",
[
[(2,), (3, 2), (4, 3, 2)],
[(3,), (2, 3), (2, 2, 3)],
[(2,), (2, 2), (2, 2, 2)],
[(4,), (4, 4), (4, 4, 4)],
[(4,), (4, 4), (4, 4, 4)],
[(4,), (4, 4), (4, 4, 4)],
[(1, 1, 2), (1, 3, 1), (4, 1, 1)],
[(2,), (2, 1), (2, 1, 1)],
[(3,), (), (2, 3)],
[(4, 4), (), ()],
],
)
def test_three_arg_where(shapes):
cs = sparse.random(shapes[0], density=0.5).astype(np.bool_)
xs = sparse.random(shapes[1], density=0.5)
ys = sparse.random(shapes[2], density=0.5)
c = cs.todense()
x = xs.todense()
y = ys.todense()
expected = np.where(c, x, y)
actual = sparse.where(cs, xs, ys)
assert isinstance(actual, COO)
assert_eq(expected, actual)
def test_one_arg_where():
s = sparse.random((2, 3, 4), density=0.5)
x = s.todense()
expected = np.where(x)
actual = sparse.where(s)
assert len(expected) == len(actual)
for e, a in zip(expected, actual):
assert_eq(e, a, compare_dtype=False)
def test_one_arg_where_dense():
x = np.random.rand(2, 3, 4)
with pytest.raises(ValueError):
sparse.where(x)
def test_two_arg_where():
cs = sparse.random((2, 3, 4), density=0.5).astype(np.bool_)
xs = sparse.random((2, 3, 4), density=0.5)
with pytest.raises(ValueError):
sparse.where(cs, xs)
@pytest.mark.parametrize("func", [operator.imul, operator.iadd, operator.isub])
def test_inplace_invalid_shape(func):
xs = sparse.random((3, 4), density=0.5)
ys = sparse.random((2, 3, 4), density=0.5)
with pytest.raises(ValueError):
func(xs, ys)
def test_nonzero():
s = sparse.random((2, 3, 4), density=0.5)
x = s.todense()
expected = x.nonzero()
actual = s.nonzero()
assert isinstance(actual, tuple)
assert len(expected) == len(actual)
for e, a in zip(expected, actual):
assert_eq(e, a, compare_dtype=False)
def test_argwhere():
s = sparse.random((2, 3, 4), density=0.5)
x = s.todense()
assert_eq(np.argwhere(s), np.argwhere(x), compare_dtype=False)
@pytest.mark.parametrize("format", ["coo", "dok"])
def test_asformat(format):
s = sparse.random((2, 3, 4), density=0.5, format="coo")
s2 = s.asformat(format)
assert_eq(s, s2)
@pytest.mark.parametrize(
"format", [sparse.COO, sparse.DOK, scipy.sparse.csr_matrix, np.asarray]
)
def test_as_coo(format):
x = format(sparse.random((3, 4), density=0.5, format="coo").todense())
s1 = sparse.as_coo(x)
s2 = COO(x)
assert_eq(x, s1)
assert_eq(x, s2)
def test_invalid_attrs_error():
s = sparse.random((3, 4), density=0.5, format="coo")
with pytest.raises(ValueError):
sparse.as_coo(s, shape=(2, 3))
with pytest.raises(ValueError):
COO(s, shape=(2, 3))
with pytest.raises(ValueError):
sparse.as_coo(s, fill_value=0.0)
with pytest.raises(ValueError):
COO(s, fill_value=0.0)
def test_invalid_iterable_error():
with pytest.raises(ValueError):
x = [(3, 4, 5)]
COO.from_iter(x)
with pytest.raises(ValueError):
x = [((2.3, 4.5), 3.2)]
COO.from_iter(x)
def test_prod_along_axis():
s1 = sparse.random((10, 10), density=0.1)
s2 = 1 - s1
x1 = s1.todense()
x2 = s2.todense()
assert_eq(s1.prod(axis=0), x1.prod(axis=0))
assert_eq(s2.prod(axis=0), x2.prod(axis=0))
class TestRoll:
# test on 1d array #
@pytest.mark.parametrize("shift", [0, 2, -2, 20, -20])
def test_1d(self, shift):
xs = sparse.random((100,), density=0.5)
x = xs.todense()
assert_eq(np.roll(x, shift), sparse.roll(xs, shift))
assert_eq(np.roll(x, shift), sparse.roll(x, shift))
# test on 2d array #
@pytest.mark.parametrize("shift", [0, 2, -2, 20, -20])
@pytest.mark.parametrize("ax", [None, 0, 1, (0, 1)])
def test_2d(self, shift, ax):
xs = sparse.random((10, 10), density=0.5)
x = xs.todense()
assert_eq(np.roll(x, shift, axis=ax), sparse.roll(xs, shift, axis=ax))
assert_eq(np.roll(x, shift, axis=ax), sparse.roll(x, shift, axis=ax))
# test on rolling multiple axes at once #
@pytest.mark.parametrize("shift", [(0, 0), (1, -1), (-1, 1), (10, -10)])
@pytest.mark.parametrize("ax", [(0, 1), (0, 2), (1, 2), (-1, 1)])
def test_multiaxis(self, shift, ax):
xs = sparse.random((9, 9, 9), density=0.5)
x = xs.todense()
assert_eq(np.roll(x, shift, axis=ax), sparse.roll(xs, shift, axis=ax))
assert_eq(np.roll(x, shift, axis=ax), sparse.roll(x, shift, axis=ax))
# test original is unchanged #
@pytest.mark.parametrize("shift", [0, 2, -2, 20, -20])
@pytest.mark.parametrize("ax", [None, 0, 1, (0, 1)])
def test_original_is_copied(self, shift, ax):
xs = sparse.random((10, 10), density=0.5)
xc = COO(np.copy(xs.coords), np.copy(xs.data), shape=xs.shape)
sparse.roll(xs, shift, axis=ax)
assert_eq(xs, xc)
# test on empty array #
def test_empty(self):
x = np.array([])
assert_eq(np.roll(x, 1), sparse.roll(sparse.as_coo(x), 1))
# test error handling #
@pytest.mark.parametrize(
"args",
[
# iterable shift, but axis not iterable
((1, 1), 0),
# ndim(axis) != 1
(1, [[0, 1]]),
# ndim(shift) != 1
([[0, 1]], [0, 1]),
([[0, 1], [0, 1]], [0, 1]),
],
)
def test_valerr(self, args):
x = sparse.random((2, 2, 2), density=1)
with pytest.raises(ValueError):
sparse.roll(x, *args)
@pytest.mark.parametrize("dtype", [np.uint8, np.int8])
@pytest.mark.parametrize("shift", [300, -300])
def test_dtype_errors(self, dtype, shift):
x = sparse.random((5, 5, 5), density=0.2, idx_dtype=dtype)
with pytest.raises(ValueError):
sparse.roll(x, shift)
def test_unsigned_type_error(self):
x = sparse.random((5, 5, 5), density=0.3, idx_dtype=np.uint8)
with pytest.raises(ValueError):
sparse.roll(x, -1)
def test_clip():
x = np.array([[0, 0, 1, 0, 2], [5, 0, 0, 3, 0]])
s = sparse.COO.from_numpy(x)
assert_eq(s.clip(min=1), x.clip(min=1))
assert_eq(s.clip(max=3), x.clip(max=3))
assert_eq(s.clip(min=1, max=3), x.clip(min=1, max=3))
assert_eq(s.clip(min=1, max=3.0), x.clip(min=1, max=3.0))
assert_eq(np.clip(s, 1, 3), np.clip(x, 1, 3))
with pytest.raises(ValueError):
s.clip()
out = sparse.COO.from_numpy(np.zeros_like(x))
out2 = s.clip(min=1, max=3, out=out)
assert out is out2
assert_eq(out, x.clip(min=1, max=3))
class TestFailFillValue:
# Check failed fill_value op
def test_nonzero_fv(self):
xs = sparse.random((2, 3), density=0.5, fill_value=1)
ys = sparse.random((3, 4), density=0.5)
with pytest.raises(ValueError):
sparse.dot(xs, ys)
def test_inconsistent_fv(self):
xs = sparse.random((3, 4), density=0.5, fill_value=1)
ys = sparse.random((3, 4), density=0.5, fill_value=2)
with pytest.raises(ValueError):
sparse.concatenate([xs, ys])
def test_pickle():
x = sparse.COO.from_numpy([1, 0, 0, 0, 0]).reshape((5, 1))
# Enable caching and add some data to it
x.enable_caching()
x.T
assert x._cache is not None
# Pickle sends data but not cache
x2 = pickle.loads(pickle.dumps(x))
assert_eq(x, x2)
assert x2._cache is None
@pytest.mark.parametrize("deep", [True, False])
def test_copy(deep):
x = sparse.COO.from_numpy([1, 0, 0, 0, 0]).reshape((5, 1))
# Enable caching and add some data to it
x.enable_caching()
x.T
assert x._cache is not None
x2 = x.copy(deep)
assert_eq(x, x2)
assert (x2.data is x.data) is not deep
assert (x2.coords is x.coords) is not deep
assert x2._cache is None
@pytest.mark.parametrize("ndim", [2, 3, 4, 5])
def test_initialization(ndim):
shape = [10] * ndim
shape[1] *= 2
shape = tuple(shape)
coords = np.random.randint(10, size=ndim * 20).reshape(ndim, 20)
data = np.random.rand(20)
COO(coords, data=data, shape=shape)
with pytest.raises(ValueError, match="data length"):
COO(coords, data=data[:5], shape=shape)
with pytest.raises(ValueError, match="shape of `coords`"):
coords = np.random.randint(10, size=20).reshape(1, 20)
COO(coords, data=data, shape=shape)
@pytest.mark.parametrize("N, M", [(4, None), (4, 10), (10, 4), (0, 10)])
def test_eye(N, M):
m = M or N
for k in [0, N - 2, N + 2, m - 2, m + 2]:
assert_eq(sparse.eye(N, M=M, k=k), np.eye(N, M=M, k=k))
assert_eq(sparse.eye(N, M=M, k=k, dtype="i4"), np.eye(N, M=M, k=k, dtype="i4"))
@pytest.mark.parametrize("funcname", ["ones", "zeros"])
def test_ones_zeros(funcname):
sp_func = getattr(sparse, funcname)
np_func = getattr(np, funcname)
assert_eq(sp_func(5), np_func(5))
assert_eq(sp_func((5, 4)), np_func((5, 4)))
assert_eq(sp_func((5, 4), dtype="i4"), np_func((5, 4), dtype="i4"))
assert_eq(sp_func((5, 4), dtype=None), np_func((5, 4), dtype=None))
@pytest.mark.parametrize("funcname", ["ones_like", "zeros_like"])
def test_ones_zeros_like(funcname):
sp_func = getattr(sparse, funcname)
np_func = getattr(np, funcname)
x = np.ones((5, 5), dtype="i8")
assert_eq(sp_func(x), np_func(x))
assert_eq(sp_func(x, dtype="f8"), np_func(x, dtype="f8"))
assert_eq(sp_func(x, dtype=None), np_func(x, dtype=None))
assert_eq(sp_func(x, shape=(2, 2)), np_func(x, shape=(2, 2)))
def test_full():
assert_eq(sparse.full(5, 9), np.full(5, 9))
assert_eq(sparse.full(5, 9, dtype="f8"), np.full(5, 9, dtype="f8"))
assert_eq(sparse.full((5, 4), 9.5), np.full((5, 4), 9.5))
assert_eq(sparse.full((5, 4), 9.5, dtype="i4"), np.full((5, 4), 9.5, dtype="i4"))
def test_full_like():
x = np.zeros((5, 5), dtype="i8")
assert_eq(sparse.full_like(x, 9.5), np.full_like(x, 9.5))
assert_eq(sparse.full_like(x, 9.5, dtype="f8"), np.full_like(x, 9.5, dtype="f8"))
assert_eq(
sparse.full_like(x, 9.5, shape=(2, 2)), np.full_like(x, 9.5, shape=(2, 2))
)
@pytest.mark.parametrize("complex", [True, False])
def test_complex_methods(complex):
if complex:
x = np.array([1 + 2j, 2 - 1j, 0, 1, 0])
else:
x = np.array([1, 2, 0, 0, 0])
s = sparse.COO.from_numpy(x)
assert_eq(s.imag, x.imag)
assert_eq(s.real, x.real)
assert_eq(s.conj(), x.conj())
def test_np_matrix():
x = np.random.rand(10, 1).view(type=np.matrix)
s = sparse.COO.from_numpy(x)
assert_eq(x, s)
def test_out_dtype():
a = sparse.eye(5, dtype="float32")
b = sparse.eye(5, dtype="float64")
assert (
np.positive(a, out=b).dtype == np.positive(a.todense(), out=b.todense()).dtype
)
assert (
np.positive(a, out=b, dtype="float64").dtype
== np.positive(a.todense(), out=b.todense(), dtype="float64").dtype
)
@contextlib.contextmanager
def auto_densify():
"For use in tests only! Not threadsafe."
import os
from importlib import reload
os.environ["SPARSE_AUTO_DENSIFY"] = "1"
reload(sparse._settings)
yield
del os.environ["SPARSE_AUTO_DENSIFY"]
reload(sparse._settings)
def test_setting_into_numpy_slice():
actual = np.zeros((5, 5))
s = sparse.COO(data=[1, 1], coords=(2, 4), shape=(5,))
# This calls s.__array__(dtype('float64')) which means that __array__
# must accept a positional argument. If not this will raise, of course,
# TypeError: __array__() takes 1 positional argument but 2 were given
with auto_densify():
actual[:, 0] = s
# Might as well check the content of the result as well.
expected = np.zeros((5, 5))
expected[:, 0] = s.todense()
assert_eq(actual, expected)
# Without densification, setting is unsupported.
with pytest.raises(RuntimeError):
actual[:, 0] = s
def test_successful_densification():
s = sparse.random((3, 4, 5), density=0.5)
with auto_densify():
x = np.array(s)
assert isinstance(x, np.ndarray)
assert_eq(s, x)
def test_failed_densification():
s = sparse.random((3, 4, 5), density=0.5)
with pytest.raises(RuntimeError):
np.array(s)
def test_warn_on_too_dense():
import os
from importlib import reload
os.environ["SPARSE_WARN_ON_TOO_DENSE"] = "1"
reload(sparse._settings)
with pytest.warns(RuntimeWarning):
sparse.random((3, 4, 5), density=1.0)
del os.environ["SPARSE_WARN_ON_TOO_DENSE"]
reload(sparse._settings)
def test_prune_coo():
coords = np.array([[0, 1, 2, 3]])
data = np.array([1, 0, 1, 2])
s1 = COO(coords, data)
s2 = COO(coords, data, prune=True)
assert s2.nnz == 3
# Densify s1 because it isn't canonical
assert_eq(s1.todense(), s2, check_nnz=False)
def test_diagonal():
a = sparse.random((4, 4), density=0.5)
assert_eq(sparse.diagonal(a, offset=0), np.diagonal(a.todense(), offset=0))
assert_eq(sparse.diagonal(a, offset=1), np.diagonal(a.todense(), offset=1))
assert_eq(sparse.diagonal(a, offset=2), np.diagonal(a.todense(), offset=2))
a = sparse.random((4, 5, 4, 6), density=0.5)
assert_eq(
sparse.diagonal(a, offset=0, axis1=0, axis2=2),
np.diagonal(a.todense(), offset=0, axis1=0, axis2=2),
)
assert_eq(
sparse.diagonal(a, offset=1, axis1=0, axis2=2),
np.diagonal(a.todense(), offset=1, axis1=0, axis2=2),
)
assert_eq(
sparse.diagonal(a, offset=2, axis1=0, axis2=2),
np.diagonal(a.todense(), offset=2, axis1=0, axis2=2),
)
def test_diagonalize():
assert_eq(sparse.diagonalize(np.ones(3)), sparse.eye(3))
assert_eq(
sparse.diagonalize(scipy.sparse.coo_matrix(np.eye(3))),
sparse.diagonalize(sparse.eye(3)),
)
# inverse of diagonal
b = sparse.random((4, 3, 2), density=0.5)
b_diag = sparse.diagonalize(b, axis=1)
assert_eq(b, sparse.diagonal(b_diag, axis1=1, axis2=3).transpose([0, 2, 1]))
RESULT_TYPE_DTYPES = [
"i1",
"i2",
"i4",
"i8",
"u1",
"u2",
"u4",
"u8",
"f4",
"f8",
"c8",
"c16",
object,
]
@pytest.mark.parametrize("t1", RESULT_TYPE_DTYPES)
@pytest.mark.parametrize("t2", RESULT_TYPE_DTYPES)
@pytest.mark.parametrize(
"func",
[
sparse.result_type,
pytest.param(
np.result_type,
marks=pytest.mark.skipif(not NEP18_ENABLED, reason="NEP18 is not enabled"),
),
],
)
@pytest.mark.parametrize("data", [1, [1]]) # Not the same outputs!
def test_result_type(t1, t2, func, data):
a = np.array(data, dtype=t1)
b = np.array(data, dtype=t2)
expect = np.result_type(a, b)
assert func(a, sparse.COO(b)) == expect
assert func(sparse.COO(a), b) == expect
assert func(sparse.COO(a), sparse.COO(b)) == expect
assert func(a.dtype, sparse.COO(b)) == np.result_type(a.dtype, b)
assert func(sparse.COO(a), b.dtype) == np.result_type(a, b.dtype)
@pytest.mark.parametrize("in_shape", [(5, 5), 62, (3, 3, 3)])
def test_flatten(in_shape):
s = sparse.random(in_shape, density=0.5)
x = s.todense()
a = s.flatten()
e = x.flatten()
assert_eq(e, a)
def test_asnumpy():
s = sparse.COO(data=[1], coords=[2], shape=(5,))
assert_eq(sparse.asnumpy(s), s.todense())
assert_eq(
sparse.asnumpy(s, dtype=np.float64), np.asarray(s.todense(), dtype=np.float64)
)
a = np.array([1, 2, 3])
# Array passes through with no copying.
assert sparse.asnumpy(a) is a
@pytest.mark.parametrize("shape1", [(2,), (2, 3), (2, 3, 4)])
@pytest.mark.parametrize("shape2", [(2,), (2, 3), (2, 3, 4)])
def test_outer(shape1, shape2):
s1 = sparse.random(shape1, density=0.5)
s2 = sparse.random(shape2, density=0.5)
x1 = s1.todense()
x2 = s2.todense()
assert_eq(sparse.outer(s1, s2), np.outer(x1, x2))
assert_eq(np.multiply.outer(s1, s2), np.multiply.outer(x1, x2))
def test_scalar_list_init():
a = sparse.COO([], [], ())
b = sparse.COO([], [1], ())
assert a.todense() == 0
assert b.todense() == 1
def test_raise_on_nd_data():
s1 = sparse.random((2, 3, 4), density=0.5)
with pytest.raises(ValueError):
sparse.COO(s1.coords, s1.data[:, None], shape=(2, 3, 4))
def test_astype_casting():
s1 = sparse.random((2, 3, 4), density=0.5)
with pytest.raises(TypeError):
s1.astype(dtype=np.int64, casting="safe")
def test_astype_no_copy():
s1 = sparse.random((2, 3, 4), density=0.5)
s2 = s1.astype(s1.dtype, copy=False)
assert s1 is s2
def test_coo_valerr():
a = np.arange(300)
with pytest.raises(ValueError):
COO.from_numpy(a, idx_dtype=np.int8)
def test_random_idx_dtype():
with pytest.raises(ValueError):
sparse.random((300,), density=0.1, format="coo", idx_dtype=np.int8)
def test_html_for_size_zero():
arr = sparse.COO.from_numpy(np.array(()))
ground_truth = "<table><tbody>"
ground_truth += '<tr><th style="text-align: left">Format</th><td style="text-align: left">coo</td></tr>'
ground_truth += '<tr><th style="text-align: left">Data Type</th><td style="text-align: left">float64</td></tr>'
ground_truth += '<tr><th style="text-align: left">Shape</th><td style="text-align: left">(0,)</td></tr>'
ground_truth += '<tr><th style="text-align: left">nnz</th><td style="text-align: left">0</td></tr>'
ground_truth += '<tr><th style="text-align: left">Density</th><td style="text-align: left">nan</td></tr>'
ground_truth += '<tr><th style="text-align: left">Read-only</th><td style="text-align: left">True</td></tr>'
ground_truth += '<tr><th style="text-align: left">Size</th><td style="text-align: left">0</td></tr>'
ground_truth += '<tr><th style="text-align: left">Storage ratio</th><td style="text-align: left">nan</td></tr>'
ground_truth += "</tbody></table>"
table = html_table(arr)
assert table == ground_truth
@pytest.mark.parametrize(
"pad_width",
[
2,
(2, 1),
((2), (1)),
((1, 2), (4, 5), (7, 8)),
],
)
@pytest.mark.parametrize("constant_values", [0, 1, 150, np.nan])
def test_pad_valid(pad_width, constant_values):
y = sparse.random((50, 50, 3), density=0.15, fill_value=constant_values)
x = y.todense()
xx = np.pad(x, pad_width=pad_width, constant_values=constant_values)
yy = np.pad(y, pad_width=pad_width, constant_values=constant_values)
assert_eq(xx, yy)
@pytest.mark.parametrize(
"pad_width",
[
((2, 1), (5, 7)),
],
)
@pytest.mark.parametrize("constant_values", [150, 2, (1, 2)])
def test_pad_invalid(pad_width, constant_values, fill_value=0):
y = sparse.random((50, 50, 3), density=0.15)
with pytest.raises(ValueError):
np.pad(y, pad_width, constant_values=constant_values)
@pytest.mark.parametrize("val", [0, 5])
def test_scalar_from_numpy(val):
x = np.int64(val)
s = sparse.COO.from_numpy(x)
assert s.nnz == 0
assert_eq(x, s)
def test_scalar_elemwise():
s1 = sparse.random((), density=0.5)
x2 = np.random.rand(2)
x1 = s1.todense()
assert_eq(s1 * x2, x1 * x2)
| []
| []
| [
"SPARSE_AUTO_DENSIFY",
"SPARSE_WARN_ON_TOO_DENSE"
]
| [] | ["SPARSE_AUTO_DENSIFY", "SPARSE_WARN_ON_TOO_DENSE"] | python | 2 | 0 | |
commands/helpers.go | package commands
import (
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/cloudfoundry-incubator/credhub-cli/config"
"github.com/cloudfoundry-incubator/credhub-cli/credhub"
"github.com/cloudfoundry-incubator/credhub-cli/credhub/auth"
"github.com/cloudfoundry-incubator/credhub-cli/errors"
"gopkg.in/yaml.v2"
)
func initializeCredhubClient(cfg config.Config) (*credhub.CredHub, error) {
var credhubClient *credhub.CredHub
readConfigFromEnvironmentVariables(&cfg)
err := config.ValidateConfig(cfg)
if err != nil {
if !clientCredentialsInEnvironment() || config.ValidateConfigApi(cfg) != nil {
return nil, err
}
}
if clientCredentialsInEnvironment() {
credhubClient, err = newCredhubClient(&cfg, os.Getenv("CREDHUB_CLIENT"), os.Getenv("CREDHUB_SECRET"), true)
} else {
credhubClient, err = newCredhubClient(&cfg, config.AuthClient, config.AuthPassword, false)
}
return credhubClient, err
}
func printCredential(outputJson bool, v interface{}) {
if outputJson {
s, _ := json.MarshalIndent(v, "", "\t")
fmt.Println(string(s))
} else {
s, _ := yaml.Marshal(v)
fmt.Println(string(s))
}
}
func readConfigFromEnvironmentVariables(cfg *config.Config) error {
if cfg.CaCerts == nil && os.Getenv("CREDHUB_CA_CERT") != "" {
caCerts, err := ReadOrGetCaCerts([]string{os.Getenv("CREDHUB_CA_CERT")})
if err != nil {
return err
}
cfg.CaCerts = caCerts
}
if cfg.ApiURL == "" && os.Getenv("CREDHUB_SERVER") != "" {
cfg.ApiURL = os.Getenv("CREDHUB_SERVER")
}
if cfg.AuthURL == "" && cfg.ApiURL != "" {
credhubInfo, err := GetApiInfo(cfg.ApiURL, cfg.CaCerts, cfg.InsecureSkipVerify)
if err != nil {
return errors.NewNetworkError(err)
}
cfg.AuthURL = credhubInfo.AuthServer.URL
}
return config.WriteConfig(*cfg)
}
func newCredhubClient(cfg *config.Config, clientId string, clientSecret string, usingClientCredentials bool) (*credhub.CredHub, error) {
credhubClient, err := credhub.New(cfg.ApiURL, credhub.CaCerts(cfg.CaCerts...), credhub.SkipTLSValidation(cfg.InsecureSkipVerify), credhub.Auth(auth.Uaa(
clientId,
clientSecret,
"",
"",
cfg.AccessToken,
cfg.RefreshToken,
usingClientCredentials,
)),
credhub.AuthURL(cfg.AuthURL))
return credhubClient, err
}
func clientCredentialsInEnvironment() bool {
return os.Getenv("CREDHUB_CLIENT") != "" || os.Getenv("CREDHUB_SECRET") != ""
}
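// A small sketch of the two auth paths above; the values are placeholders and
// only the variable names come from this file.
//
//     export CREDHUB_CLIENT=credhub_client
//     export CREDHUB_SECRET=example-secret
//
// With both set, clientCredentialsInEnvironment() is true and
// initializeCredhubClient builds the client with those UAA client credentials;
// otherwise it falls back to config.AuthClient/config.AuthPassword plus any
// saved tokens in the config.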
func verifyAuthServerConnection(cfg config.Config, skipTlsValidation bool) error {
credhubClient, err := credhub.New(cfg.ApiURL, credhub.CaCerts(cfg.CaCerts...), credhub.SkipTLSValidation(skipTlsValidation))
if err != nil {
return err
}
if !skipTlsValidation {
request, _ := http.NewRequest("GET", cfg.AuthURL+"/info", nil)
request.Header.Add("Accept", "application/json")
_, err = credhubClient.Client().Do(request)
}
return err
}
| [
"\"CREDHUB_CLIENT\"",
"\"CREDHUB_SECRET\"",
"\"CREDHUB_CA_CERT\"",
"\"CREDHUB_CA_CERT\"",
"\"CREDHUB_SERVER\"",
"\"CREDHUB_SERVER\"",
"\"CREDHUB_CLIENT\"",
"\"CREDHUB_SECRET\""
]
| []
| [
"CREDHUB_CA_CERT",
"CREDHUB_SERVER",
"CREDHUB_CLIENT",
"CREDHUB_SECRET"
]
| [] | ["CREDHUB_CA_CERT", "CREDHUB_SERVER", "CREDHUB_CLIENT", "CREDHUB_SECRET"] | go | 4 | 0 | |
deploy_addon.py | #!/usr/bin/env python
# coding: utf-8
# License: GPL v.3 <http://www.gnu.org/licenses/gpl-3.0.en.html>
"""
Create Kodi addons zip file
"""
from __future__ import print_function
import re
import os
# import argparse
import os.path
import zipfile
def clean_pyc(folder):
cwd = os.getcwd()
os.chdir(folder)
paths = os.listdir(folder)
for path in paths:
abs_path = os.path.abspath(path)
if os.path.isdir(abs_path):
clean_pyc(abs_path)
elif path[-4:] == '.pyc' or path[-4:] == '.pyo':
print('deleting <%s>' % abs_path)
os.remove(abs_path)
os.chdir(cwd)
def create_zip(zip_name, root_dir, addon_name):
clean_pyc(root_dir)
print('%s cleaned.' % root_dir)
with zipfile.ZipFile(zip_name, "w", compression=zipfile.ZIP_DEFLATED) as zf:
base_path = os.path.normpath(root_dir)
print('Creating zip file...')
for dirpath, dirnames, filenames in os.walk(root_dir):
filenames = [f for f in filenames if not f[0] == '.']
dirnames[:] = [d for d in dirnames if not d[0] == '.']
for name in sorted(dirnames):
# if name == "extra":
# continue
path = os.path.normpath(os.path.join(dirpath, name))
print("+ <%s>" % path)
zf.write(path, os.path.join(
addon_name, os.path.relpath(path, base_path)))
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path) and not path.endswith(".zip") and not path.endswith("deploy_addon.py") and not path.endswith(".m3u"):
print("+ <%s>" % path)
zf.write(path, os.path.join(
addon_name, os.path.relpath(path, base_path)))
print('ZIP created successfully.')
# Argument parsing
# parser = argparse.ArgumentParser(description='Creates an addon zip file')
# parser.add_argument('addon', nargs='?', help='addon ID',
# action='store', default='')
# args = parser.parse_args()
# # Define paths
# if not args.addon:
# addon = os.environ['ADDON']
# else:
# addon = args.addon
root_dir = os.path.dirname(os.path.abspath(__file__))
addon = root_dir.split(os.sep)[-1]
with open(os.path.join(root_dir, 'addon.xml'), 'rb') as addon_xml:
version = re.search(r'(?<!xml )version="(.+?)"', addon_xml.read().decode('utf-8')).group(1)  # decode: the file is opened in binary mode
zip_name = '{0}-{1}'.format(addon, version) + '.zip'
# Start working
os.chdir(root_dir)
create_zip(zip_name, root_dir, addon)
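# Typical invocation (paths assumed): run from the addon root, next to addon.xml.
#
#     python deploy_addon.py
#
# This cleans *.pyc/*.pyo files, then writes e.g. plugin.video.example-1.2.3.zip
# containing the addon folder, minus .zip/.m3u files and this script itself.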
| []
| []
| [
"ADDON"
]
| [] | ["ADDON"] | python | 1 | 0 | |
src/tzer/tir/report.py | import dill as pickle
from typing import List, Optional
from tvm import tir
import tvm
import time
import os
import uuid
import datetime
import git
__TVM_INSTRUMENTED__ = False
try:
from tvm.contrib import coverage
__TVM_INSTRUMENTED__ = True
except Exception as e:
print(f'No coverage in linked TVM. {e}')
if __TVM_INSTRUMENTED__:
assert os.getenv(
'NO_COV') is None, "Since you want coverage disabled, why link an instrumented TVM?"
_METADATA_NAME_ = 'meta.txt'
_COV_BY_TIME_NAME_ = 'cov_by_time.txt'
_COMPILATION_RATE_ = 'compile_rate.txt'
_TIR_BY_TIME_NAME_ = 'tir_by_time.pickle'
_ITERATION_ = 'iterations.txt'
_VALID_SEED_NEW_COV_COUNT_ = 'valid_seed_new_cov_count.txt'
class TVMFuzzerUsageError(Exception):
def __init__(self, msg):
self.message = msg
def __str__(self):
return f'Please use TVMFuzzer in the right way... {self.message}'
class Reporter:
def __init__(self, report_folder=None, use_coverage=True, record_tir=False, use_existing_dir=False) -> None:
# Checks
tvm_home = os.getenv('TVM_HOME')
if not tvm_home or not os.path.exists(tvm_home):
raise TVMFuzzerUsageError(f'got incorrect env var `TVM_HOME`: "{tvm_home}"')
self.start_time = time.perf_counter()
self.report_folder = report_folder
if report_folder is None:
self.report_folder = f'fuzzing-report-{uuid.uuid4()}'
if use_existing_dir:
assert os.path.exists(self.report_folder)
else:
# TODO: Allow continuous fuzzing...
if os.path.exists(self.report_folder):
raise TVMFuzzerUsageError(
f'{self.report_folder} already exists... We want an empty folder to report...')
os.mkdir(self.report_folder)
print(f'Create report folder: {self.report_folder}')
print(f'Using `{self.report_folder}` as the fuzzing report folder')
with open(os.path.join(self.report_folder, _METADATA_NAME_), 'w') as f:
fuzz_repo = git.Repo(search_parent_directories=True)
tvm_repo = git.Repo(tvm_home, search_parent_directories=True)  # assumes TVM_HOME points inside the TVM checkout
def _log_repo(f, tag, repo: git.Repo):
f.write(f'{tag} GIT HASH: {repo.head.object.hexsha}\n')
f.write(f'{tag} GIT STATUS: ')
f.write(
'\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n')
f.write(repo.git.status())
f.write(
'\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n')
f.write(f'START TIME: {datetime.datetime.now()}')
_log_repo(f, 'Fuzzer', fuzz_repo)
_log_repo(f, 'TVM', tvm_repo)
self.cov_by_time_file = None
if use_coverage:
self.cov_by_time_file = open(os.path.join(
self.report_folder, _COV_BY_TIME_NAME_), 'w')
self.tir_by_time_file = None
if record_tir:
self.tir_by_time_file = open(os.path.join(
self.report_folder, _TIR_BY_TIME_NAME_), 'wb')
self.n_bug = 0
def record_tir_and_passes(self, tir, passes):
assert self.tir_by_time_file
pickle.dump((time.perf_counter() - self.start_time, tir, passes),
self.tir_by_time_file)
def record_coverage(self, t=None):
if t is None:
t = time.perf_counter() - self.start_time
assert self.cov_by_time_file
self.cov_by_time_file.write(
f'{t:.2f},{coverage.get_now()},\n')
def record_compile_rate(self, rate):
with open(os.path.join(self.report_folder, _COMPILATION_RATE_), 'w') as f:
f.write(rate)
def record_valid_seed_achieving_new_cov_count(self, count: int):
with open(os.path.join(self.report_folder, _VALID_SEED_NEW_COV_COUNT_), 'w') as f:
f.write(str(count))
def record_iteration(self, iteration: int):
with open(os.path.join(self.report_folder, _ITERATION_), 'w') as f:
f.write(str(iteration))
def report_tir_bug(
self,
err: Exception,
func: tir.PrimFunc,
passes: Optional[List[tvm.transform.Pass]],
parameters: Optional[list],
msg: str
):
bug_prefix = f'{type(err).__name__}__{uuid.uuid4()}'
with open(os.path.join(self.report_folder, f'{bug_prefix}.ctx'), 'wb') as f:
pickle.dump({
'func': func,
'passes': passes,
'args': parameters
}, f, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.report_folder, f'{bug_prefix}.error_message.txt'), 'w') as f1:
f1.write(msg) # type: ignore
self.n_bug += 1
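# A minimal usage sketch of Reporter, assuming TVM_HOME is set and that the
# report folder does not exist yet; func/passes/parameters are illustrative names.
#
#     reporter = Reporter('fuzz-run-0', use_coverage=False, record_tir=True)
#     reporter.record_iteration(1)
#     reporter.record_compile_rate('0.95')  # note: expects a string
#     try:
#         ...  # compile a tir.PrimFunc under some passes
#     except Exception as err:
#         reporter.report_tir_bug(err, func, passes, parameters, str(err))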
| []
| []
| [
"TVM_HOME",
"NO_COV"
]
| [] | ["TVM_HOME", "NO_COV"] | python | 2 | 0 | |
package_pickup_inquiry.go | package go_usps
// Package Pickup Inquiry Web Tools
// https://www.usps.com/business/web-tools-apis/package-pickup-api.htm#_Toc450550270
type CarrierPickupInquiryRequest struct {
USERID string `xml:"USERID,attr"`
FirmName string `xml:"FirmName,omitempty"`
SuiteOrApt string `xml:"SuiteOrApt"`
Address2 string `xml:"Address2"`
Urbanization string `xml:"Urbanization"`
City string `xml:"City"`
State string `xml:"State"`
ZIP5 string `xml:"ZIP5"`
ZIP4 string `xml:"ZIP4"`
ConfirmationNumber string `xml:"ConfirmationNumber"`
}
func (r *CarrierPickupInquiryRequest) toHTTPRequestStr(bool) (string, error) {
return createUSPSApiRequestStr("CarrierPickupInquiry", r)
}
type CarrierPickupInquiryResponse struct {
FirstName string `xml:"FirstName"`
LastName string `xml:"LastName"`
FirmName string `xml:"FirmName"`
SuiteOrApt string `xml:"SuiteOrApt"`
Address2 string `xml:"Address2"`
Urbanization string `xml:"Urbanization"`
City string `xml:"City"`
State string `xml:"State"`
ZIP5 string `xml:"ZIP5"`
ZIP4 string `xml:"ZIP4"`
Phone string `xml:"Phone"`
Extension string `xml:"Extension"`
Package struct {
ServiceType string `xml:"ServiceType"`
Count string `xml:"Count"`
} `xml:"Package"`
EstimatedWeight string `xml:"EstimatedWeight"`
PackageLocation string `xml:"PackageLocation"`
SpecialInstructions string `xml:"SpecialInstructions"`
ConfirmationNumber string `xml:"ConfirmationNumber"`
DayOfWeek string `xml:"DayOfWeek"`
Date string `xml:"Date"`
EmailAddress string `xml:"EmailAddress,omitempty"`
}
func (U *USPS) PackagePickupInquiry(request *CarrierPickupInquiryRequest) (CarrierPickupInquiryResponse, error) {
request.USERID = U.Username
result := new(CarrierPickupInquiryResponse)
err := U.Client.Execute(request, result)
return *result, err
}
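// An illustrative call, assuming a configured USPS client `u`; the address and
// confirmation number below are placeholders, not real data.
//
//     resp, err := u.PackagePickupInquiry(&CarrierPickupInquiryRequest{
//         Address2:           "123 Main St",
//         City:               "Springfield",
//         State:              "IL",
//         ZIP5:               "62701",
//         ConfirmationNumber: "WTC1234567",
//     })
//     // On success, resp.Date, resp.DayOfWeek and resp.Package.Count describe the scheduled pickup.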
| []
| []
| []
| [] | [] | go | null | null | null |
pkg/injector/inject_test.go | package injector
import (
"fmt"
"io/ioutil"
"os"
"path"
"runtime"
"testing"
"cuelang.org/go/cue/cuecontext"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInjectNil(t *testing.T) {
ctx := cuecontext.New()
v := ctx.CompileString("foo: _ @inject(type=nil)")
v = Inject(v, nil)
assert.EqualError(t, v.Err(), "injection error: unsupported injector type nil")
}
func TestInjectNoType(t *testing.T) {
ctx := cuecontext.New()
v := ctx.CompileString("foo: _ @inject()")
v = Inject(v, nil)
assert.EqualError(t, v.Err(), "injection error: missing injector type")
}
func TestInjectFileNoSrc(t *testing.T) {
ctx := cuecontext.New()
v := ctx.CompileString("foo: _ @inject(type=file)")
v = Inject(v, nil)
assert.EqualError(t, v.Err(), "injection error: missing src key for file injector")
}
func TestInjectFile(t *testing.T) {
if runtime.GOOS == "windows" && os.Getenv("CI") != "" {
t.Skip("skipping fs related test on windows")
}
f, err := ioutil.TempFile("", "*.json")
require.NoError(t, err)
defer os.Remove(f.Name())
defer f.Close()
_, err = f.WriteString("{\"potato\": 42}")
require.NoError(t, err)
ctx := cuecontext.New()
v := ctx.CompileString(fmt.Sprintf("foo: _ @inject(type=file, src=%s, path=$.potato)", path.Base(f.Name())))
v = Inject(v, os.DirFS(path.Dir(f.Name())))
assert.NoError(t, v.Err())
json, err := v.MarshalJSON()
assert.NoError(t, err)
assert.Equal(t, "{\"foo\":42}", string(json))
}
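
// Illustrative sketch (not part of the original suite): the two error cases
// above could also be written as one table-driven test. The CUE snippets and
// expected messages are taken verbatim from the tests already in this file.
func TestInjectErrorsTable(t *testing.T) {
	cases := []struct {
		name string
		cue  string
		want string
	}{
		{"nil type", "foo: _ @inject(type=nil)", "injection error: unsupported injector type nil"},
		{"missing type", "foo: _ @inject()", "injection error: missing injector type"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			v := cuecontext.New().CompileString(tc.cue)
			v = Inject(v, nil)
			assert.EqualError(t, v.Err(), tc.want)
		})
	}
}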
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
scripts/sde_pe_train_obs.py | import numpy as np
from tqdm import tqdm as tqdm
import random
import sys, os
# PyTorch stuff
import torch
from torch.optim.lr_scheduler import StepLR
# GradBED stuff
from gradbed.networks.fullyconnected import *
from gradbed.bounds.jsd import *
from gradbed.bounds.nwj import *
from gradbed.utils.initialisations import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# --- Classes and Functions --- #
class SIR_SDE_Observations_Simulator(torch.autograd.Function):
@staticmethod
def forward(ctx, input, device):
# observation factor
phi = torch.tensor(0.95, dtype=torch.float, device=device)
with torch.no_grad():
# compute nearest neighbours in time grid
indices = torch.min(
torch.abs(input - data['ts'][:-1]), axis=1).indices
# extract number of infected from data
y = data['ys'][:-1][indices, :, 1].T
# sample observations
y_obs = torch.poisson(phi * torch.nn.ReLU()(y))
# compute ratios
delta = torch.tensor(1e-8, dtype=torch.float, device=device)
tmp_ratios = y / (y_obs + delta)
zer = torch.zeros_like(y_obs, dtype=torch.float, device=device)
ratios = torch.where(y_obs == 0, zer, tmp_ratios)
ctx.save_for_backward(input, ratios)
ctx.device = device
ctx.indices = indices
ctx.phi = phi
return y_obs
@staticmethod
def backward(ctx, grad_output):
# unpack saved tensors
input, ratios = ctx.saved_tensors
device = ctx.device
indices = ctx.indices
phi = ctx.phi
# extract gradients of infected from data
y_grads = data['grads'][indices, :, 1].T # GLOBAL VARIABLE DATA
# compute observational gradients
obs_grads = (ratios - phi) * y_grads
# compute the Jacobian
identity = torch.eye(
len(indices),
device=device,
dtype=torch.float).reshape(1, len(indices), len(indices))
Jac = torch.mul(
identity.repeat(len(obs_grads), 1, 1), obs_grads[:, None])
# compute the Jacobian vector product
grad_input = Jac.matmul(grad_output[:, :, None])
return grad_input, None
class SIRDatasetPE_obs(torch.utils.data.Dataset):
def __init__(self, d, prior, device):
"""
        A dataset of simulated SIR SDE observations for parameter estimation.
Parameters
----------
        d: torch.tensor
Design variables that we want to optimise.
prior: numpy array
Samples from the prior distribution.
device: torch.device
Device to run the training process on.
"""
super(SIRDatasetPE_obs, self).__init__()
# convert designs and prior samples to PyTorch tensors
self.X = prior
# simulate data
sim_sir = SIR_SDE_Observations_Simulator.apply
self.Y = sim_sir(d, device)
def __getitem__(self, idx):
""" Get Prior samples and data by index.
Parameters
----------
idx : int
Item index
Returns
-------
Batched prior samples, batched data samples
"""
return self.X[idx], self.Y[idx]
def __len__(self):
"""Number of samples in the dataset"""
return len(self.X)
def update(self, d, device):
"""
Simulates new data when d is updated.
"""
# simulate data
sim_sir = SIR_SDE_Observations_Simulator.apply
self.Y = sim_sir(d, device)
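        # Illustrative usage sketch (mirrors the training loop further below;
        # values assumed): the dataset is built once from the current design and
        # the prior samples, indexed to obtain (parameter, observation) pairs,
        # and refreshed in-place after every optimiser step on the design.
        #
        #   dataset = SIRDatasetPE_obs(d, data['prior_samples'], device)
        #   theta, y_obs = dataset[0]
        #   dataset.update(d, device)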
def seed_torch(seed=2019):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# --- HYPER-PARAMETERS --- #
# Parameter dimension; Not relevant for now
modelparams = dict()
# Design Dimensions
modelparams['DIM'] = 3
# Network Params
modelparams['L'] = 4
modelparams['H'] = 20
# Sizes
modelparams['data_size'] = 20000
modelparams['num_epochs'] = 50000
# Optimisation Params: Psi
modelparams['lr_psi'] = 1e-4
modelparams['step_psi'] = 5000
modelparams['gamma_psi'] = 1
# Optimisation Params: Designs
modelparams['lr_d'] = 3 * 1e-2
modelparams['step_d'] = 5000
modelparams['gamma_d'] = 1
# design bounds
bounds = [0, 100]
INIT = 'uniform'
# filename
part = 1 # if you already ran e.g. part=1 you can continue with part=2
FILENAME = '../data/sde_pe_obs_D{}_traindata_part{}.pt'.format(
modelparams['DIM'], part)
# all the flags
Filter = True # Reject bad / trivial SDE solves
PRINT_D = False # Print design to tqdm bar
via_NWJ = True # evaluate JSD lower bound on NWJ lower bound
CLAMP = True # heuristic gradient clamping for numerical stability
CLAMP_VAL = 2 # gradient clamping value
SEED = 12345679
seed_torch(SEED) # set seed
# Load initial model and initial designs (check file below)
if part == 1:
RELOAD = False
else:
RELOAD = True
# --- DATA PREPARATION --- #
data = torch.load('../data/sir_sde_data.pt')
data = {
key: value.to(device)
for key, value in data.items()
if isinstance(value, torch.Tensor)
}
if Filter:
# find the indices corresponding to non-trivial solutions
idx_good = np.where(
np.mean(data['ys'][:, :, 1].data.cpu().numpy(), axis=0) >= 1)[0]
# update the dataset with non-trivial solutions
data['ys'] = data['ys'][:, idx_good, :]
data['grads'] = data['grads'][:, idx_good, :]
data['prior_samples'] = data['prior_samples'][idx_good]
# --- TRAINING PREP --- #
# task specific things
task = 'pe'
loss_function = jsd_loss
# Load Hyper-Parameters if wanted
if RELOAD:
if part >= 10:
fil_load = FILENAME[:-5] + str(part - 1) + '.pt'
else:
fil_load = FILENAME[:-4] + str(part - 1) + '.pt'
meta_info = torch.load(fil_load)
# designs
d_init = meta_info['d_init']
d = torch.tensor(
meta_info['designs_train_jsd'][-1], dtype=torch.float,
requires_grad=True, device=device)
designs = [torch.tensor(
dd,
dtype=torch.float,
device=device) for dd in meta_info['designs_train_jsd']]
# initialise model from previous state
model_init_state = meta_info['model_init_state']
model_last_state = meta_info['model_jsd']
model, _ = initialise_model(
modelparams, device, task=task, model_init_state=model_last_state)
# data containers
train_loss = [torch.tensor(
tl,
dtype=torch.float,
device=device) for tl in meta_info['train_loss_jsd']]
train_loss_viaNWJ = [torch.tensor(
tl,
dtype=torch.float,
device=device) for tl in meta_info['train_loss_jsd_viaNWJ']]
else:
# randomly initialise design
d, d_init = initialise_design(
modelparams, device, bounds=bounds, d_init=None, init_type=INIT)
# randomly initialise neural network
model, model_init_state = initialise_model(
modelparams, device, task=task, model_init_state=None)
# data containers
designs = [d.clone().detach()]
train_loss = list()
train_loss_viaNWJ = list()
print("Initial Design:", np.sort(d_init.reshape(-1).astype(np.int16)))
# Define Optimizers and Schedulers
optimizer_psi = torch.optim.Adam(
model.parameters(), lr=modelparams['lr_psi'], amsgrad=True)
optimizer_design = torch.optim.Adam(
[d], lr=modelparams['lr_d'], amsgrad=True)
scheduler_psi = StepLR(
optimizer_psi, step_size=modelparams['step_psi'],
gamma=modelparams['gamma_psi'])
scheduler_design = StepLR(
optimizer_design, step_size=modelparams['step_d'],
gamma=modelparams['gamma_d'])
if RELOAD:
# load in optimizer state dicts
optimizer_psi.load_state_dict(meta_info['optimizer_psi_state'])
optimizer_design.load_state_dict(meta_info['optimizer_design_state'])
scheduler_psi.load_state_dict(meta_info['scheduler_psi_state'])
scheduler_design.load_state_dict(meta_info['scheduler_design_state'])
del meta_info
# --- TRAINING --- #
num_params = sum(
[np.prod(p.size()) for p in filter(
lambda p: p.requires_grad, model.parameters())])
print("Number of trainable parameters", num_params)
# initialize dataset
dataset = SIRDatasetPE_obs(d, data['prior_samples'], device)
# training loop
pbar = tqdm(range(modelparams['num_epochs']), leave=True, disable=False)
for epoch in pbar:
# update samples in dataset
dataset.update(d, device)
# get shuffled data
idx = np.random.choice(
range(len(data['prior_samples'])),
size=modelparams['data_size'],
replace=False)
x, y = dataset[idx]
# move to device if not there yet
x, y = x.to(device), y.to(device)
# compute loss
loss = loss_function(x, y, model, device)
# Zero grad the NN optimizer
optimizer_psi.zero_grad()
optimizer_design.zero_grad()
# Back-Propagation
loss.backward()
# heuristic clamping of gradients for numerical stability
if CLAMP:
with torch.no_grad():
for param in model.parameters():
param.grad.clamp_(-CLAMP_VAL, CLAMP_VAL)
# Perform opt steps for NN
optimizer_psi.step()
optimizer_design.step()
# save a few things to lists
train_loss.append(-loss.clone().detach())
# compute MI estimate and update tqdm bar
if via_NWJ:
tl = nwj_loss(x, y, lambda a, b: model(a, b) + 1, device)
train_loss_viaNWJ.append(-tl.clone().detach())
if PRINT_D:
pbar.set_postfix(
MI='{:.3f}'.format(-tl.data.cpu().numpy()),
JSD='{:.3f}'.format(-loss.data.cpu().numpy()),
d=np.sort(d.data.cpu().numpy().reshape(-1)).astype(np.int16))
else:
pbar.set_postfix(
MI='{:.3f}'.format(-tl.data.cpu().numpy()),
JSD='{:.3f}'.format(-loss.data.cpu().numpy()))
else:
if PRINT_D:
pbar.set_postfix(
JSD='{:.3f}'.format(-loss.data.cpu().numpy()),
d=np.sort(d.data.cpu().numpy().reshape(-1)).astype(np.int16))
else:
pbar.set_postfix(
JSD='{:.3f}'.format(-loss.data.cpu().numpy()))
# LR scheduler step for psi and designs
scheduler_psi.step()
scheduler_design.step()
# Clamp design if beyond boundaries
with torch.no_grad():
d.clamp_(bounds[0], bounds[1])
# Save designs to list
designs.append(d.clone().detach())
# --- SAVE DATA --- #
# clean up lists
train_loss = np.array([i.cpu().data.numpy() for i in train_loss])
train_loss_viaNWJ = np.array([i.cpu().data.numpy() for i in train_loss_viaNWJ])
designs = np.array([dd.cpu().tolist() for dd in designs])
# create save_dict
save_dict = dict()
save_dict['seed'] = SEED
save_dict['modelparams_jsd'] = modelparams
save_dict['d_init'] = d_init
save_dict['model_init_state'] = model_init_state
save_dict['designs_train_jsd'] = designs
save_dict['model_jsd'] = model.state_dict()
save_dict['train_loss_jsd'] = train_loss
save_dict['train_loss_jsd_viaNWJ'] = train_loss_viaNWJ
save_dict['optimizer_psi_state'] = optimizer_psi.state_dict()
save_dict['optimizer_design_state'] = optimizer_design.state_dict()
save_dict['scheduler_psi_state'] = scheduler_psi.state_dict()
save_dict['scheduler_design_state'] = scheduler_design.state_dict()
# save data
torch.save(save_dict, FILENAME)
| []
| []
| [
"PYTHONHASHSEED"
]
| [] | ["PYTHONHASHSEED"] | python | 1 | 0 | |
main.go | package main
import (
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"github.com/flanksource/karina/cmd"
)
var (
version = "dev"
commit = "none"
date = "unknown"
)
func main() {
var root = &cobra.Command{
Use: "karina",
PersistentPreRun: cmd.GlobalPreRun,
}
root.AddCommand(
cmd.Access,
cmd.APIDocs,
cmd.Apply,
cmd.Backup,
cmd.CA,
cmd.Cleanup,
cmd.CleanupJobs,
cmd.Config,
cmd.Conformance,
cmd.Consul,
cmd.Dashboard,
cmd.DB,
cmd.Deploy,
cmd.DNS,
cmd.Exec,
cmd.ExecNode,
cmd.Etcd,
cmd.Harbor,
cmd.Images,
cmd.Logs,
cmd.MachineImages,
cmd.Namespace,
cmd.Node,
cmd.NSX,
cmd.Operator,
cmd.Orphan,
cmd.Provision,
cmd.Render,
cmd.Report,
cmd.Rolling,
cmd.Snapshot,
cmd.Status,
cmd.Test,
cmd.TerminateNodes,
cmd.TerminateOrphans,
cmd.Undelete,
cmd.Upgrade,
cmd.Vault,
)
if len(commit) > 8 {
version = fmt.Sprintf("%v, commit %v, built at %v", version, commit[0:8], date)
}
root.AddCommand(&cobra.Command{
Use: "version",
Short: "Print the version of karina",
Args: cobra.MinimumNArgs(0),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(version)
},
})
docs := &cobra.Command{
Use: "docs",
Short: "generate documentation",
}
docs.AddCommand(&cobra.Command{
Use: "cli [PATH]",
Short: "generate CLI documentation",
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
err := doc.GenMarkdownTree(root, args[0])
if err != nil {
log.Fatal(err)
}
},
})
docs.AddCommand(cmd.APIDocs)
root.AddCommand(docs)
config := "karina.yml"
if env := os.Getenv("PLATFORM_CONFIG"); env != "" {
config = env
}
root.PersistentFlags().StringArrayP("config", "c", []string{config}, "Path to config file")
root.PersistentFlags().StringArrayP("extra", "e", nil, "Extra arguments to apply e.g. -e ldap.domain=example.com")
root.PersistentFlags().StringP("kubeconfig", "", "", "Specify a kubeconfig to use, if empty a new kubeconfig is generated from master CA's at runtime")
root.PersistentFlags().CountP("loglevel", "v", "Increase logging level")
root.PersistentFlags().Bool("prune", true, "Delete previously enabled resources")
root.PersistentFlags().Bool("dry-run", false, "Don't apply any changes, print what would have been done")
root.PersistentFlags().Bool("trace", false, "Print out generated specs and configs")
root.PersistentFlags().Bool("in-cluster", false, "Use in cluster kubernetes config")
root.SetUsageTemplate(root.UsageTemplate() + fmt.Sprintf("\nversion: %s\n ", version))
if err := root.Execute(); err != nil {
os.Exit(1)
}
}
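
// Illustrative invocations (assumed command names, based on the handlers and
// flags registered above; not part of the original file):
//
//	PLATFORM_CONFIG=prod.yml karina deploy -e ldap.domain=example.com -v
//	karina version
//	karina docs cli ./docs/cli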
| [
"\"PLATFORM_CONFIG\""
]
| []
| [
"PLATFORM_CONFIG"
]
| [] | ["PLATFORM_CONFIG"] | go | 1 | 0 | |
build.py | import os, re, sys, subprocess, time, argparse, shutil
import regex #https://pypi.org/project/regex/
from colorama import Fore, Style, init #https://pypi.org/project/colorama/
#customizable data
project_name = "G8MJ01"
C_BUILD_FLAGS = ["-Iinclude/", "-once", "-O4,p", "-opt", "nodeadcode", "-sdata", "48", "-sdata2", "6", "-multibyte", "-rostr",
"-use_lmw_stmw", "on", "-w", "all", "-w", "nonotused", "-w", "nounusedarg", "-w", "nopadding"] #, "-inline", "deferred"
C_LINK_FLAGS = ["-lcf", "linker/linker.lcf", "-map", "build/%s.map" % project_name]
def iconv_check(source_file):
if source_file in [b"source/event.c", b"source/data/item_data.c", b"source/mario_pouch.c"]:#b"source/data/npc_data.c"]:
with open("build/temp/iconv.c", "wb") as o:
subprocess.run(["iconv.exe", "-f", "UTF-8", "-t", "SJIS", source_file], stdout=o)
print("running iconv")
return "build/temp/iconv.c", True
else:
return source_file, False
def iconv_delete():
os.remove("build/temp/iconv.c")
#-----------------------------------------------------------------------------------------------------------------------------
#helper functions
def check_env():
paths = {}
if not os.getenv("SDK_BASE_PATH"):
print("SDK_BASE_PATH not defined. Please point it to your SDK root directory.")
sys.exit(0)
paths["SDK_BASE_PATH"] = os.getenv("SDK_BASE_PATH")
paths["SDK_LIB_PATH"] = paths["SDK_BASE_PATH"] + "/HW2/lib/"
paths["SDK_INC_PATH"] = paths["SDK_BASE_PATH"] + "/include/"
if not os.getenv("MW_BASE_PATH"):
print("MW_BASE_PATH not defined. Please point it to your Metrowerks CodeWarrior root directory.")
sys.exit(0)
paths["MW_BASE_PATH"] = os.getenv("MW_BASE_PATH")
paths["MW_LIB_PATH"] = paths["MW_BASE_PATH"] + "/PowerPC_EABI_Support/"
paths["MW_INC_PATH"] = paths["MW_BASE_PATH"] + "/PowerPC_EABI_Support/MSL/MSL_C/MSL_Common/Include/"
paths["MW_BIN_PATH"] = paths["MW_BASE_PATH"] + "/PowerPC_EABI_Tools/Command_Line_Tools/"
if not os.getenv("MUSYX_BASE_PATH"):
print("MUSYX_BASE_PATH not defined. Please point it to your MusyX root directory.")
sys.exit(0)
paths["MUSYX_BASE_PATH"] = os.getenv("MUSYX_BASE_PATH")
paths["MUSYX_LIB_PATH"] = paths["MUSYX_BASE_PATH"] + "/HW2/lib/"
paths["MUSYX_INC_PATH"] = paths["MUSYX_BASE_PATH"] + "/include/"
if not os.path.exists(paths["MW_BIN_PATH"] + "/mwcceppc.exe"):
print("Couldn't find mwcceppc.exe in MW_BASE_PATH. Please ensure you're pointing to the right directory.")
sys.exit(0)
if not os.path.exists(paths["MW_BIN_PATH"] + "/mwldeppc.exe"):
print("Couldn't find mwldeppc.exe in MW_BASE_PATH. Please ensure you're pointing to the right directory.")
sys.exit(0)
return paths
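# Illustrative environment setup (assumed paths; not part of the original
# script): the three variables validated above would typically be set before
# invoking the build, for example on Windows:
#
#   set SDK_BASE_PATH=C:/dolphin-sdk
#   set MW_BASE_PATH=C:/CodeWarrior
#   set MUSYX_BASE_PATH=C:/MusyX
#   python build.py release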
def generate_libraries(paths, build_type):
array = []
#SDK libraries
lib_names = ["ai", "am", "amcnotstub", "amcstubs", "ar", "ax", "axart", "axfx", "base", "card", "db", "demo",
"dsp", "dtk", "dvd", "exi", "fileCache", "G2D", "gd", "gx", "hio", "mcc", "mix", "mtx", "odemustubs",
"odenotstub", "os", "pad", "perf", "seq", "si", "sp", "support", "syn", "texPalette", "vi"]
for entry in lib_names:
if build_type.lower() == "debug":
entry += "D.a"
else:
entry += ".a"
array.append("%s/%s" % (paths["SDK_LIB_PATH"], entry))
#Metrowerks libraries
array.append("%s/%s" % (paths["MW_LIB_PATH"], "MSL/MSL_C/PPC_EABI/Lib/MSL_C.PPCEABI.bare.H.a"))
array.append("%s/%s" % (paths["MW_LIB_PATH"], "MetroTRK/TRK_Minnow_Dolphin.a"))
array.append("%s/%s" % (paths["MW_LIB_PATH"], "Runtime/Lib/Runtime.PPCEABI.H.a"))
#MusyX library
array.append("%s/%s" % (paths["MUSYX_LIB_PATH"], "musyx.a"))
return array
def build_MWCIncludes(paths):
string = ""
string += "%s/%s;" % (paths["MW_BASE_PATH"], "PowerPC_EABI_Support/MSL/MSL_C/MSL_Common/Include")
string += "%s/%s;" % (paths["MW_BASE_PATH"], "PowerPC_EABI_Support/MSL/MSL_C/PPC_EABI/Include")
string += "%s/%s;" % (paths["SDK_BASE_PATH"], "include")
return string
def find_all_include(path, include, new_array):
data = open(b"%s/%s" % (include, path), "rb").read()
for entry in regex.findall(b'#[[:blank:]]*include[[:blank:]]*\"\K([\w.\/]+)(?=\")', data): #change \" to ["|>] to include system
if entry not in new_array:
for entry1 in find_all_include(entry, b"include/", new_array + [entry]):
if entry1 not in new_array:
new_array.append(entry1)
return new_array
def build_cache_files(object_depends, cache_times, build_flags, link_flags):
all_paths = []
with open("build/cache/cached_data.txt", "wb") as o:
for object_file in object_depends.keys():
if object_file not in [b"", ""] and object_depends[object_file] != []:
if object_file not in all_paths:
all_paths.append(object_file)
try: o.write(object_file.encode("ASCII"))
except: o.write(object_file)
o.write(b"\r\n")
for include_file in object_depends[object_file]:
if include_file not in all_paths:
all_paths.append(include_file)
o.write(include_file + b"\r\n")
o.write(b"\r\n") #extra to separate entries
with open("build/cache/cache_times.txt", "wb") as o:
for path in all_paths:
try: o.write(path.encode("ASCII"))
except: o.write(path)
o.write(b" " + str(os.path.getmtime(path)).encode("ASCII") + b"\r\n")
with open("build/cache/cache_build_flags.txt", "wb") as o:
for entry in build_flags:
o.write(entry.strip().encode("ASCII") + b"\r\n")
with open("build/cache/cache_link_flags.txt", "wb") as o:
for entry in link_flags:
o.write(entry.strip().encode("ASCII") + b"\r\n")
def compile_object(source_file, output_file, flag_array, paths, MWCIncludes):
arguments = [paths["MW_BIN_PATH"] + "/mwcceppc.exe", "-o", output_file, "-c", source_file] + flag_array
return subprocess.run(arguments, env={"MWCIncludes": MWCIncludes})
def link_executable(object_files, output_file, flag_array, paths):
arguments = [paths["MW_BIN_PATH"] + "/mwldeppc.exe", "-o", output_file] + flag_array + object_files
return subprocess.run(arguments)
def create_dol(source_file, output_file, paths, do_verbose=False):
arguments = [paths["SDK_BASE_PATH"] + "/X86/bin/makedol.exe", "-f", source_file, "-d", output_file]
if do_verbose:
arguments.append("-v")
return subprocess.run(arguments)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='CodeWarrior Project Build System')
parser.add_argument("build_type", nargs="?", default="release", choices=["debug", "release", "clean", "rebuild"])
args = parser.parse_args()
build_type = args.build_type
if build_type == "clean":
shutil.rmtree("build/cache/")
shutil.rmtree("build/objects/") #may fail, good enough
shutil.rmtree("build/temp/")
else: #debug, release
if build_type == "rebuild": #just delete the folders
shutil.rmtree("build/cache/")
shutil.rmtree("build/objects/") #may fail, good enough
shutil.rmtree("build/temp/")
#generators
paths = check_env()
lib_paths = generate_libraries(paths, build_type)
MWCIncludes = build_MWCIncludes(paths)
if not os.path.exists("build/cache/"):
os.makedirs("build/cache/")
if not os.path.exists("build/objects/"):
os.makedirs("build/objects/")
if not os.path.exists("build/temp/"):
os.makedirs("build/temp/")
#standard defines
if build_type.lower() == "debug":
HW2_Prefix = paths["MW_LIB_PATH"] + "/Include/Prefix/HW2_Debug_Prefix.h"
else:
HW2_Prefix = paths["MW_LIB_PATH"] + "/Include/Prefix/HW2_Release_Prefix.h"
C_BASE_FLAGS = ["-gccinc", "-proc", "gekko", "-fp", "hard", "-maf", "on", "-enum", "int"]
C_BASE_INCLUDE_FLAGS = ["-I" + paths["SDK_INC_PATH"], "-I" + paths["MW_INC_PATH"],
"-I" + paths["MUSYX_INC_PATH"], "-include", HW2_Prefix]
C_BASE_FLAGS_DEBUG = ["-Og"]
C_BASE_FLAGS_RELEASE = ["-O2"]
C_BASE_LINK_FLAGS = ["-proc", "gekko", "-fp", "hard", "-nostdlib"]
C_BASE_LINK_INC_FLAGS = ["-l,"] + lib_paths
#find all source files
file_list = []
for root, dirs, files in os.walk("source"):
new_root = root.replace("\\", "/").encode("ASCII")
if new_root != b"source/MusyX": #exclude this folder, TODO move to new project?
for filename in files:
filename = filename.encode("ASCII")
if filename.endswith(b".c"):
source_file = b"%s/%s" % (new_root, filename)
output_file = b"build/objects/%s/%s" % (b"/".join(new_root.split(b"/")[1:]), b".".join(filename.split(b".")[:-1]) + b".o")
output_path = b"build/objects/%s" % (b"/".join(new_root.split(b"/")[1:]))
file_list.append([source_file, output_file, output_path])
else: pass#print(root, dirs, files)
#get cache data
REBUILD_ALL = False
old_build_flags = []
if os.path.exists("build/cache/cache_build_flags.txt"):
with open("build/cache/cache_build_flags.txt", "rb") as f:
for line in f.readlines():
old_build_flags.append(line.strip().decode("ASCII"))
if old_build_flags != (C_BASE_FLAGS + C_BUILD_FLAGS):
REBUILD_ALL = True
RELINK_ALL = False
old_link_flags = []
if os.path.exists("build/cache/cache_link_flags.txt"):
with open("build/cache/cache_link_flags.txt", "rb") as f:
for line in f.readlines():
old_link_flags.append(line.strip().decode("ASCII"))
if old_link_flags != (C_BASE_LINK_FLAGS + C_LINK_FLAGS):
RELINK_ALL = True
object_depends = {}
if os.path.exists("build/cache/cached_data.txt"):
data = open("build/cache/cached_data.txt", "rb").read()
for object_file in data.split(b"\r\n\r\n"): #object path, include paths, double return
lines = object_file.split(b"\r\n")
object_depends[lines[0]] = lines[1:]
cache_times = {}
if os.path.exists("build/cache/cache_times.txt"):
with open("build/cache/cache_times.txt", "rb") as f:
for line in f.readlines():
obj_time = float(line.split(b" ")[-1])
obj_path = b" ".join(line.split(b" ")[:-1])
cache_times[obj_path] = obj_time
object_files = []
do_compile = []
for current_file in file_list:
if not REBUILD_ALL and current_file[0] in object_depends.keys(): #we have it cached, check if it needs rebuilt
if os.path.getmtime(current_file[0]) > cache_times[current_file[0]]: #C file updated, needs rebuild
do_compile.append(current_file)
else:
for include in object_depends[current_file[0]]:
if os.path.getmtime(include) > cache_times[include]: #header file updated, needs rebuild
do_compile.append(current_file)
break #out of include loop
else: #default to building
do_compile.append(current_file)
object_files.append(current_file[1])
#compile needed files
init() #setup colors
for i in range(len(do_compile)):
#print, initialization
current_file = do_compile[i]
percent = float(i) / float(len(do_compile)) * 100.0
percent = "[%.0f%%] " % percent
print(Fore.GREEN + percent + "Building " + current_file[0].decode("ASCII") + Style.RESET_ALL)
        print()
if not os.path.exists(current_file[2]):
os.makedirs(current_file[2])
#get cache data
        includes = find_all_include(current_file[0], b".", [])
        includes.sort(key=lambda x: x.lower())
        object_depends[current_file[0]] = [] #reset that entry
        for entry in includes:
            object_depends[current_file[0]].append(b"include/" + entry)
#compile file
source_file, iconv_removeme = iconv_check(current_file[0]) #custom step, remove for other projects
ret = compile_object(source_file, current_file[1], C_BASE_FLAGS+C_BASE_INCLUDE_FLAGS+C_BUILD_FLAGS, paths, MWCIncludes)
if ret.returncode != 0: #file didn't compile correctly, abort
del object_depends[current_file[0]] #remove current entry so it will rebuild, save all others
build_cache_files(object_depends, cache_times, C_BASE_FLAGS+C_BUILD_FLAGS, C_BASE_LINK_FLAGS+C_LINK_FLAGS) #store new cache
sys.exit(0)
if iconv_removeme: iconv_delete() #custom step, remove for other projects
#build executable files
print(Fore.GREEN + Style.BRIGHT + "[100%%] Linking build/%s.elf" % project_name + Style.RESET_ALL)
link_executable(object_files, "build/%s.elf" % project_name, C_BASE_LINK_INC_FLAGS + C_BASE_LINK_FLAGS + C_LINK_FLAGS, paths)
create_dol("build/%s.elf" % project_name, "build/%s.dol" % project_name, paths)
build_cache_files(object_depends, cache_times, C_BASE_FLAGS+C_BUILD_FLAGS, C_BASE_LINK_FLAGS+C_LINK_FLAGS) #store new cache
| []
| []
| [
"SDK_BASE_PATH",
"MUSYX_BASE_PATH",
"MW_BASE_PATH"
]
| [] | ["SDK_BASE_PATH", "MUSYX_BASE_PATH", "MW_BASE_PATH"] | python | 3 | 0 | |
benchmarks/frozen_lake_desarsa.py | #
# Copyright (C) 2019 Luca Pasqualini
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under the BSD 3-Clause License.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import packages
import tensorflow
import logging
import os
# Import usienarl
from usienarl import Config, LayerType
from usienarl.utils import run_experiment, command_line_parse
from usienarl.models import DeepExpectedSARSA
from usienarl.agents import DeepExpectedSARSAAgentEpsilonGreedy, DeepExpectedSARSAAgentBoltzmann, DeepExpectedSARSAAgentDirichlet
# Import required src
# Require error handling to support both deployment and pycharm versions
try:
from src.openai_gym_environment import OpenAIGymEnvironment
from src.frozen_lake_refactored_environment import FrozenLakeRefactoredEnvironment
from src.benchmark_experiment import BenchmarkExperiment
except ImportError:
from benchmarks.src.openai_gym_environment import OpenAIGymEnvironment
from benchmarks.src.frozen_lake_refactored_environment import FrozenLakeRefactoredEnvironment
from benchmarks.src.benchmark_experiment import BenchmarkExperiment
# Define utility functions to run the experiment
def _define_desarsa_model(config: Config, error_clip: bool = True) -> DeepExpectedSARSA:
# Define attributes
learning_rate: float = 1e-3
discount_factor: float = 0.99
buffer_capacity: int = 1000
minimum_sample_probability: float = 1e-2
random_sample_trade_off: float = 0.6
importance_sampling_value_increment: float = 0.4
importance_sampling_value: float = 1e-3
# Return the _model
return DeepExpectedSARSA("model_mse" if not error_clip else "model_huber",
config,
buffer_capacity,
learning_rate, discount_factor,
minimum_sample_probability, random_sample_trade_off,
importance_sampling_value, importance_sampling_value_increment,
error_clip)
def _define_epsilon_greedy_agent(model: DeepExpectedSARSA) -> DeepExpectedSARSAAgentEpsilonGreedy:
# Define attributes
summary_save_step_interval: int = 500
weight_copy_step_interval: int = 25
batch_size: int = 100
exploration_rate_max: float = 1.0
exploration_rate_min: float = 1e-3
exploration_rate_decay: float = 1e-3
# Return the agent
return DeepExpectedSARSAAgentEpsilonGreedy("desarsa_agent", model, summary_save_step_interval, weight_copy_step_interval, batch_size,
exploration_rate_max, exploration_rate_min, exploration_rate_decay)
def _define_boltzmann_agent(model: DeepExpectedSARSA) -> DeepExpectedSARSAAgentBoltzmann:
# Define attributes
summary_save_step_interval: int = 500
weight_copy_step_interval: int = 25
batch_size: int = 100
temperature_max: float = 1.0
temperature_min: float = 1e-3
temperature_decay: float = 1e-3
# Return the agent
return DeepExpectedSARSAAgentBoltzmann("desarsa_agent", model, summary_save_step_interval, weight_copy_step_interval, batch_size,
temperature_max, temperature_min, temperature_decay)
def _define_dirichlet_agent(model: DeepExpectedSARSA) -> DeepExpectedSARSAAgentDirichlet:
# Define attributes
summary_save_step_interval: int = 500
weight_copy_step_interval: int = 25
batch_size: int = 100
alpha: float = 1.0
dirichlet_trade_off_min: float = 0.5
dirichlet_trade_off_max: float = 1.0
dirichlet_trade_off_update: float = 1e-3
# Return the agent
return DeepExpectedSARSAAgentDirichlet("desarsa_agent", model, summary_save_step_interval, weight_copy_step_interval, batch_size,
alpha, dirichlet_trade_off_min, dirichlet_trade_off_max, dirichlet_trade_off_update)
def run(workspace: str,
experiment_iterations: int,
render_training: bool, render_validation: bool, render_test: bool):
# Define the logger
logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Frozen Lake environment:
# - general success threshold to consider the training and the experiment successful is 0.78 over 100 episodes according to OpenAI guidelines
# - general success threshold for refactored environment is little above (slippery) the minimum number of steps required to reach the goal
environment_name: str = 'FrozenLake-v0'
success_threshold: float = 0.78
success_threshold_refactored: float = -8
# Generate the OpenAI environment
environment: OpenAIGymEnvironment = OpenAIGymEnvironment(environment_name)
# Generate the refactored environment
environment_refactored: FrozenLakeRefactoredEnvironment = FrozenLakeRefactoredEnvironment(environment_name)
# Define Neural Network layers
nn_config: Config = Config()
nn_config.add_hidden_layer(LayerType.dense, [32, tensorflow.nn.relu, True, tensorflow.contrib.layers.xavier_initializer()], layer_name="dense_1")
nn_config.add_hidden_layer(LayerType.dense, [32, tensorflow.nn.relu, True, tensorflow.contrib.layers.xavier_initializer()], layer_name="dense_2")
# Define model
inner_model: DeepExpectedSARSA = _define_desarsa_model(nn_config)
# Define agents
desarsa_agent_epsilon_greedy: DeepExpectedSARSAAgentEpsilonGreedy = _define_epsilon_greedy_agent(inner_model)
desarsa_agent_boltzmann: DeepExpectedSARSAAgentBoltzmann = _define_boltzmann_agent(inner_model)
desarsa_agent_dirichlet: DeepExpectedSARSAAgentDirichlet = _define_dirichlet_agent(inner_model)
# Define experiments
experiment_epsilon_greedy: BenchmarkExperiment = BenchmarkExperiment("experiment_epsilon_greedy", success_threshold, environment,
desarsa_agent_epsilon_greedy)
experiment_boltzmann: BenchmarkExperiment = BenchmarkExperiment("experiment_boltzmann", success_threshold, environment,
desarsa_agent_boltzmann)
experiment_dirichlet: BenchmarkExperiment = BenchmarkExperiment("experiment_dirichlet", success_threshold, environment,
desarsa_agent_dirichlet)
# Define refactored experiments
experiment_epsilon_greedy_refactored: BenchmarkExperiment = BenchmarkExperiment("experiment_refactored_epsilon_greedy", success_threshold_refactored,
environment_refactored,
desarsa_agent_epsilon_greedy)
experiment_boltzmann_refactored: BenchmarkExperiment = BenchmarkExperiment("experiment_refactored_boltzmann", success_threshold_refactored,
environment_refactored,
desarsa_agent_boltzmann)
experiment_dirichlet_refactored: BenchmarkExperiment = BenchmarkExperiment("experiment_refactored_dirichlet", success_threshold_refactored,
environment_refactored,
desarsa_agent_dirichlet)
# Define experiments data
saves_to_keep: int = 1
plots_dpi: int = 150
parallel: int = 10
training_episodes: int = 100
validation_episodes: int = 100
training_validation_volleys: int = 20
test_episodes: int = 100
test_volleys: int = 10
episode_length_max: int = 100
# Run experiments
run_experiment(logger=logger, experiment=experiment_epsilon_greedy,
file_name=__file__, workspace_path=workspace,
training_volleys_episodes=training_episodes, validation_volleys_episodes=validation_episodes,
training_validation_volleys=training_validation_volleys,
test_volleys_episodes=test_episodes, test_volleys=test_volleys,
episode_length=episode_length_max, parallel=parallel,
render_during_training=render_training, render_during_validation=render_validation,
render_during_test=render_test,
iterations=experiment_iterations, saves_to_keep=saves_to_keep, plots_dpi=plots_dpi)
run_experiment(logger=logger, experiment=experiment_boltzmann,
file_name=__file__, workspace_path=workspace,
training_volleys_episodes=training_episodes, validation_volleys_episodes=validation_episodes,
training_validation_volleys=training_validation_volleys,
test_volleys_episodes=test_episodes, test_volleys=test_volleys,
episode_length=episode_length_max, parallel=parallel,
render_during_training=render_training, render_during_validation=render_validation,
render_during_test=render_test,
iterations=experiment_iterations, saves_to_keep=saves_to_keep, plots_dpi=plots_dpi)
run_experiment(logger=logger, experiment=experiment_dirichlet,
file_name=__file__, workspace_path=workspace,
training_volleys_episodes=training_episodes, validation_volleys_episodes=validation_episodes,
training_validation_volleys=training_validation_volleys,
test_volleys_episodes=test_episodes, test_volleys=test_volleys,
episode_length=episode_length_max, parallel=parallel,
render_during_training=render_training, render_during_validation=render_validation,
render_during_test=render_test,
iterations=experiment_iterations, saves_to_keep=saves_to_keep, plots_dpi=plots_dpi)
# Run refactored experiments
run_experiment(logger=logger, experiment=experiment_epsilon_greedy_refactored,
file_name=__file__, workspace_path=workspace,
training_volleys_episodes=training_episodes, validation_volleys_episodes=validation_episodes,
training_validation_volleys=training_validation_volleys,
test_volleys_episodes=test_episodes, test_volleys=test_volleys,
episode_length=episode_length_max, parallel=parallel,
render_during_training=render_training, render_during_validation=render_validation,
render_during_test=render_test,
iterations=experiment_iterations, saves_to_keep=saves_to_keep, plots_dpi=plots_dpi)
run_experiment(logger=logger, experiment=experiment_boltzmann_refactored,
file_name=__file__, workspace_path=workspace,
training_volleys_episodes=training_episodes, validation_volleys_episodes=validation_episodes,
training_validation_volleys=training_validation_volleys,
test_volleys_episodes=test_episodes, test_volleys=test_volleys,
episode_length=episode_length_max, parallel=parallel,
render_during_training=render_training, render_during_validation=render_validation,
render_during_test=render_test,
iterations=experiment_iterations, saves_to_keep=saves_to_keep, plots_dpi=plots_dpi)
run_experiment(logger=logger, experiment=experiment_dirichlet_refactored,
file_name=__file__, workspace_path=workspace,
training_volleys_episodes=training_episodes, validation_volleys_episodes=validation_episodes,
training_validation_volleys=training_validation_volleys,
test_volleys_episodes=test_episodes, test_volleys=test_volleys,
episode_length=episode_length_max, parallel=parallel,
render_during_training=render_training, render_during_validation=render_validation,
render_during_test=render_test,
iterations=experiment_iterations, saves_to_keep=saves_to_keep, plots_dpi=plots_dpi)
if __name__ == "__main__":
# Remove tensorflow deprecation warnings
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
# Parse the command line arguments
workspace_path, experiment_iterations_number, cuda_devices, render_during_training, render_during_validation, render_during_test = command_line_parse()
# Define the CUDA devices in which to run the experiment
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_devices
# Run this experiment
    run(workspace_path, experiment_iterations_number, render_during_training, render_during_validation, render_during_test)
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
nbi/server.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package nbi
import (
"fmt"
"net/http"
"os"
"path"
"strings"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
elog "github.com/labstack/gommon/log"
"go.elastic.co/apm/module/apmechov4"
"github.com/ca17/teamsacs/common"
"github.com/ca17/teamsacs/common/log"
"github.com/ca17/teamsacs/common/tpl"
"github.com/ca17/teamsacs/models"
)
// ListenNBIServer starts the NBI (northbound interface) management server.
func ListenNBIServer(manager *models.ModelManager) error {
e := echo.New()
e.Pre(middleware.RemoveTrailingSlash())
e.Use(middleware.GzipWithConfig(middleware.GzipConfig{
Level: 5,
}))
if os.Getenv("ELASTIC_APM_SERVER_URL") != "" {
e.Use(apmechov4.Middleware())
} else {
e.Use(ServerRecover(manager.Config.NBI.Debug))
}
e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
Format: "nbi ${time_rfc3339} ${remote_ip} ${method} ${uri} ${protocol} ${status} ${id} ${user_agent} ${latency} ${bytes_in} ${bytes_out} ${error}\n",
Output: os.Stdout,
}))
manager.WebJwtConfig = &middleware.JWTConfig{
SigningMethod: middleware.AlgorithmHS256,
SigningKey: []byte(manager.Config.NBI.JwtSecret),
Skipper: func(c echo.Context) bool {
if strings.HasPrefix(c.Path(), "/nbi/status") ||
strings.HasPrefix(c.Path(), "/nbi/token") {
return true
}
return false
},
ErrorHandler: func(err error) error {
return NewHTTPError(http.StatusBadRequest, "Missing tokens, Access denied")
},
}
e.Use(middleware.JWTWithConfig(*manager.WebJwtConfig))
// Init Handlers
httphandler := NewHttpHandler(&WebContext{
Manager: manager,
Config: manager.Config,
})
httphandler.InitAllRouter(e)
manager.TplRender = tpl.NewCommonTemplate([]string{"/resources/templates"}, manager.Dev, manager.GetTemplateFuncMap())
e.Renderer = manager.TplRender
e.HideBanner = true
e.Logger.SetLevel(common.If(manager.Config.NBI.Debug, elog.DEBUG, elog.INFO).(elog.Lvl))
e.Debug = manager.Config.NBI.Debug
log.Info("try start tls web server")
err := e.StartTLS(fmt.Sprintf("%s:%d", manager.Config.NBI.Host, manager.Config.NBI.Port),
path.Join(manager.Config.GetPrivateDir(), "teamsacs-nbi.tls.crt"), path.Join(manager.Config.GetPrivateDir(), "teamsacs-nbi.tls.key"))
if err != nil {
log.Warningf("start tls server error %s", err)
log.Info("start web server")
err = e.Start(fmt.Sprintf("%s:%d", manager.Config.NBI.Host, manager.Config.NBI.Port))
}
return err
}
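
// Illustrative client sketch (assumed host/port and token value; endpoint
// behaviour follows the JWT skipper configured above): /nbi/status and
// /nbi/token are reachable without a token, everything else requires an
// HS256-signed bearer token.
//
//	curl -k https://127.0.0.1:9000/nbi/status
//	curl -k -H "Authorization: Bearer <jwt>" https://127.0.0.1:9000/nbi/...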
func ServerRecover(debug bool) echo.MiddlewareFunc {
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
defer func() {
if r := recover(); r != nil {
err, ok := r.(error)
if !ok {
err = fmt.Errorf("%v", r)
}
if debug {
log.Errorf("%+v", r)
}
c.Error(echo.NewHTTPError(http.StatusInternalServerError, err.Error()))
}
}()
return next(c)
}
}
}
| [
"\"ELASTIC_APM_SERVER_URL\""
]
| []
| [
"ELASTIC_APM_SERVER_URL"
]
| [] | ["ELASTIC_APM_SERVER_URL"] | go | 1 | 0 | |
tools/include/device_utils.py | """Utilities for interacting with a remote device."""
from __future__ import print_function
from __future__ import division
import os
import sys
import re
import subprocess
import textwrap
from time import sleep
def remote_shell(cmd, verbose=True):
"""Run the given command on on the device and return stdout. Throw an
exception if the remote command returns a non-zero return code.
Don't use this command for programs included in /system/bin/toolbox, such
as ls and ps; instead, use remote_toolbox_cmd.
adb shell doesn't check the remote command's error code. So to check this
ourselves, we echo $? after running the command and then strip that off
before returning the command's output.
"""
out = shell(r"""adb shell '%s; echo -n "|$?"'""" % cmd)
# The final '\n' in |out| separates the command output from the return
# code. (There's no newline after the return code because we did echo -n.)
(cmd_out, _, retcode) = out.rpartition('|')
retcode = retcode.strip()
if retcode == '0':
return cmd_out
if verbose:
print('Remote command %s failed with error code %s' % (cmd, retcode),
file=sys.stderr)
if cmd_out:
print(cmd_out, file=sys.stderr)
raise subprocess.CalledProcessError(retcode, cmd, cmd_out)
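# Illustrative worked example of the return-code handling above (hypothetical
# output): if the remote command printed "hello\n" and exited with status 0,
# adb hands back the string "hello\n|0"; rpartition('|') then splits it into
# ("hello\n", "|", "0"), so the command output and the exit code are recovered
# separately.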
def remote_toolbox_cmd(cmd, args='', verbose=True):
"""Run the given command from /system/bin/toolbox on the device. Pass
args, if specified, and return stdout. Throw an exception if the command
returns a non-zero return code.
cmd must be a command that's part of /system/bin/toolbox. If you want to
run an arbitrary command, use remote_shell.
Use remote_toolbox_cmd instead of remote_shell if you're invoking a program
that's included in /system/bin/toolbox. remote_toolbox_cmd will ensure
that we use the toolbox's version, instead of busybox's version, even if
busybox is installed on the system. This will ensure that we get
the same output regardless of whether busybox is installed.
"""
return remote_shell('/system/bin/toolbox "%s" %s' % (cmd, args), verbose)
def remote_ls(dir, verbose=True):
"""Run ls on the remote device, and return a set containing the results."""
return {f.strip() for f in remote_toolbox_cmd('ls', dir, verbose).split('\n')}
def shell(cmd, cwd=None, show_errors=True):
"""Run the given command as a shell script on the host machine.
If cwd is specified, we run the command from that directory; otherwise, we
run the command from the current working directory.
"""
proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
if proc.returncode:
if show_errors:
print('Command %s failed with error code %d' %
(cmd, proc.returncode), file=sys.stderr)
if err:
print(err, file=sys.stderr)
raise subprocess.CalledProcessError(proc.returncode, cmd, err)
return out
def get_archive_path(out_dir, extension='.tar.bz2'):
"""Gets the full path for an archive that would contain the given out_dir"""
return out_dir.rstrip(os.path.sep) + extension
def create_specific_output_dir(out_dir):
"""Create the given directory if it doesn't exist.
Throw an exception if a non-directory file exists with the same name.
"""
if os.path.exists(out_dir):
if os.path.isdir(out_dir):
# Directory already exists; we're all good.
return
else:
raise Exception(textwrap.dedent('''\
Can't use %s as output directory; something that's not a
directory already exists with that name.''' % out_dir))
os.mkdir(out_dir)
def create_new_output_dir(out_dir_prefix):
"""Create a new directory whose name begins with out_dir_prefix."""
for i in range(0, 1024):
try:
dir = '%s%d' % (out_dir_prefix, i)
if not os.path.isfile(get_archive_path(dir)):
os.mkdir(dir)
return dir
except:
pass
raise Exception("Couldn't create output directory.")
def get_remote_b2g_pids():
"""Get the pids of all gecko processes running on the device.
Returns a tuple (master_pid, child_pids), where child_pids is a list.
"""
procs = remote_toolbox_cmd('ps').split('\n')
master_pid = None
child_pids = []
b2g_pids = {}
for line in procs:
if re.search(r'/b2g|plugin-container\s*$', line):
pids = line.split()[1:3]
pid = int(pids[0])
ppid = int(pids[1])
b2g_pids[pid] = ppid
for pid in b2g_pids:
ppid = b2g_pids[pid]
if ppid in b2g_pids:
child_pids.append(pid)
else:
if master_pid:
raise Exception('Two copies of b2g process found?')
master_pid = pid
if not master_pid:
raise Exception('b2g does not appear to be running on the device.')
return (master_pid, child_pids)
def is_using_nuwa():
"""Determines if Nuwa is being used"""
return "(Nuwa)" in remote_shell('b2g-ps', False)
def pull_procrank_etc(out_dir):
"""Get the output of procrank and a few other diagnostic programs and save
it into out_dir.
"""
shell('adb shell b2g-info > b2g-info', cwd=out_dir)
shell('adb shell procrank > procrank', cwd=out_dir)
shell('adb shell b2g-ps > b2g-ps', cwd=out_dir)
shell('adb shell b2g-procrank > b2g-procrank', cwd=out_dir)
def run_and_delete_dir_on_exception(fun, dir):
"""Run the given function and, if it throws an exception, delete the given
directory, if it's empty, before re-throwing the exception.
    You might want to wrap your call to notify_and_pull_files in this
function."""
try:
return fun()
except:
# os.rmdir will throw if the directory is non-empty, and a simple
# 'raise' will re-throw the exception from os.rmdir (if that throws),
# so we need to explicitly save the exception info here. See
# http://nedbatchelder.com/blog/200711/rethrowing_exceptions_in_python.html
exception_info = sys.exc_info()
try:
# Throws if the directory is not empty.
os.rmdir(dir)
except OSError:
pass
# Raise the original exception.
raise exception_info[1], None, exception_info[2]
def notify_and_pull_files(outfiles_prefixes,
remove_outfiles_from_device,
out_dir,
optional_outfiles_prefixes=[],
fifo_msg=None,
signal=None,
ignore_nuwa=is_using_nuwa()):
"""Send a message to the main B2G process (either by sending it a signal or
by writing to a fifo that it monitors) and pull files created as a result.
Exactly one of fifo_msg or signal must be non-null; otherwise, we throw
an exception.
If fifo_msg is non-null, we write fifo_msg to
/data/local/debug_info_trigger. When this comment was written, B2G
understood the messages 'memory report', 'minimize memory report', and 'gc
log'. See nsMemoryInfoDumper.cpp's FifoWatcher.
If signal is non-null, we send the given signal (which may be either a
number or a string of the form 'SIGRTn', which we interpret as the signal
SIGRTMIN + n).
After writing to the fifo or sending the signal, we pull the files
generated into out_dir on the host machine. We only pull files which were
created after the signal was sent.
When we're done, we remove the files from the device if
remote_outfiles_from_device is true.
outfiles_prefixes must be a list containing the beginnings of the files we
expect to be created as a result of the signal. For example, if we expect
to see files named 'foo-XXX' and 'bar-YYY', we'd set outfiles_prefixes to
['foo-', 'bar-'].
We expect to pull len(outfiles_prefixes) * (# b2g processes) files from the
device. If that succeeds, we then pull all files which match
optional_outfiles_prefixes.
"""
if (fifo_msg is None) == (signal is None):
raise ValueError("Exactly one of the fifo_msg and "
"signal kw args must be non-null.")
# Check if we should override the ignore_nuwa value.
if not ignore_nuwa and os.getenv("MOZ_IGNORE_NUWA_PROCESS", "0") != "0":
ignore_nuwa = True
unified_outfiles_prefixes = ['unified-' + pfx for pfx in outfiles_prefixes]
all_outfiles_prefixes = outfiles_prefixes + optional_outfiles_prefixes \
+ unified_outfiles_prefixes
(master_pid, child_pids) = get_remote_b2g_pids()
child_pids = set(child_pids)
old_files = _list_remote_temp_files(outfiles_prefixes + unified_outfiles_prefixes)
if signal:
_send_remote_signal(signal, master_pid)
else:
_write_to_remote_file('/data/local/debug_info_trigger', fifo_msg)
num_expected_responses = 1 + len(child_pids)
if ignore_nuwa:
num_expected_responses -= 1
num_expected_files = len(outfiles_prefixes) * num_expected_responses
num_unified_expected = len(unified_outfiles_prefixes)
max_wait = 60 * 2
wait_interval = 1.0
for i in range(0, int(max_wait / wait_interval)):
new_files = _list_remote_temp_files(outfiles_prefixes) - old_files
new_unified_files = _list_remote_temp_files(unified_outfiles_prefixes) - old_files
if new_unified_files:
files_gotten = len(new_unified_files)
files_expected = num_unified_expected
else:
files_gotten = len(new_files)
files_expected = num_expected_files
sys.stdout.write('\rGot %d/%d files.' % (files_gotten, files_expected))
sys.stdout.flush()
if files_gotten >= files_expected:
print('')
if files_gotten > files_expected:
print("WARNING: Got more files than expected!", file=sys.stderr)
print("(Is MOZ_IGNORE_NUWA_PROCESS set incorrectly?)", file=sys.stderr)
break
sleep(wait_interval)
# Some pids may have gone away before reporting memory. This can happen
# normally if the triggering of memory reporting causes some old
# children to OOM. (Bug 931198)
dead_child_pids = child_pids - set(get_remote_b2g_pids()[1])
if len(dead_child_pids):
for pid in dead_child_pids:
print("\rWarning: Child %u exited during memory reporting" % pid, file=sys.stderr)
child_pids -= dead_child_pids
num_expected_files -= len(outfiles_prefixes) * len(dead_child_pids)
if files_gotten < files_expected:
print('')
print("We've waited %ds but the only relevant files we see are" % max_wait, file=sys.stderr)
print('\n'.join([' ' + f for f in new_files | new_unified_files]), file=sys.stderr)
print('We expected %d but see only %d files. Giving up...' %
(files_expected, files_gotten), file=sys.stderr)
raise Exception("Unable to pull some files.")
new_files = _pull_remote_files(all_outfiles_prefixes, old_files, out_dir)
if remove_outfiles_from_device:
_remove_files_from_device(all_outfiles_prefixes, old_files)
return [os.path.basename(f) for f in new_files]
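# Illustrative call sketch (the fifo message is one of those listed in the
# docstring above; the file prefix and output directory are assumed values):
#
#   notify_and_pull_files(outfiles_prefixes=['memory-report-'],
#                         remove_outfiles_from_device=True,
#                         out_dir='about-memory-0',
#                         fifo_msg='minimize memory report')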
def pull_remote_file(remote_file, dest_file):
"""Copies a file from the device."""
shell('adb pull "%s" "%s"' % (remote_file, dest_file))
# You probably don't need to call the functions below from outside this module,
# but hey, maybe you do.
def _send_remote_signal(signal, pid):
"""Send a signal to a process on the device.
signal can be either an integer or a string of the form 'SIGRTn' where n is
an integer. We interpret SIGRTn to mean the signal SIGRTMIN + n.
"""
# killer is a program we put on the device which is like kill(1), except it
# accepts signals above 31. It also understands "SIGRTn" per above.
remote_shell("killer %s %d" % (signal, pid))
def _write_to_remote_file(file, msg):
"""Write a message to a file on the device.
Note that echo is a shell built-in, so we use remote_shell, not
remote_toolbox_cmd, here.
Also, due to ghetto string escaping in remote_shell, we must use " and not
' in this command.
"""
remote_shell('echo -n "%s" > "%s"' % (msg, file))
def _list_remote_temp_files(prefixes):
"""Return a set of absolute filenames in the device's temp directory which
start with one of the given prefixes."""
# Look for files in both /data/local/tmp/ and
# /data/local/tmp/memory-reports. New versions of b2g dump everything into
# /data/local/tmp/memory-reports, but old versions use /data/local/tmp for
# some things (e.g. gc/cc logs).
tmpdir = '/data/local/tmp/'
outdirs = [d for d in [tmpdir, os.path.join(tmpdir, 'memory-reports')] if
os.path.basename(d) in remote_ls(os.path.dirname(d))]
found_files = set()
for d in outdirs:
found_files |= {os.path.join(d, file) for file in remote_ls(d)
if any(file.startswith(prefix) for prefix in prefixes)}
return found_files
def _pull_remote_files(outfiles_prefixes, old_files, out_dir):
"""Pull files from the remote device's temp directory into out_dir.
We pull each file in the temp directory whose name begins with one of the
elements of outfiles_prefixes and which isn't listed in old_files.
"""
new_files = _list_remote_temp_files(outfiles_prefixes) - old_files
for f in new_files:
shell('adb pull %s' % f, cwd=out_dir)
print("Pulled files into %s." % out_dir)
return new_files
def _remove_files_from_device(outfiles_prefixes, old_files):
"""Remove files from the remote device's temp directory.
We remove all files starting with one of the elements of outfiles_prefixes
which aren't listed in old_files.
"""
files_to_remove = _list_remote_temp_files(outfiles_prefixes) - old_files
for f in files_to_remove:
remote_toolbox_cmd('rm', f)
| []
| []
| [
"MOZ_IGNORE_NUWA_PROCESS"
]
| [] | ["MOZ_IGNORE_NUWA_PROCESS"] | python | 1 | 0 | |
tests/unit_tests.py | from deepface import DeepFace
import json
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#-----------------------------------------
print("Bulk tests")
print("-----------------------------------------")
print("Large scale face recognition")
df = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset")
print(df.head())
print("-----------------------------------------")
print("Ensemble for find function")
df = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset", model_name = "Ensemble")
print(df.head())
print("-----------------------------------------")
print("Bulk face recognition tests")
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg', True],
['dataset/img5.jpg', 'dataset/img6.jpg', True]
]
resp_obj = DeepFace.verify(dataset)
print(resp_obj["pair_1"]["verified"] == True)
print(resp_obj["pair_2"]["verified"] == True)
print("-----------------------------------------")
print("Ensemble learning bulk")
resp_obj = DeepFace.verify(dataset, model_name = "Ensemble")
for i in range(0, len(dataset)):
item = resp_obj['pair_%s' % (i+1)]
verified = item["verified"]
score = item["score"]
print(verified)
print("-----------------------------------------")
print("Bulk facial analysis tests")
dataset = [
'dataset/img1.jpg',
'dataset/img2.jpg',
'dataset/img5.jpg',
'dataset/img6.jpg'
]
resp_obj = DeepFace.analyze(dataset)
print(resp_obj["instance_1"]["age"]," years old ", resp_obj["instance_1"]["dominant_emotion"], " ",resp_obj["instance_1"]["gender"])
print(resp_obj["instance_2"]["age"]," years old ", resp_obj["instance_2"]["dominant_emotion"], " ",resp_obj["instance_2"]["gender"])
print(resp_obj["instance_3"]["age"]," years old ", resp_obj["instance_3"]["dominant_emotion"], " ",resp_obj["instance_3"]["gender"])
print(resp_obj["instance_4"]["age"]," years old ", resp_obj["instance_4"]["dominant_emotion"], " ",resp_obj["instance_4"]["gender"])
print("-----------------------------------------")
#-----------------------------------------
print("Facial analysis test. Passing nothing as an action")
img = "dataset/img4.jpg"
demography = DeepFace.analyze(img)
print(demography)
print("-----------------------------------------")
print("Facial analysis test. Passing all to the action")
demography = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])
print("Demography:")
print(demography)
#check response is a valid json
print("Age: ", demography["age"])
print("Gender: ", demography["gender"])
print("Race: ", demography["dominant_race"])
print("Emotion: ", demography["dominant_emotion"])
print("-----------------------------------------")
print("Face recognition tests")
dataset = [
['dataset/img1.jpg', 'dataset/img2.jpg', True],
['dataset/img5.jpg', 'dataset/img6.jpg', True],
['dataset/img6.jpg', 'dataset/img7.jpg', True],
['dataset/img8.jpg', 'dataset/img9.jpg', True],
['dataset/img1.jpg', 'dataset/img11.jpg', True],
['dataset/img2.jpg', 'dataset/img11.jpg', True],
['dataset/img1.jpg', 'dataset/img3.jpg', False],
['dataset/img2.jpg', 'dataset/img3.jpg', False],
['dataset/img6.jpg', 'dataset/img8.jpg', False],
['dataset/img6.jpg', 'dataset/img9.jpg', False],
]
models = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace', 'DeepID']
metrics = ['cosine', 'euclidean', 'euclidean_l2']
passed_tests = 0; test_cases = 0
for model in models:
for metric in metrics:
for instance in dataset:
img1 = instance[0]
img2 = instance[1]
result = instance[2]
resp_obj = DeepFace.verify(img1, img2, model_name = model, distance_metric = metric)
prediction = resp_obj["verified"]
distance = round(resp_obj["distance"], 2)
required_threshold = resp_obj["max_threshold_to_verify"]
test_result_label = "failed"
if prediction == result:
passed_tests = passed_tests + 1
test_result_label = "passed"
if prediction == True:
classified_label = "verified"
else:
classified_label = "unverified"
test_cases = test_cases + 1
print(img1, " and ", img2," are ", classified_label, " as same person based on ", model," model and ",metric," distance metric. Distance: ",distance,", Required Threshold: ", required_threshold," (",test_result_label,")")
print("--------------------------")
#-----------------------------------------
print("Passed unit tests: ",passed_tests," / ",test_cases)
accuracy = 100 * passed_tests / test_cases
accuracy = round(accuracy, 2)
if accuracy > 75:
print("Unit tests are completed successfully. Score: ",accuracy,"%")
else:
	raise ValueError("Unit test score does not satisfy the minimum required accuracy. Minimum expected score is 75% but this got ",accuracy,"%")
#-----------------------------------
# api tests - already built models will be passed to the functions
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
#-----------------------------------
vggface_model = VGGFace.loadModel()
resp_obj = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", model_name = "VGG-Face", model = vggface_model)
print(resp_obj)
#-----------------------------------
from deepface.extendedmodels import Age, Gender, Race, Emotion
emotion_model = Emotion.loadModel()
age_model = Age.loadModel()
gender_model = Gender.loadModel()
race_model = Race.loadModel()
facial_attribute_models = {}
facial_attribute_models["emotion"] = emotion_model
facial_attribute_models["age"] = age_model
facial_attribute_models["gender"] = gender_model
facial_attribute_models["race"] = race_model
resp_obj = DeepFace.analyze("dataset/img1.jpg", models=facial_attribute_models)
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 |
reconcile/github_owners.py | import os
import logging
from github import Github
from sretoolbox.utils import retry
import reconcile.utils.gql as gql
from reconcile.github_org import get_config
from reconcile.utils.raw_github_api import RawGithubApi
ROLES_QUERY = """
{
roles: roles_v1 {
name
users {
github_username
}
bots {
github_username
}
permissions {
service
...on PermissionGithubOrg_v1 {
org
role
}
...on PermissionGithubOrgTeam_v1 {
org
role
}
}
}
}
"""
QONTRACT_INTEGRATION = 'github-owners'
def fetch_desired_state():
desired_state = {}
gqlapi = gql.get_api()
roles = gqlapi.query(ROLES_QUERY)['roles']
for role in roles:
permissions = [p for p in role['permissions']
if p.get('service')
in ['github-org', 'github-org-team']
and p.get('role') == 'owner']
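# Only owner-level permissions on a GitHub org (or org team) are relevant here;
# roles without any such permission are skipped.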
if not permissions:
continue
for permission in permissions:
github_org = permission['org']
desired_state.setdefault(github_org, [])
for user in role['users']:
github_username = user['github_username']
desired_state[github_org].append(github_username)
for bot in role['bots']:
github_username = bot['github_username']
desired_state[github_org].append(github_username)
return desired_state
@retry()
def get_current_github_usernames(github_org_name, github, raw_github):
gh_org = github.get_organization(github_org_name)
gh_org_members = gh_org.get_members(role='admin')
current_github_usernames = [m.login for m in gh_org_members]
invitations = raw_github.org_invitations(github_org_name)
current_github_usernames.extend(invitations)
return gh_org, current_github_usernames
def run(dry_run):
base_url = os.environ.get('GITHUB_API', 'https://api.github.com')
desired_state = fetch_desired_state()
for github_org_name, desired_github_usernames in desired_state.items():
config = get_config(desired_org_name=github_org_name)
token = config['github'][github_org_name]['token']
gh = Github(token, base_url=base_url)
raw_gh = RawGithubApi(token)
gh_org, current_github_usernames = \
get_current_github_usernames(github_org_name, gh, raw_gh)
current_github_usernames = \
[m.lower() for m in current_github_usernames]
desired_github_usernames = \
[m.lower() for m in desired_github_usernames]
for github_username in desired_github_usernames:
if github_username not in current_github_usernames:
logging.info(['add_owner', github_org_name, github_username])
if not dry_run:
gh_user = gh.get_user(github_username)
gh_org.add_to_members(gh_user, 'admin')
| [] | [] | ["GITHUB_API"] | [] | ["GITHUB_API"] | python | 1 | 0 |
twitter.py | import requests
import os
import json
import datetime
bearer_token = os.environ.get("BEARER_TOKEN")
discord_webhook = os.environ.get("DISCORD_WEBHOOK")
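# Both environment variables are required: BEARER_TOKEN authenticates against the
# Twitter v2 API and DISCORD_WEBHOOK is the webhook URL that receives the tweets.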
def get_user_lookup(username):
'''
Look up a user ID from a username.
'''
usernames = "usernames={}".format(username)
user_fields = "user.fields=created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld"
url = "https://api.twitter.com/2/users/by?{}&{}".format(usernames, user_fields)
headers = {
"Authorization": f"Bearer {bearer_token}",
"User-Agent": "v2UserLookupPython"
}
response = requests.request("GET", url, headers=headers,)
print(response.status_code)
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
return response.json()
def get_user_tweets(user_id):
'''
Fetch the user's most recent tweets from a user ID.
'''
url = "https://api.twitter.com/2/users/{}/tweets".format(user_id)
headers = {
"Authorization": f"Bearer {bearer_token}",
"User-Agent": "v2UserTweetsPython"
}
params = {
"tweet.fields": "attachments,author_id,context_annotations,conversation_id,created_at,entities,geo,id,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,source,text,withheld",
"expansions": "attachments.media_keys",
"media.fields": "media_key,type,duration_ms,height,url,width,alt_text"
}
response = requests.request("GET", url, headers=headers, params=params)
print(response.status_code)
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
return response.json()
def convert_to_datetime(datetime_str):
tweet_datetime = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S.%f%z')
jst_datetime = tweet_datetime.astimezone(datetime.timezone(datetime.timedelta(hours=+9), 'JST'))
return jst_datetime
def notify_discord(lookup, tweets):
'''
Forward the latest tweet to Discord.
'''
url = "https://twitter.com/" + lookup['data'][0]['username'] + "/status/" + lookup['data'][0]['id']
json_send = {
"username": lookup['data'][0]['name'],
"avatar_url": lookup['data'][0]['profile_image_url'],
"embeds": [
{
"color": 1942002,
"author": {
"name": lookup['data'][0]['name'] + " (@" + lookup['data'][0]['username'] + ")",
"url": "https://twitter.com/" + lookup['data'][0]['username'],
"icon_url": lookup['data'][0]['profile_image_url']
},
"url": url,
"description": tweets['data'][0]['text'],
"footer": {
"text": tweets['data'][0]['source'],
"icon_url": "https://abs.twimg.com/icons/apple-touch-icon-192x192.png"
},
"timestamp": tweets['data'][0]['created_at']
}
]
}
first = True
if 'attachments' in tweets['data'][0]:
for img_key in tweets['data'][0]['attachments']['media_keys']:
for media in tweets['includes']['media']:
if img_key == media['media_key']:
imgurl = media['url']
break
if first:
first = False
json_send['embeds'][0]['image'] = {}
json_send['embeds'][0]['image']['url'] = imgurl
else:
part_json = {
"image": {
"url": imgurl
},
"url": url
}
json_send['embeds'].append(part_json)
response = requests.post(discord_webhook, json.dumps(json_send), headers={'Content-Type': 'application/json'})
print(json_send)
print(response.status_code)
def main():
usernames = ['NU_kouhou', 'nu_idsci', 'ShigeruKohno', 'nagasakicareer', 'nuc_bunkyo_shop', 'nuc_univ_coop', 'NagasakiUniLib']
for username in usernames:
user_lookup = get_user_lookup(username)
print(json.dumps(user_lookup, indent=4, sort_keys=True, ensure_ascii=False))
user_tweets = get_user_tweets(int(user_lookup['data'][0]['id']))
print(json.dumps(user_tweets, indent=4, sort_keys=True, ensure_ascii=False))
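# Forward the tweet only if it was posted within the last 30 minutes, so that
# repeated runs do not repost older tweets.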
post_time = convert_to_datetime(user_tweets['data'][0]['created_at'])
now_time = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=+9), 'JST'))
updatetime = now_time - datetime.timedelta(minutes=30)
if updatetime < post_time:
notify_discord(user_lookup, user_tweets)
print(user_tweets['data'][0]['text'])
if __name__ == "__main__":
main()
| [] | [] | ["BEARER_TOKEN", "DISCORD_WEBHOOK"] | [] | ["BEARER_TOKEN", "DISCORD_WEBHOOK"] | python | 2 | 0 |
test/endpoint/setup.go | // +build varlink
package endpoint
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/containers/libpod/v2/pkg/rootless"
iopodman "github.com/containers/libpod/v2/pkg/varlink"
"github.com/containers/storage/pkg/stringid"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func Setup(tempDir string) *EndpointTestIntegration {
var (
endpoint string
)
cwd, _ := os.Getwd()
INTEGRATION_ROOT = filepath.Join(cwd, "../../")
podmanBinary := filepath.Join(cwd, "../../bin/podman")
if os.Getenv("PODMAN_BINARY") != "" {
podmanBinary = os.Getenv("PODMAN_BINARY")
}
conmonBinary := filepath.Join("/usr/libexec/podman/conmon")
altConmonBinary := "/usr/bin/conmon"
if _, err := os.Stat(conmonBinary); os.IsNotExist(err) {
conmonBinary = altConmonBinary
}
if os.Getenv("CONMON_BINARY") != "" {
conmonBinary = os.Getenv("CONMON_BINARY")
}
storageOptions := STORAGE_OPTIONS
if os.Getenv("STORAGE_OPTIONS") != "" {
storageOptions = os.Getenv("STORAGE_OPTIONS")
}
cgroupManager := CGROUP_MANAGER
if rootless.IsRootless() {
cgroupManager = "cgroupfs"
}
if os.Getenv("CGROUP_MANAGER") != "" {
cgroupManager = os.Getenv("CGROUP_MANAGER")
}
ociRuntime := os.Getenv("OCI_RUNTIME")
if ociRuntime == "" {
var err error
ociRuntime, err = exec.LookPath("runc")
// If we cannot find the runc binary, fall back to a static path since we have
// no way to return an error here. The tests will then fail and clearly report
// that the runc binary could not be found.
if err != nil {
ociRuntime = "/usr/bin/runc"
}
}
os.Setenv("DISABLE_HC_SYSTEMD", "true")
CNIConfigDir := "/etc/cni/net.d"
storageFs := STORAGE_FS
if rootless.IsRootless() {
storageFs = ROOTLESS_STORAGE_FS
}
uuid := stringid.GenerateNonCryptoID()
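// Build a unique varlink endpoint per run: root sessions bind a socket under
// /run/podman, while rootless sessions place it inside XDG_RUNTIME_DIR.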
if !rootless.IsRootless() {
endpoint = fmt.Sprintf("unix:/run/podman/io.podman-%s", uuid)
} else {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
socket := fmt.Sprintf("io.podman-%s", uuid)
fqpath := filepath.Join(runtimeDir, socket)
endpoint = fmt.Sprintf("unix:%s", fqpath)
}
eti := EndpointTestIntegration{
ArtifactPath: ARTIFACT_DIR,
CNIConfigDir: CNIConfigDir,
CgroupManager: cgroupManager,
ConmonBinary: conmonBinary,
CrioRoot: filepath.Join(tempDir, "crio"),
ImageCacheDir: ImageCacheDir,
ImageCacheFS: storageFs,
OCIRuntime: ociRuntime,
PodmanBinary: podmanBinary,
RunRoot: filepath.Join(tempDir, "crio-run"),
SignaturePolicyPath: filepath.Join(INTEGRATION_ROOT, "test/policy.json"),
StorageOptions: storageOptions,
TmpDir: tempDir,
// Timings: nil,
VarlinkBinary: VarlinkBinary,
VarlinkCommand: nil,
VarlinkEndpoint: endpoint,
VarlinkSession: nil,
}
return &eti
}
func (p *EndpointTestIntegration) Cleanup() {
// Remove all containers
// TODO Make methods to do all this?
p.stopAllContainers()
// TODO need to make stop all pods
p.StopVarlink()
// Nuke tempdir
if err := os.RemoveAll(p.TmpDir); err != nil {
fmt.Printf("%q\n", err)
}
// Clean up the registries configuration file ENV variable set in Create
resetRegistriesConfigEnv()
}
func (p *EndpointTestIntegration) listContainers() []iopodman.Container {
containers := p.Varlink("ListContainers", "", false)
var varlinkContainers map[string][]iopodman.Container
if err := json.Unmarshal(containers.OutputToBytes(), &varlinkContainers); err != nil {
logrus.Error("failed to unmarshal containers")
}
return varlinkContainers["containers"]
}
func (p *EndpointTestIntegration) stopAllContainers() {
containers := p.listContainers()
for _, container := range containers {
p.stopContainer(container.Id)
}
}
func (p *EndpointTestIntegration) stopContainer(cid string) {
p.Varlink("StopContainer", fmt.Sprintf("{\"name\":\"%s\", \"timeout\":0}", cid), false)
}
func resetRegistriesConfigEnv() {
os.Setenv("REGISTRIES_CONFIG_PATH", "")
}
func (p *EndpointTestIntegration) createArtifact(image string) {
if os.Getenv("NO_TEST_CACHE") != "" {
return
}
dest := strings.Split(image, "/")
destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1))
fmt.Printf("Caching %s at %s...", image, destName)
if _, err := os.Stat(destName); os.IsNotExist(err) {
pull := p.Varlink("PullImage", fmt.Sprintf("{\"name\":\"%s\"}", image), false)
Expect(pull.ExitCode()).To(Equal(0))
imageSave := iopodman.ImageSaveOptions{
// Name:image,
// Output: destName,
// Format: "oci-archive",
}
imageSave.Name = image
imageSave.Output = destName
imageSave.Format = "oci-archive"
foo := make(map[string]iopodman.ImageSaveOptions)
foo["options"] = imageSave
f, _ := json.Marshal(foo)
save := p.Varlink("ImageSave", string(f), false)
result := save.OutputToMoreResponse()
Expect(save.ExitCode()).To(Equal(0))
Expect(os.Rename(result.Id, destName)).To(BeNil())
fmt.Printf("\n")
} else {
fmt.Printf(" already exists.\n")
}
}
func populateCache(p *EndpointTestIntegration) {
p.CrioRoot = p.ImageCacheDir
p.StartVarlink()
for _, image := range CACHE_IMAGES {
p.RestoreArtifactToCache(image)
}
p.StopVarlink()
}
func (p *EndpointTestIntegration) RestoreArtifactToCache(image string) error {
fmt.Printf("Restoring %s...\n", image)
dest := strings.Split(image, "/")
destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1))
// fmt.Println(destName, p.ImageCacheDir)
load := p.Varlink("LoadImage", fmt.Sprintf("{\"name\": \"%s\", \"inputFile\": \"%s\"}", image, destName), false)
Expect(load.ExitCode()).To(BeZero())
return nil
}
func (p *EndpointTestIntegration) startTopContainer(name string) string {
t := true
args := iopodman.Create{
Args: []string{"docker.io/library/alpine:latest", "top"},
Tty: &t,
Detach: &t,
}
if len(name) > 0 {
args.Name = &name
}
b, err := json.Marshal(args)
if err != nil {
ginkgo.Fail("failed to marshal data for top container")
}
input := fmt.Sprintf("{\"create\":%s}", string(b))
top := p.Varlink("CreateContainer", input, false)
if top.ExitCode() != 0 {
ginkgo.Fail("failed to start top container")
}
start := p.Varlink("StartContainer", fmt.Sprintf("{\"name\":\"%s\"}", name), false)
if start.ExitCode() != 0 {
ginkgo.Fail("failed to start top container")
}
return start.OutputToString()
}
| ["\"PODMAN_BINARY\"", "\"PODMAN_BINARY\"", "\"CONMON_BINARY\"", "\"CONMON_BINARY\"", "\"STORAGE_OPTIONS\"", "\"STORAGE_OPTIONS\"", "\"CGROUP_MANAGER\"", "\"CGROUP_MANAGER\"", "\"OCI_RUNTIME\"", "\"XDG_RUNTIME_DIR\"", "\"NO_TEST_CACHE\""] | [] | ["STORAGE_OPTIONS", "NO_TEST_CACHE", "PODMAN_BINARY", "XDG_RUNTIME_DIR", "OCI_RUNTIME", "CONMON_BINARY", "CGROUP_MANAGER"] | [] | ["STORAGE_OPTIONS", "NO_TEST_CACHE", "PODMAN_BINARY", "XDG_RUNTIME_DIR", "OCI_RUNTIME", "CONMON_BINARY", "CGROUP_MANAGER"] | go | 7 | 0 |
cmd/bbgo-webview/main.go | package main
import (
"context"
"flag"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"time"
"github.com/joho/godotenv"
"github.com/webview/webview"
log "github.com/sirupsen/logrus"
"github.com/ycdesu/spreaddog/pkg/bbgo"
"github.com/ycdesu/spreaddog/pkg/cmd"
"github.com/ycdesu/spreaddog/pkg/server"
)
func main() {
noChangeDir := false
portNum := 0
flag.BoolVar(&noChangeDir, "no-chdir", false, "do not change directory")
flag.IntVar(&portNum, "port", 0, "server port")
flag.Parse()
if !noChangeDir {
ep, err := os.Executable()
if err != nil {
log.Fatalln("failed to find the current executable:", err)
}
resourceDir := filepath.Join(filepath.Dir(ep), "..", "Resources")
if _, err := os.Stat(resourceDir); err == nil {
err = os.Chdir(resourceDir)
if err != nil {
log.Fatalln("chdir error:", err)
}
}
}
dotenvFile := ".env.local"
if _, err := os.Stat(dotenvFile); err == nil {
if err := godotenv.Load(dotenvFile); err != nil {
log.WithError(err).Error("error loading dotenv file")
return
}
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
debug, _ := strconv.ParseBool(os.Getenv("DEBUG_WEBVIEW"))
view := webview.New(debug)
defer view.Destroy()
view.SetTitle("BBGO")
view.SetSize(1024, 780, webview.HintNone)
configFile := "bbgo.yaml"
var setup *server.Setup
var userConfig *bbgo.Config
_, err := os.Stat(configFile)
if os.IsNotExist(err) {
setup = &server.Setup{
Context: ctx,
Cancel: cancel,
Token: "",
BeforeRestart: func() {
view.Destroy()
},
}
userConfig = &bbgo.Config{
Notifications: nil,
Persistence: nil,
Sessions: nil,
ExchangeStrategies: nil,
}
} else {
userConfig, err = bbgo.Load(configFile, true)
if err != nil {
log.WithError(err).Error("can not load config file")
return
}
}
environ := bbgo.NewEnvironment()
trader := bbgo.NewTrader(environ)
// we could initialize the environment from the settings
if setup == nil {
if err := cmd.BootstrapEnvironment(ctx, environ, userConfig); err != nil {
log.WithError(err).Error("failed to bootstrap environment")
return
}
// we could initialize the environment from the settings
go func() {
if err := environ.Sync(ctx); err != nil {
log.WithError(err).Error("failed to sync data")
return
}
if err := trader.Configure(userConfig); err != nil {
log.WithError(err).Error("failed to configure trader")
return
}
// for setup mode, we don't start the trader
if err := trader.Run(ctx); err != nil {
log.WithError(err).Error("failed to start trader")
}
}()
}
// find a free port for binding the server
ln, err := net.Listen("tcp", "127.0.0.1:" + strconv.Itoa(portNum))
if err != nil {
log.WithError(err).Error("can not bind listener")
return
}
defer ln.Close()
baseURL := "http://" + ln.Addr().String()
srv := &server.Server{
Config: userConfig,
Environ: environ,
Trader: trader,
OpenInBrowser: false,
Setup: setup,
}
go func() {
if err := srv.RunWithListener(ctx, ln); err != nil {
log.WithError(err).Errorf("server error")
}
}()
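// Wait for the embedded HTTP server to answer before pointing the webview at it:
// PingUntil polls baseURL and invokes the callback once the server responds.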
log.Infof("pinging the server at %s", baseURL)
server.PingUntil(ctx, time.Second, baseURL, func() {
log.Infof("got pong, navigate to %s", baseURL)
view.Navigate(baseURL)
view.Run()
})
// Wait until the interrupt signal arrives or browser window is closed
sigc := make(chan os.Signal, 1) // signal.Notify requires a buffered channel
signal.Notify(sigc, os.Interrupt)
select {
case <-sigc:
}
log.Println("exiting...")
}
| ["\"DEBUG_WEBVIEW\""] | [] | ["DEBUG_WEBVIEW"] | [] | ["DEBUG_WEBVIEW"] | go | 1 | 0 |
main.go | package main
import (
"bufio"
"bytes"
"fmt"
"os"
"regexp"
"strconv"
"strings"
flag "github.com/spf13/pflag"
log "github.com/dihedron/go-log"
)
func init() {
switch strings.ToLower(os.Getenv("PUT_DEBUG")) {
case "debug", "dbg", "d":
log.SetLevel(log.DBG)
case "informational", "information", "info", "inf", "i":
log.SetLevel(log.INF)
case "warning", "warn", "wrn", "w":
log.SetLevel(log.WRN)
case "error", "err", "e":
log.SetLevel(log.ERR)
default:
log.SetLevel(log.NUL)
}
log.SetStream(os.Stderr, true)
log.SetTimeFormat("15:04:05.000")
log.SetPrintCallerInfo(true)
log.SetPrintSourceInfo(log.SourceInfoShort)
}
func main() {
once := flag.Bool("once", false, "whether the instruction should be applied only to the first occurrence")
help := flag.Bool("help", false, "prints help information and quits")
flag.Parse()
if len(flag.Args()) < 3 || *help {
fmt.Fprintf(os.Stderr, "usage:\n")
fmt.Fprintf(os.Stderr, " put [--once] {<text>|nil} {at <index>|{before|after|where} <pattern>}\n")
fmt.Fprintf(os.Stderr, "examples:\n")
fmt.Fprintf(os.Stderr, " put \"some text\" at 0 add a leading line\n")
fmt.Fprintf(os.Stderr, " put nil where \"^#.*$\" remove all comments\n")
fmt.Fprintf(os.Stderr, " put \"some text\" after \"^#.*$\" add line after matching lines\n")
os.Exit(1)
}
processStream(flag.Args(), *once)
//cmd.Execute()
}
type operation int8
const (
// OperationReplace replaces the matching line with the user provided text.
OperationReplace operation = iota
// OperationPrepend inserts the user provided text before the matching line.
OperationPrepend
// OperationAppend inserts the user provided text after the matching line.
OperationAppend
// OperationDelete removes the matching line.
OperationDelete
// OperationInsert inserts the user provided text at a given line index (0-based).
OperationInsert
// OperationInvalid means that the operation could not be recognised.
OperationInvalid
)
// String returns an operation in human-readable form.
func (op operation) String() string {
switch op {
case OperationReplace:
return "<replace> (" + strconv.Itoa(int(op)) + ")"
case OperationPrepend:
return "<prepend> (" + strconv.Itoa(int(op)) + ")"
case OperationAppend:
return "<append> (" + strconv.Itoa(int(op)) + ")"
case OperationDelete:
return "<delete> (" + strconv.Itoa(int(op)) + ")"
case OperationInsert:
return "<insert> (" + strconv.Itoa(int(op)) + ")"
case OperationInvalid:
return "<invalid> (" + strconv.Itoa(int(op)) + ")"
}
return ""
}
// processStream is the actual workhorse: it identifies input and output, then
// reads in the input stream one line at a time and applies its pattern matching
// line by line; matching lines are processed and written to the output stream.
func processStream(args []string, once bool) {
log.Debugf("Apply only once: %t", once)
for i, arg := range args {
log.Debugf("args[%d] => %q\n", i, arg)
}
input, err := getInput(args)
if err != nil {
log.Fatalf("Unable to open input file: %v", err)
}
defer input.Close()
output, err := getOutput(args)
if err != nil {
log.Fatalf("Unable to open output file: %v", err)
}
defer output.Close()
op := getOperation(args)
log.Debugf("Operation: %v", op)
var re *regexp.Regexp
var insertAtIndex int
switch op {
case OperationReplace, OperationPrepend, OperationAppend, OperationDelete:
log.Debugf("Matching against %q", args[2])
re = regexp.MustCompile(args[2])
case OperationInsert:
log.Debugf("Inserting/dropping at index %q", args[2])
insertAtIndex, err = strconv.Atoi(strings.TrimSpace(args[2]))
if err != nil {
log.Fatalf("Error parsing line index: %v", err)
} else if insertAtIndex < 0 {
log.Fatalf("Invalid (negative) line index: %d", insertAtIndex)
}
}
scanner := bufio.NewScanner(input)
doneOnce := false
currentIndex := 0
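// Stream the input one line at a time: index-based operations compare the
// current line number with the target index, while pattern-based operations
// match each line against the compiled regexp and apply the requested operation.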
for scanner.Scan() {
if op == OperationInsert {
log.Debugf("Comparing currentIndex (%d) to insertAtIndex (%d)\n", currentIndex, insertAtIndex)
if currentIndex == insertAtIndex {
if args[0] == "nil" {
log.Debugf("Dropping line at index %d: %q\n", insertAtIndex, scanner.Text())
// skip line (drop it!)
currentIndex++
continue
} else {
log.Debugf("Inserting %q at index %d\n", args[0], insertAtIndex)
fmt.Fprintf(output, "%s\n", args[0])
}
}
currentIndex++
log.Debugf("Keeping text as is: %q\n", scanner.Text())
fmt.Fprintf(output, "%s\n", scanner.Text())
} else {
if re.MatchString(scanner.Text()) && (!once || !doneOnce) {
log.Debugf("Input text %q matches pattern", scanner.Text())
line := processLine(scanner.Text(), args[0], re)
switch op {
case OperationReplace:
fmt.Fprintf(output, "%s\n", line)
case OperationPrepend:
fmt.Fprintf(output, "%s\n", line)
fmt.Fprintf(output, "%s\n", scanner.Text())
case OperationAppend:
fmt.Fprintf(output, "%s\n", scanner.Text())
fmt.Fprintf(output, "%s\n", line)
case OperationDelete:
}
doneOnce = true
} else {
log.Debugf("Keeping text as is: %q\n", scanner.Text())
fmt.Fprintf(output, "%s\n", scanner.Text())
}
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("Error reading text: %v", err)
}
}
// getInput returns the input Reader to use; if a filename argument is provided,
// open the file to read from it, otherwise return STDIN; the Reader must be
// closed by the method's caller.
func getInput(args []string) (*os.File, error) {
if len(args) > 3 && args[3] != "" {
log.Debugf("Reading text from input file: %q", args[3])
return os.Open(args[3])
}
return os.Stdin, nil
}
// getOutput returns the output Writer to use; if a filename argument is provided,
// open the file to write to it, otherwise return STDOUT; the Writer must be
// closed by the method's caller.
func getOutput(args []string) (*os.File, error) {
if len(args) > 4 && args[4] != "" {
log.Debugf("Writing text to output file: %q", args[4])
return os.Create(args[4])
}
return os.Stdout, nil
}
// getOperation decodes the requested operation using the clause, according to the
// command usage; fuzzy matching (see github.com/sahilm/fuzzy) may be introduced
// later on once the product is sufficiently stable.
func getOperation(args []string) operation {
if args[1] == "where" || args[1] == "wherever" {
if args[0] == "nil" {
return OperationDelete
}
return OperationReplace
}
if args[1] == "before" {
return OperationPrepend
}
if args[1] == "after" {
return OperationAppend
}
if args[1] == "at" {
return OperationInsert
}
log.Fatalf("Unknown clause: %q; valid values include 'where', 'wherever', after' and 'before'")
return OperationInvalid
}
var anchors = regexp.MustCompile(`(?:\{(\d+)\})`)
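// processLine substitutes {N} anchors in the replacement text with the
// corresponding capturing groups matched by the user-supplied pattern.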
func processLine(original string, replacement string, re *regexp.Regexp) string {
if anchors.MatchString(replacement) {
log.Debugf("Replacement text requires binding\n")
// TODO: find all capturing groups in scanner.Text(), then use them to
// bind the replacement arguments; this processing is common to all
// matching methods so it should be moved to its own method.
matches := re.FindStringSubmatch(original)
if len(matches) == 0 {
log.Fatalf("Invalid number of bindings: %d\n", len(matches))
}
bindings := []string{}
for i, match := range matches {
log.Debugf("Match[%d] => %q\n", i, match)
bindings = append(bindings, match)
}
buffer := ""
cursor := 0
for _, indexes := range anchors.FindAllStringSubmatchIndex(replacement, -1) {
index, _ := strconv.Atoi(replacement[indexes[2]:indexes[3]])
if index > len(bindings) {
var buffer bytes.Buffer
buffer.WriteString(fmt.Sprintf("Invalid binding index %d, current binding values are:\n", index))
for i, binding := range bindings {
buffer.WriteString(fmt.Sprintf(" {%d} => %q\n", i, binding))
}
log.Fatalln(buffer.String())
}
log.Debugf("Binding {%d}: %q => %q (from index %d to %d)", index, replacement[indexes[0]:indexes[1]], bindings[index], indexes[0], indexes[1])
buffer = buffer + replacement[cursor:indexes[0]] + bindings[index]
cursor = indexes[1]
log.Debugf("Current temporary buffer: %q", buffer)
}
buffer = buffer + replacement[cursor:]
log.Debugf("Temporary buffer at end of line processing: %q", buffer)
return buffer
}
log.Debugf("Replacing text %q with %q\n", original, replacement)
return replacement
}
| ["\"PUT_DEBUG\""] | [] | ["PUT_DEBUG"] | [] | ["PUT_DEBUG"] | go | 1 | 0 |
test/functional/test_framework/test_node.py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoinrushd node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOINRUSHD_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoinrushd node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, rpchost, timewait, bitcoinrushd, bitcoinrush_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoinrushd
self.coverage_dir = coverage_dir
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(bitcoinrush_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
PRIV_KEYS = [
# address, privkey
('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoinrushd processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoinrushd is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoinrushd, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("bitcoinrushd started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoinrushd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'bitcoinrushd exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoinrushd still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to bitcoinrushd")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr=''):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOINRUSHD_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
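# Remember where debug.log currently ends; after the wrapped block runs, only
# the newly appended portion is scanned for the expected messages.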
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoinrushd
expected_msg: regex that stderr should match when bitcoinrushd fails
Will throw if bitcoinrushd starts without an error.
Will throw if an expected_msg is provided and it does not match bitcoinrushd's stdout."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('bitcoinrushd failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "bitcoinrushd should have exited with an error"
else:
assert_msg = "bitcoinrushd should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to bitcoinrush-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoinrushcli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoinrush-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoinrush-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoinrush-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoinrush-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
| [] | [] | [] | [] | [] | python | 0 | 0 |
internal/database/repos.go | package database
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net"
"os"
regexpsyntax "regexp/syntax"
"strconv"
"strings"
"time"
"github.com/inconshreveable/log15"
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/awscodecommit"
"github.com/sourcegraph/sourcegraph/internal/extsvc/bitbucketcloud"
"github.com/sourcegraph/sourcegraph/internal/extsvc/bitbucketserver"
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitlab"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
"github.com/sourcegraph/sourcegraph/internal/extsvc/jvmpackages"
"github.com/sourcegraph/sourcegraph/internal/extsvc/npm/npmpackages"
"github.com/sourcegraph/sourcegraph/internal/extsvc/pagure"
"github.com/sourcegraph/sourcegraph/internal/extsvc/perforce"
"github.com/sourcegraph/sourcegraph/internal/extsvc/phabricator"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
type RepoNotFoundErr struct {
ID api.RepoID
Name api.RepoName
HashedName api.RepoHashedName
}
func (e *RepoNotFoundErr) Error() string {
if e.Name != "" {
return fmt.Sprintf("repo not found: name=%q", e.Name)
}
if e.ID != 0 {
return fmt.Sprintf("repo not found: id=%d", e.ID)
}
return "repo not found"
}
func (e *RepoNotFoundErr) NotFound() bool {
return true
}
type RepoStore interface {
basestore.ShareableStore
Transact(context.Context) (RepoStore, error)
With(basestore.ShareableStore) RepoStore
Query(ctx context.Context, query *sqlf.Query) (*sql.Rows, error)
Done(error) error
Count(context.Context, ReposListOptions) (int, error)
Create(context.Context, ...*types.Repo) error
Delete(context.Context, ...api.RepoID) error
Get(context.Context, api.RepoID) (*types.Repo, error)
GetByIDs(context.Context, ...api.RepoID) ([]*types.Repo, error)
GetByName(context.Context, api.RepoName) (*types.Repo, error)
GetByHashedName(context.Context, api.RepoHashedName) (*types.Repo, error)
GetFirstRepoNamesByCloneURL(context.Context, string) (api.RepoName, error)
GetReposSetByIDs(context.Context, ...api.RepoID) (map[api.RepoID]*types.Repo, error)
List(context.Context, ReposListOptions) ([]*types.Repo, error)
ListEnabledNames(context.Context) ([]api.RepoName, error)
ListIndexableRepos(context.Context, ListIndexableReposOptions) ([]types.MinimalRepo, error)
ListMinimalRepos(context.Context, ReposListOptions) ([]types.MinimalRepo, error)
Metadata(context.Context, ...api.RepoID) ([]*types.SearchedRepo, error)
StreamMinimalRepos(context.Context, ReposListOptions, func(*types.MinimalRepo)) error
}
var _ RepoStore = (*repoStore)(nil)
// repoStore handles access to the repo table
type repoStore struct {
*basestore.Store
}
// Repos instantiates and returns a new RepoStore with prepared statements.
func Repos(db dbutil.DB) RepoStore {
return &repoStore{Store: basestore.NewWithDB(db, sql.TxOptions{})}
}
// ReposWith instantiates and returns a new RepoStore using the other
// store handle.
func ReposWith(other basestore.ShareableStore) RepoStore {
return &repoStore{Store: basestore.NewWithHandle(other.Handle())}
}
func (s *repoStore) With(other basestore.ShareableStore) RepoStore {
return &repoStore{Store: s.Store.With(other)}
}
func (s *repoStore) Transact(ctx context.Context) (RepoStore, error) {
txBase, err := s.Store.Transact(ctx)
return &repoStore{Store: txBase}, err
}
// Get finds and returns the repo with the given repository ID from the database.
// When a repo isn't found or has been blocked, an error is returned.
func (s *repoStore) Get(ctx context.Context, id api.RepoID) (_ *types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.Get", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
repos, err := s.listRepos(ctx, tr, ReposListOptions{
IDs: []api.RepoID{id},
LimitOffset: &LimitOffset{Limit: 1},
IncludeBlocked: true,
})
if err != nil {
return nil, err
}
if len(repos) == 0 {
return nil, &RepoNotFoundErr{ID: id}
}
repo := repos[0]
return repo, repo.IsBlocked()
}
var counterAccessGranted = promauto.NewCounter(prometheus.CounterOpts{
Name: "src_access_granted_private_repo",
Help: "metric to measure the impact of logging access granted to private repos",
})
func logPrivateRepoAccessGranted(ctx context.Context, db dbutil.DB, ids []api.RepoID) {
if disabled, _ := strconv.ParseBool(os.Getenv("SRC_DISABLE_LOG_PRIVATE_REPO_ACCESS")); disabled {
return
}
a := actor.FromContext(ctx)
arg, _ := json.Marshal(struct {
Resource string `json:"resource"`
Service string `json:"service"`
Repos []api.RepoID `json:"repo_ids"`
}{
Resource: "db.repo",
Service: env.MyName,
Repos: ids,
})
event := &SecurityEvent{
Name: SecurityEventNameAccessGranted,
URL: "",
UserID: uint32(a.UID),
AnonymousUserID: "",
Argument: arg,
Source: "BACKEND",
Timestamp: time.Now(),
}
// If this event was triggered by an internal actor we need to ensure that at
// least the UserID or AnonymousUserID field are set so that we don't trigger
// the security_event_logs_check_has_user constraint
if a.Internal {
event.AnonymousUserID = "internal"
}
SecurityEventLogs(db).LogEvent(ctx, event)
}
// GetByName returns the repository with the given nameOrUri from the
// database, or an error. If we have a match on name and uri, we prefer the
// match on name.
//
// Name is the name for this repository (e.g., "github.com/user/repo"). It is
// the same as URI, unless the user configures a non-default
// repositoryPathPattern.
//
// When a repo isn't found or has been blocked, an error is returned.
func (s *repoStore) GetByName(ctx context.Context, nameOrURI api.RepoName) (_ *types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.GetByName", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
repos, err := s.listRepos(ctx, tr, ReposListOptions{
Names: []string{string(nameOrURI)},
LimitOffset: &LimitOffset{Limit: 1},
IncludeBlocked: true,
})
if err != nil {
return nil, err
}
if len(repos) == 1 {
return repos[0], repos[0].IsBlocked()
}
// We don't fetch in the same SQL query since uri is not unique and could
// conflict with a name. We prefer returning the matching name if it
// exists.
repos, err = s.listRepos(ctx, tr, ReposListOptions{
URIs: []string{string(nameOrURI)},
LimitOffset: &LimitOffset{Limit: 1},
IncludeBlocked: true,
})
if err != nil {
return nil, err
}
if len(repos) == 0 {
return nil, &RepoNotFoundErr{Name: nameOrURI}
}
return repos[0], repos[0].IsBlocked()
}
// GetByHashedName returns the repository with the given hashedName from the database, or an error.
// RepoHashedName is the repository hashed name.
// When a repo isn't found or has been blocked, an error is returned.
func (s *repoStore) GetByHashedName(ctx context.Context, repoHashedName api.RepoHashedName) (_ *types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.GetByHashedName", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
repos, err := s.listRepos(ctx, tr, ReposListOptions{
HashedName: string(repoHashedName),
LimitOffset: &LimitOffset{Limit: 1},
IncludeBlocked: true,
})
if err != nil {
return nil, err
}
if len(repos) == 0 {
return nil, &RepoNotFoundErr{HashedName: repoHashedName}
}
return repos[0], repos[0].IsBlocked()
}
// GetByIDs returns a list of repositories by the given IDs. The result list may contain
// fewer entries than the candidate list when some IDs have no associated repository.
func (s *repoStore) GetByIDs(ctx context.Context, ids ...api.RepoID) (_ []*types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.GetByIDs", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
return s.listRepos(ctx, tr, ReposListOptions{IDs: ids})
}
// GetReposSetByIDs returns a map of repositories with the given IDs, indexed by their IDs. The map may
// contain fewer entries than the candidate list when some IDs have no associated repository.
func (s *repoStore) GetReposSetByIDs(ctx context.Context, ids ...api.RepoID) (map[api.RepoID]*types.Repo, error) {
repos, err := s.GetByIDs(ctx, ids...)
if err != nil {
return nil, err
}
repoMap := make(map[api.RepoID]*types.Repo, len(repos))
for _, r := range repos {
repoMap[r.ID] = r
}
return repoMap, nil
}
func (s *repoStore) Count(ctx context.Context, opt ReposListOptions) (ct int, err error) {
tr, ctx := trace.New(ctx, "repos.Count", "")
defer func() {
if err != nil {
tr.SetError(err)
}
tr.Finish()
}()
opt.Select = []string{"COUNT(*)"}
opt.OrderBy = nil
opt.LimitOffset = nil
err = s.list(ctx, tr, opt, func(rows *sql.Rows) error {
return rows.Scan(&ct)
})
return ct, err
}
// Metadata returns repo metadata used to decorate search results. The returned slice may be smaller than the
// number of IDs given if a repo with the given ID does not exist.
func (s *repoStore) Metadata(ctx context.Context, ids ...api.RepoID) (_ []*types.SearchedRepo, err error) {
tr, ctx := trace.New(ctx, "repos.Metadata", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
opts := ReposListOptions{
IDs: ids,
// Return a limited subset of fields
Select: []string{
"repo.id",
"repo.name",
"repo.description",
"repo.fork",
"repo.archived",
"repo.private",
"repo.stars",
"gr.last_fetched",
},
// Required so gr.last_fetched is select-able
joinGitserverRepos: true,
}
res := make([]*types.SearchedRepo, 0, len(ids))
scanMetadata := func(rows *sql.Rows) error {
var r types.SearchedRepo
if err := rows.Scan(
&r.ID,
&r.Name,
&dbutil.NullString{S: &r.Description},
&r.Fork,
&r.Archived,
&r.Private,
&dbutil.NullInt{N: &r.Stars},
&r.LastFetched,
); err != nil {
return err
}
res = append(res, &r)
return nil
}
return res, errors.Wrap(s.list(ctx, tr, opts, scanMetadata), "fetch metadata")
}
const listReposQueryFmtstr = `
-- source: internal/database/repos.go:list
%%s -- Populates "queryPrefix", i.e. CTEs
SELECT %s
FROM repo
%%s
WHERE
%%s -- Populates "queryConds"
AND
(%%s) -- Populates "authzConds"
%%s -- Populates "querySuffix"
`
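// Note: the template is expanded twice. %s is filled with the selected columns
// first; the escaped %%s placeholders survive that pass and are later populated
// with the CTEs, joins, predicates and authz conditions named in the inline comments.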
const getSourcesByRepoQueryStr = `
(
SELECT
json_agg(
json_build_object(
'CloneURL', esr.clone_url,
'ID', esr.external_service_id,
'Kind', LOWER(svcs.kind)
)
)
FROM external_service_repos AS esr
JOIN external_services AS svcs ON esr.external_service_id = svcs.id
WHERE
esr.repo_id = repo.id
AND
svcs.deleted_at IS NULL
)
`
var minimalRepoColumns = []string{
"repo.id",
"repo.name",
"repo.private",
"repo.stars",
}
var repoColumns = []string{
"repo.id",
"repo.name",
"repo.private",
"repo.external_id",
"repo.external_service_type",
"repo.external_service_id",
"repo.uri",
"repo.description",
"repo.fork",
"repo.archived",
"repo.stars",
"repo.created_at",
"repo.updated_at",
"repo.deleted_at",
"repo.metadata",
"repo.blocked",
}
func scanRepo(rows *sql.Rows, r *types.Repo) (err error) {
var sources dbutil.NullJSONRawMessage
var metadata json.RawMessage
var blocked dbutil.NullJSONRawMessage
err = rows.Scan(
&r.ID,
&r.Name,
&r.Private,
&dbutil.NullString{S: &r.ExternalRepo.ID},
&dbutil.NullString{S: &r.ExternalRepo.ServiceType},
&dbutil.NullString{S: &r.ExternalRepo.ServiceID},
&dbutil.NullString{S: &r.URI},
&dbutil.NullString{S: &r.Description},
&r.Fork,
&r.Archived,
&dbutil.NullInt{N: &r.Stars},
&r.CreatedAt,
&dbutil.NullTime{Time: &r.UpdatedAt},
&dbutil.NullTime{Time: &r.DeletedAt},
&metadata,
&blocked,
&sources,
)
if err != nil {
return err
}
if blocked.Raw != nil {
r.Blocked = &types.RepoBlock{}
if err = json.Unmarshal(blocked.Raw, r.Blocked); err != nil {
return err
}
}
type sourceInfo struct {
ID int64
CloneURL string
Kind string
}
r.Sources = make(map[string]*types.SourceInfo)
if sources.Raw != nil {
var srcs []sourceInfo
if err = json.Unmarshal(sources.Raw, &srcs); err != nil {
return errors.Wrap(err, "scanRepo: failed to unmarshal sources")
}
for _, src := range srcs {
urn := extsvc.URN(src.Kind, src.ID)
r.Sources[urn] = &types.SourceInfo{
ID: urn,
CloneURL: src.CloneURL,
}
}
}
typ, ok := extsvc.ParseServiceType(r.ExternalRepo.ServiceType)
if !ok {
log15.Warn("scanRepo - failed to parse service type", "r.ExternalRepo.ServiceType", r.ExternalRepo.ServiceType)
return nil
}
switch typ {
case extsvc.TypeGitHub:
r.Metadata = new(github.Repository)
case extsvc.TypeGitLab:
r.Metadata = new(gitlab.Project)
case extsvc.TypeBitbucketServer:
r.Metadata = new(bitbucketserver.Repo)
case extsvc.TypeBitbucketCloud:
r.Metadata = new(bitbucketcloud.Repo)
case extsvc.TypeAWSCodeCommit:
r.Metadata = new(awscodecommit.Repository)
case extsvc.TypeGitolite:
r.Metadata = new(gitolite.Repo)
case extsvc.TypePerforce:
r.Metadata = new(perforce.Depot)
case extsvc.TypePhabricator:
r.Metadata = new(phabricator.Repo)
case extsvc.TypePagure:
r.Metadata = new(pagure.Project)
case extsvc.TypeOther:
r.Metadata = new(extsvc.OtherRepoMetadata)
case extsvc.TypeJVMPackages:
r.Metadata = new(jvmpackages.Metadata)
case extsvc.TypeNPMPackages:
r.Metadata = new(npmpackages.Metadata)
default:
log15.Warn("scanRepo - unknown service type", "typ", typ)
return nil
}
if err = json.Unmarshal(metadata, r.Metadata); err != nil {
return errors.Wrapf(err, "scanRepo: failed to unmarshal %q metadata", typ)
}
return nil
}
// ReposListOptions specifies the options for listing repositories.
//
// Query and IncludePatterns/ExcludePatterns may not be used together.
type ReposListOptions struct {
// What to select of each row.
Select []string
// Query specifies a search query for repositories. If specified, then the Sort and
// Direction options are ignored
Query string
// IncludePatterns is a list of regular expressions, all of which must match all
// repositories returned in the list.
IncludePatterns []string
// ExcludePattern is a regular expression that must not match any repository
// returned in the list.
ExcludePattern string
// CaseSensitivePatterns determines if IncludePatterns and ExcludePattern are treated
// with case sensitivity or not.
CaseSensitivePatterns bool
// Names is a list of repository names used to limit the results to that
// set of repositories.
// Note: This is currently used for version contexts. In future iterations,
// version contexts may have their own table
// and this may be replaced by the version context name.
Names []string
// HashedName is a repository hashed name used to limit the results to that repository.
HashedName string
// URIs selects any repos in the given set of URIs (i.e. uri column)
URIs []string
// IDs of repos to list. When zero-valued, this is omitted from the predicate set.
IDs []api.RepoID
// UserID, if non zero, will limit the set of results to repositories added by the user
// through external services. Mutually exclusive with the ExternalServiceIDs and SearchContextID options.
UserID int32
// OrgID, if non zero, will limit the set of results to repositories owned by the organization
// through external services. Mutually exclusive with the ExternalServiceIDs and SearchContextID options.
OrgID int32
// SearchContextID, if non zero, will limit the set of results to repositories listed in
// the search context.
SearchContextID int64
// ExternalServiceIDs, if non empty, will only return repos added by the given external services.
// The id is that of the external_services table NOT the external_service_id in the repo table
// Mutually exclusive with the UserID option.
ExternalServiceIDs []int64
// ExternalRepos of repos to list. When zero-valued, this is omitted from the predicate set.
ExternalRepos []api.ExternalRepoSpec
// ExternalRepoIncludeContains is the list of specs to include repos using
// SIMILAR TO matching. When zero-valued, this is omitted from the predicate set.
ExternalRepoIncludeContains []api.ExternalRepoSpec
// ExternalRepoExcludeContains is the list of specs to exclude repos using
// SIMILAR TO matching. When zero-valued, this is omitted from the predicate set.
ExternalRepoExcludeContains []api.ExternalRepoSpec
// NoForks excludes forks from the list.
NoForks bool
// OnlyForks excludes non-forks from the list.
OnlyForks bool
// NoArchived excludes archived repositories from the list.
NoArchived bool
// OnlyArchived excludes non-archived repositories from the list.
OnlyArchived bool
// NoCloned excludes cloned repositories from the list.
NoCloned bool
// OnlyCloned excludes non-cloned repositories from the list.
OnlyCloned bool
// NoPrivate excludes private repositories from the list.
NoPrivate bool
// OnlyPrivate excludes non-private repositories from the list.
OnlyPrivate bool
// Index when set will only include repositories which should be indexed
// if true. If false it will exclude repositories which should be
// indexed. An example use case of this is for indexed search only
// indexing a subset of repositories.
Index *bool
// List of fields by which to order the return repositories.
OrderBy RepoListOrderBy
// Cursors to efficiently paginate through large result sets.
Cursors types.MultiCursor
// UseOr decides between ANDing or ORing the predicates together.
UseOr bool
// IncludeUserPublicRepos will include repos from the user_public_repos table if this field is true, and the user_id
// is non-zero. Note that these are not repos owned by this user, just ones they are interested in.
IncludeUserPublicRepos bool
// FailedFetch, if true, will filter to only repos that failed to clone or fetch
// when last attempted. Specifically, this means that they have a non-null
// last_error value in the gitserver_repos table.
FailedFetch bool
// MinLastChanged finds repository metadata or data that has changed since
// MinLastChanged. It filters against repos.UpdatedAt,
// gitserver.LastChanged and searchcontexts.UpdatedAt.
//
// LastChanged is the time of the last git fetch which changed refs
// stored, i.e. the last time any branch changed (not just HEAD).
//
// UpdatedAt is the last time the metadata changed for a repository.
//
// Note: This option is used by our search indexer to determine what has
// changed since it last polled. The fields it checks are all based on
// what can affect search indexes.
MinLastChanged time.Time
// IncludeBlocked, if true, will include blocked repositories in the result set. Repos can be blocked
// automatically or manually for different reasons, like being too big or having copyright issues.
IncludeBlocked bool
// IncludeDeleted, if true, will include soft deleted repositories in the result set.
IncludeDeleted bool
// joinGitserverRepos, if true, will make the fields of gitserver_repos available to select against,
// with the table alias "gr".
joinGitserverRepos bool
// ExcludeSources, if true, will NULL out the Sources field on repo. Computing it is relatively costly
// and if it doesn't end up being used this is wasted compute.
ExcludeSources bool
*LimitOffset
}
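// RepoListOrderBy specifies the fields and directions by which returned
// repositories are ordered. A rough sketch of how it renders, using the column
// constants defined below:
//
//	RepoListOrderBy{
//		{Field: RepoListStars, Descending: true, Nulls: "LAST"},
//		{Field: RepoListID},
//	}.SQL()
//
// produces approximately: ORDER BY stars DESC NULLS LAST, id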
type RepoListOrderBy []RepoListSort
func (r RepoListOrderBy) SQL() *sqlf.Query {
if len(r) == 0 {
return sqlf.Sprintf("")
}
clauses := make([]*sqlf.Query, 0, len(r))
for _, s := range r {
clauses = append(clauses, s.SQL())
}
return sqlf.Sprintf(`ORDER BY %s`, sqlf.Join(clauses, ", "))
}
// RepoListSort is a field by which to sort and the direction of the sorting.
type RepoListSort struct {
Field RepoListColumn
Descending bool
Nulls string
}
func (r RepoListSort) SQL() *sqlf.Query {
var sb strings.Builder
sb.WriteString(string(r.Field))
if r.Descending {
sb.WriteString(" DESC")
}
if r.Nulls == "FIRST" || r.Nulls == "LAST" {
sb.WriteString(" NULLS " + r.Nulls)
}
return sqlf.Sprintf(sb.String())
}
// RepoListColumn is a column by which repositories can be sorted. These correspond to columns in the database.
type RepoListColumn string
const (
RepoListCreatedAt RepoListColumn = "created_at"
RepoListName RepoListColumn = "name"
RepoListID RepoListColumn = "id"
RepoListStars RepoListColumn = "stars"
)
// List lists repositories in the Sourcegraph repository
//
// This will not return any repositories from external services that are not present in the Sourcegraph repository.
// Matching against Query is a case-insensitive substring match: "query" matches
// any repo whose lowercased name contains "query" (see the Query handling in listSQL).
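//
// A minimal sketch of typical options (values are illustrative; LimitOffset is
// the limit/offset helper type assumed to be defined elsewhere in this package):
//
//	opt := ReposListOptions{
//		Query:       "sourcegraph",
//		NoForks:     true,
//		OnlyCloned:  true,
//		OrderBy:     RepoListOrderBy{{Field: RepoListStars, Descending: true}},
//		LimitOffset: &LimitOffset{Limit: 50},
//	}
//	repos, err := s.List(ctx, opt)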
func (s *repoStore) List(ctx context.Context, opt ReposListOptions) (results []*types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.List", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
// always having ID in ORDER BY helps Postgres create a more performant query plan
if len(opt.OrderBy) == 0 || (len(opt.OrderBy) == 1 && opt.OrderBy[0].Field != RepoListID) {
opt.OrderBy = append(opt.OrderBy, RepoListSort{Field: RepoListID})
}
return s.listRepos(ctx, tr, opt)
}
// StreamMinimalRepos calls the given callback for each of the repository names and ids that match the given options.
func (s *repoStore) StreamMinimalRepos(ctx context.Context, opt ReposListOptions, cb func(*types.MinimalRepo)) (err error) {
tr, ctx := trace.New(ctx, "repos.StreamMinimalRepos", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
opt.Select = minimalRepoColumns
if len(opt.OrderBy) == 0 {
opt.OrderBy = append(opt.OrderBy, RepoListSort{Field: RepoListID})
}
var privateIDs []api.RepoID
err = s.list(ctx, tr, opt, func(rows *sql.Rows) error {
var r types.MinimalRepo
var private bool
err := rows.Scan(&r.ID, &r.Name, &private, &dbutil.NullInt{N: &r.Stars})
if err != nil {
return err
}
cb(&r)
if private {
privateIDs = append(privateIDs, r.ID)
}
return nil
})
if err != nil {
return err
}
if len(privateIDs) > 0 {
counterAccessGranted.Inc()
logPrivateRepoAccessGranted(ctx, s.Handle().DB(), privateIDs)
}
return nil
}
// ListMinimalRepos returns a list of repository names and ids.
func (s *repoStore) ListMinimalRepos(ctx context.Context, opt ReposListOptions) (results []types.MinimalRepo, err error) {
return results, s.StreamMinimalRepos(ctx, opt, func(r *types.MinimalRepo) {
results = append(results, *r)
})
}
func (s *repoStore) listRepos(ctx context.Context, tr *trace.Trace, opt ReposListOptions) (rs []*types.Repo, err error) {
var privateIDs []api.RepoID
err = s.list(ctx, tr, opt, func(rows *sql.Rows) error {
var r types.Repo
if err := scanRepo(rows, &r); err != nil {
return err
}
rs = append(rs, &r)
if r.Private {
privateIDs = append(privateIDs, r.ID)
}
return nil
})
if len(privateIDs) > 0 {
counterAccessGranted.Inc()
logPrivateRepoAccessGranted(ctx, s.Handle().DB(), privateIDs)
}
return rs, err
}
func (s *repoStore) list(ctx context.Context, tr *trace.Trace, opt ReposListOptions, scanRepo func(rows *sql.Rows) error) error {
q, err := s.listSQL(ctx, opt)
if err != nil {
return err
}
tr.LogFields(trace.SQL(q))
rows, err := s.Query(ctx, q)
if err != nil {
if e, ok := err.(*net.OpError); ok && e.Timeout() {
return errors.Wrapf(context.DeadlineExceeded, "RepoStore.list: %s", err.Error())
}
return err
}
defer rows.Close()
for rows.Next() {
if err := scanRepo(rows); err != nil {
return err
}
}
return rows.Err()
}
func (s *repoStore) listSQL(ctx context.Context, opt ReposListOptions) (*sqlf.Query, error) {
var ctes, joins, where []*sqlf.Query
// Cursor-based pagination requires parsing a handful of extra fields, which
// may result in additional query conditions.
if len(opt.Cursors) > 0 {
cursorConds, err := parseCursorConds(opt.Cursors)
if err != nil {
return nil, err
}
if cursorConds != nil {
where = append(where, cursorConds)
}
}
if opt.Query != "" && (len(opt.IncludePatterns) > 0 || opt.ExcludePattern != "") {
return nil, errors.New("Repos.List: Query and IncludePatterns/ExcludePattern options are mutually exclusive")
}
if opt.Query != "" {
where = append(where, sqlf.Sprintf("lower(name) LIKE %s", "%"+strings.ToLower(opt.Query)+"%"))
}
for _, includePattern := range opt.IncludePatterns {
extraConds, err := parsePattern(includePattern, opt.CaseSensitivePatterns)
if err != nil {
return nil, err
}
where = append(where, extraConds...)
}
if opt.ExcludePattern != "" {
if opt.CaseSensitivePatterns {
where = append(where, sqlf.Sprintf("name::text !~ %s", opt.ExcludePattern))
} else {
where = append(where, sqlf.Sprintf("lower(name) !~* %s", opt.ExcludePattern))
}
}
if len(opt.IDs) > 0 {
where = append(where, sqlf.Sprintf("id = ANY (%s)", pq.Array(opt.IDs)))
}
if len(opt.ExternalRepos) > 0 {
er := make([]*sqlf.Query, 0, len(opt.ExternalRepos))
for _, spec := range opt.ExternalRepos {
er = append(er, sqlf.Sprintf("(external_id = %s AND external_service_type = %s AND external_service_id = %s)", spec.ID, spec.ServiceType, spec.ServiceID))
}
where = append(where, sqlf.Sprintf("(%s)", sqlf.Join(er, "\n OR ")))
}
if len(opt.ExternalRepoIncludeContains) > 0 {
er := make([]*sqlf.Query, 0, len(opt.ExternalRepoIncludeContains))
for _, spec := range opt.ExternalRepoIncludeContains {
er = append(er, sqlf.Sprintf("(external_id SIMILAR TO %s AND external_service_type = %s AND external_service_id = %s)", spec.ID, spec.ServiceType, spec.ServiceID))
}
where = append(where, sqlf.Sprintf("(%s)", sqlf.Join(er, "\n OR ")))
}
if len(opt.ExternalRepoExcludeContains) > 0 {
er := make([]*sqlf.Query, 0, len(opt.ExternalRepoExcludeContains))
for _, spec := range opt.ExternalRepoExcludeContains {
er = append(er, sqlf.Sprintf("(external_id NOT SIMILAR TO %s AND external_service_type = %s AND external_service_id = %s)", spec.ID, spec.ServiceType, spec.ServiceID))
}
where = append(where, sqlf.Sprintf("(%s)", sqlf.Join(er, "\n AND ")))
}
if opt.NoForks {
where = append(where, sqlf.Sprintf("NOT fork"))
}
if opt.OnlyForks {
where = append(where, sqlf.Sprintf("fork"))
}
if opt.NoArchived {
where = append(where, sqlf.Sprintf("NOT archived"))
}
if opt.OnlyArchived {
where = append(where, sqlf.Sprintf("archived"))
}
if opt.NoCloned {
where = append(where, sqlf.Sprintf("(gr.clone_status = 'not_cloned' OR gr.clone_status IS NULL)"))
}
if opt.OnlyCloned {
where = append(where, sqlf.Sprintf("gr.clone_status = 'cloned'"))
}
if opt.FailedFetch {
where = append(where, sqlf.Sprintf("gr.last_error IS NOT NULL"))
}
if !opt.MinLastChanged.IsZero() {
conds := []*sqlf.Query{
sqlf.Sprintf("gr.last_changed >= %s", opt.MinLastChanged),
sqlf.Sprintf("COALESCE(repo.updated_at, repo.created_at) >= %s", opt.MinLastChanged),
sqlf.Sprintf("repo.id IN (SELECT scr.repo_id FROM search_context_repos scr LEFT JOIN search_contexts sc ON scr.search_context_id = sc.id WHERE sc.updated_at >= %s)", opt.MinLastChanged),
}
where = append(where, sqlf.Sprintf("(%s)", sqlf.Join(conds, " OR ")))
}
if opt.NoPrivate {
where = append(where, sqlf.Sprintf("NOT private"))
}
if opt.OnlyPrivate {
where = append(where, sqlf.Sprintf("private"))
}
if len(opt.Names) > 0 {
lowerNames := make([]string, len(opt.Names))
for i, name := range opt.Names {
lowerNames[i] = strings.ToLower(name)
}
// Performance improvement
//
// Comparing JUST the name field will use the repo_name_unique index, which is
// a unique btree index over the citext name field. This tends to be a VERY SLOW
// comparison over a large table. We were seeing query plans growing linearly with
// the size of the result set such that each unique index scan would take ~0.1ms.
// This adds up as we regularly query 10k-40k repositories at a time.
//
// This condition instead forces the use of a btree index repo_name_idx defined over
// (lower(name::text) COLLATE "C"). This is a MUCH faster comparison as it does not
// need to fold the casing of either the input value nor the value in the index.
where = append(where, sqlf.Sprintf(`lower(name::text) COLLATE "C" = ANY (%s::text[])`, pq.Array(lowerNames)))
}
if opt.HashedName != "" {
// This will use the repo_hashed_name_idx
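// As a sketch (an assumption, not taken from this file), the matching hex value
// is expected to be computed on the Go side roughly as:
//   sum := sha256.Sum256([]byte(strings.ToLower(name)))
//   hashedName := hex.EncodeToString(sum[:])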
where = append(where, sqlf.Sprintf(`sha256(lower(name)::bytea) = decode(%s, 'hex')`, opt.HashedName))
}
if len(opt.URIs) > 0 {
where = append(where, sqlf.Sprintf("uri = ANY (%s)", pq.Array(opt.URIs)))
}
if opt.Index != nil {
// We don't currently have an index column, but when we want the
// indexable repositories to be a subset it will live in the database
// layer. So we do the filtering here.
indexAll := conf.SearchIndexEnabled()
if indexAll != *opt.Index {
where = append(where, sqlf.Sprintf("false"))
}
}
if (len(opt.ExternalServiceIDs) != 0 && (opt.UserID != 0 || opt.OrgID != 0)) ||
(opt.UserID != 0 && opt.OrgID != 0) {
return nil, errors.New("options ExternalServiceIDs, UserID and OrgID are mutually exclusive")
} else if len(opt.ExternalServiceIDs) != 0 {
where = append(where, sqlf.Sprintf("EXISTS (SELECT 1 FROM external_service_repos esr WHERE repo.id = esr.repo_id AND esr.external_service_id = ANY (%s))", pq.Array(opt.ExternalServiceIDs)))
} else if opt.SearchContextID != 0 {
// Joining on distinct search context repos to avoid returning duplicates
joins = append(joins, sqlf.Sprintf(`JOIN (SELECT DISTINCT repo_id, search_context_id FROM search_context_repos) dscr ON repo.id = dscr.repo_id`))
where = append(where, sqlf.Sprintf("dscr.search_context_id = %d", opt.SearchContextID))
} else if opt.UserID != 0 {
userReposCTE := sqlf.Sprintf(userReposCTEFmtstr, opt.UserID)
if opt.IncludeUserPublicRepos {
userReposCTE = sqlf.Sprintf("%s UNION %s", userReposCTE, sqlf.Sprintf(userPublicReposCTEFmtstr, opt.UserID))
}
ctes = append(ctes, sqlf.Sprintf("user_repos AS (%s)", userReposCTE))
joins = append(joins, sqlf.Sprintf("JOIN user_repos ON user_repos.id = repo.id"))
} else if opt.OrgID != 0 {
joins = append(joins, sqlf.Sprintf("INNER JOIN external_service_repos ON external_service_repos.repo_id = repo.id"))
where = append(where, sqlf.Sprintf("external_service_repos.org_id = %d", opt.OrgID))
}
if opt.NoCloned || opt.OnlyCloned || opt.FailedFetch || !opt.MinLastChanged.IsZero() || opt.joinGitserverRepos {
joins = append(joins, sqlf.Sprintf("LEFT JOIN gitserver_repos gr ON gr.repo_id = repo.id"))
}
baseConds := sqlf.Sprintf("TRUE")
if !opt.IncludeDeleted {
baseConds = sqlf.Sprintf("repo.deleted_at IS NULL")
}
if !opt.IncludeBlocked {
baseConds = sqlf.Sprintf("%s AND repo.blocked IS NULL", baseConds)
}
whereConds := sqlf.Sprintf("TRUE")
if len(where) > 0 {
if opt.UseOr {
whereConds = sqlf.Join(where, "\n OR ")
} else {
whereConds = sqlf.Join(where, "\n AND ")
}
}
queryConds := sqlf.Sprintf("%s AND (%s)", baseConds, whereConds)
queryPrefix := sqlf.Sprintf("")
if len(ctes) > 0 {
queryPrefix = sqlf.Sprintf("WITH %s", sqlf.Join(ctes, ",\n"))
}
querySuffix := sqlf.Sprintf("%s %s", opt.OrderBy.SQL(), opt.LimitOffset.SQL())
columns := repoColumns
if !opt.ExcludeSources {
columns = append(columns, getSourcesByRepoQueryStr)
} else {
columns = append(columns, "NULL")
}
if len(opt.Select) > 0 {
columns = opt.Select
}
authzConds, err := AuthzQueryConds(ctx, NewDB(s.Handle().DB()))
if err != nil {
return nil, err
}
q := sqlf.Sprintf(
fmt.Sprintf(listReposQueryFmtstr, strings.Join(columns, ",")),
queryPrefix,
sqlf.Join(joins, "\n"),
queryConds,
authzConds, // 🚨 SECURITY: Enforce repository permissions
querySuffix,
)
return q, nil
}
const userReposCTEFmtstr = `
SELECT repo_id as id FROM external_service_repos WHERE user_id = %d
`
const userPublicReposCTEFmtstr = `
SELECT repo_id as id FROM user_public_repos WHERE user_id = %d
`
type ListIndexableReposOptions struct {
// If true, will only include uncloned indexable repos
OnlyUncloned bool
// If true, we include user added private repos
IncludePrivate bool
*LimitOffset
}
var listIndexableReposMinStars, _ = strconv.Atoi(env.Get(
"SRC_INDEXABLE_REPOS_MIN_STARS",
"8",
"Minimum stars needed for a public repo to be indexed on sourcegraph.com",
))
// ListIndexableRepos returns a list of repos to be indexed for search on sourcegraph.com.
// This includes all repos with >= SRC_INDEXABLE_REPOS_MIN_STARS stars as well as user or org added repos.
func (s *repoStore) ListIndexableRepos(ctx context.Context, opts ListIndexableReposOptions) (results []types.MinimalRepo, err error) {
tr, ctx := trace.New(ctx, "repos.ListIndexable", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
var where, joins []*sqlf.Query
if opts.OnlyUncloned {
joins = append(joins, sqlf.Sprintf(
"LEFT JOIN gitserver_repos gr ON gr.repo_id = repo.id",
))
where = append(where, sqlf.Sprintf(
"(gr.clone_status IS NULL OR gr.clone_status = %s)",
types.CloneStatusNotCloned,
))
}
if !opts.IncludePrivate {
where = append(where, sqlf.Sprintf("NOT repo.private"))
}
if len(where) == 0 {
where = append(where, sqlf.Sprintf("TRUE"))
}
minStars := listIndexableReposMinStars
if minStars == 0 {
minStars = 8
}
q := sqlf.Sprintf(
listIndexableReposQuery,
sqlf.Join(joins, "\n"),
minStars,
sqlf.Join(where, "\nAND"),
opts.LimitOffset.SQL(),
)
rows, err := s.Query(ctx, q)
if err != nil {
return nil, errors.Wrap(err, "querying indexable repos")
}
defer rows.Close()
for rows.Next() {
var r types.MinimalRepo
if err := rows.Scan(&r.ID, &r.Name, &dbutil.NullInt{N: &r.Stars}); err != nil {
return nil, errors.Wrap(err, "scanning indexable repos")
}
results = append(results, r)
}
if err = rows.Err(); err != nil {
return nil, errors.Wrap(err, "scanning indexable repos")
}
return results, nil
}
const listIndexableReposQuery = `
-- source: internal/database/repos.go:ListIndexableRepos
SELECT
repo.id, repo.name, repo.stars
FROM repo
%s
WHERE
(
repo.stars >= %s
OR
lower(repo.name) ~ '^(src\.fedoraproject\.org|maven|npm|jdk)'
OR
repo.id IN (
SELECT
repo_id
FROM
external_service_repos
WHERE
external_service_repos.user_id IS NOT NULL
OR
external_service_repos.org_id IS NOT NULL
UNION ALL
SELECT
repo_id
FROM
user_public_repos
)
)
AND
deleted_at IS NULL
AND
blocked IS NULL
AND
%s
ORDER BY stars DESC NULLS LAST
%s
`
// Create inserts repos and their sources, respectively in the repo and external_service_repos table.
// Associated external services must already exist.
func (s *repoStore) Create(ctx context.Context, repos ...*types.Repo) (err error) {
tr, ctx := trace.New(ctx, "repos.Create", "")
defer func() {
tr.SetError(err)
tr.Finish()
}()
records := make([]*repoRecord, 0, len(repos))
for _, r := range repos {
repoRec, err := newRepoRecord(r)
if err != nil {
return err
}
records = append(records, repoRec)
}
encodedRepos, err := json.Marshal(records)
if err != nil {
return err
}
q := sqlf.Sprintf(insertReposQuery, string(encodedRepos))
rows, err := s.Query(ctx, q)
if err != nil {
return errors.Wrap(err, "insert")
}
defer func() { err = basestore.CloseRows(rows, err) }()
for i := 0; rows.Next(); i++ {
if err := rows.Scan(&repos[i].ID); err != nil {
return err
}
}
return nil
}
// repoRecord is the json representation of a repository as used in this package
// Postgres CTEs.
type repoRecord struct {
ID api.RepoID `json:"id"`
Name string `json:"name"`
URI *string `json:"uri,omitempty"`
Description string `json:"description"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt *time.Time `json:"updated_at,omitempty"`
DeletedAt *time.Time `json:"deleted_at,omitempty"`
ExternalServiceType *string `json:"external_service_type,omitempty"`
ExternalServiceID *string `json:"external_service_id,omitempty"`
ExternalID *string `json:"external_id,omitempty"`
Archived bool `json:"archived"`
Fork bool `json:"fork"`
Stars int `json:"stars"`
Private bool `json:"private"`
Metadata json.RawMessage `json:"metadata"`
Sources json.RawMessage `json:"sources,omitempty"`
}
func newRepoRecord(r *types.Repo) (*repoRecord, error) {
metadata, err := metadataColumn(r.Metadata)
if err != nil {
return nil, errors.Wrapf(err, "newRecord: metadata marshalling failed")
}
sources, err := sourcesColumn(r.ID, r.Sources)
if err != nil {
return nil, errors.Wrapf(err, "newRecord: sources marshalling failed")
}
return &repoRecord{
ID: r.ID,
Name: string(r.Name),
URI: nullStringColumn(r.URI),
Description: r.Description,
CreatedAt: r.CreatedAt.UTC(),
UpdatedAt: nullTimeColumn(r.UpdatedAt),
DeletedAt: nullTimeColumn(r.DeletedAt),
ExternalServiceType: nullStringColumn(r.ExternalRepo.ServiceType),
ExternalServiceID: nullStringColumn(r.ExternalRepo.ServiceID),
ExternalID: nullStringColumn(r.ExternalRepo.ID),
Archived: r.Archived,
Fork: r.Fork,
Stars: r.Stars,
Private: r.Private,
Metadata: metadata,
Sources: sources,
}, nil
}
func nullTimeColumn(t time.Time) *time.Time {
if t.IsZero() {
return nil
}
return &t
}
func nullInt32Column(n int32) *int32 {
if n == 0 {
return nil
}
return &n
}
func nullStringColumn(s string) *string {
if s == "" {
return nil
}
return &s
}
func metadataColumn(metadata interface{}) (msg json.RawMessage, err error) {
switch m := metadata.(type) {
case nil:
msg = json.RawMessage("{}")
case string:
msg = json.RawMessage(m)
case []byte:
msg = m
case json.RawMessage:
msg = m
default:
msg, err = json.MarshalIndent(m, " ", " ")
}
return
}
func sourcesColumn(repoID api.RepoID, sources map[string]*types.SourceInfo) (json.RawMessage, error) {
var records []externalServiceRepo
for _, src := range sources {
records = append(records, externalServiceRepo{
ExternalServiceID: src.ExternalServiceID(),
RepoID: int64(repoID),
CloneURL: src.CloneURL,
})
}
return json.MarshalIndent(records, " ", " ")
}
type externalServiceRepo struct {
ExternalServiceID int64 `json:"external_service_id"`
RepoID int64 `json:"repo_id"`
CloneURL string `json:"clone_url"`
}
var insertReposQuery = `
WITH repos_list AS (
SELECT * FROM ROWS FROM (
json_to_recordset(%s)
AS (
name citext,
uri citext,
description text,
created_at timestamptz,
updated_at timestamptz,
deleted_at timestamptz,
external_service_type text,
external_service_id text,
external_id text,
archived boolean,
fork boolean,
stars integer,
private boolean,
metadata jsonb,
sources jsonb
)
)
WITH ORDINALITY
),
inserted_repos AS (
INSERT INTO repo (
name,
uri,
description,
created_at,
updated_at,
deleted_at,
external_service_type,
external_service_id,
external_id,
archived,
fork,
stars,
private,
metadata
)
SELECT
name,
NULLIF(BTRIM(uri), ''),
description,
created_at,
updated_at,
deleted_at,
external_service_type,
external_service_id,
external_id,
archived,
fork,
stars,
private,
metadata
FROM repos_list
RETURNING id
),
inserted_repos_rows AS (
SELECT id, ROW_NUMBER() OVER () AS rn FROM inserted_repos
),
repos_list_rows AS (
SELECT *, ROW_NUMBER() OVER () AS rn FROM repos_list
),
inserted_repos_with_ids AS (
SELECT
inserted_repos_rows.id,
repos_list_rows.*
FROM repos_list_rows
JOIN inserted_repos_rows USING (rn)
),
sources_list AS (
SELECT
inserted_repos_with_ids.id AS repo_id,
sources.external_service_id AS external_service_id,
sources.clone_url AS clone_url
FROM
inserted_repos_with_ids,
jsonb_to_recordset(inserted_repos_with_ids.sources)
AS sources(
external_service_id bigint,
repo_id integer,
clone_url text
)
),
insert_sources AS (
INSERT INTO external_service_repos (
external_service_id,
repo_id,
user_id,
org_id,
clone_url
)
SELECT
external_service_id,
repo_id,
es.namespace_user_id,
es.namespace_org_id,
clone_url
FROM sources_list
JOIN external_services es ON (es.id = external_service_id)
ON CONFLICT ON CONSTRAINT external_service_repos_repo_id_external_service_id_unique
DO
UPDATE SET clone_url = EXCLUDED.clone_url
WHERE external_service_repos.clone_url != EXCLUDED.clone_url
)
SELECT id FROM inserted_repos_with_ids;
`
// Delete deletes repos associated with the given ids and their associated sources.
func (s *repoStore) Delete(ctx context.Context, ids ...api.RepoID) error {
if len(ids) == 0 {
return nil
}
// The number of deleted repos can potentially be higher
// than the maximum number of arguments we can pass to postgres.
// We pass them as a json array instead to overcome this limitation.
encodedIds, err := json.Marshal(ids)
if err != nil {
return err
}
q := sqlf.Sprintf(deleteReposQuery, string(encodedIds))
err = s.Exec(ctx, q)
if err != nil {
return errors.Wrap(err, "delete")
}
return nil
}
const deleteReposQuery = `
WITH repo_ids AS (
SELECT jsonb_array_elements_text(%s) AS id
)
UPDATE repo
SET
name = soft_deleted_repository_name(name),
deleted_at = transaction_timestamp()
FROM repo_ids
WHERE deleted_at IS NULL
AND repo.id = repo_ids.id::int
`
const listEnabledNamesQueryFmtstr = `
-- source:internal/database/repos.go:ListEnabledNames
SELECT
name
FROM
repo
WHERE
deleted_at IS NULL
AND
blocked IS NULL
`
// ListEnabledNames returns a list of all enabled repo names. This is used in the
// repo purger. We special case just returning enabled names so that we read much
// less data into memory.
func (s *repoStore) ListEnabledNames(ctx context.Context) (values []api.RepoName, err error) {
q := sqlf.Sprintf(listEnabledNamesQueryFmtstr)
rows, queryErr := s.Query(ctx, q)
if queryErr != nil {
return nil, queryErr
}
defer func() { err = basestore.CloseRows(rows, err) }()
for rows.Next() {
var value api.RepoName
if err := rows.Scan(&value); err != nil {
return nil, err
}
values = append(values, value)
}
return values, nil
}
const getFirstRepoNamesByCloneURLQueryFmtstr = `
-- source:internal/database/repos.go:GetFirstRepoNamesByCloneURL
SELECT
name
FROM
repo r
JOIN
external_service_repos esr ON r.id = esr.repo_id
WHERE
esr.clone_url = %s
ORDER BY
r.updated_at DESC
LIMIT 1
`
// GetFirstRepoNamesByCloneURL returns the first repo name in our database that
// matches the given clone url. If no repo is found, an empty string and nil error
// are returned.
func (s *repoStore) GetFirstRepoNamesByCloneURL(ctx context.Context, cloneURL string) (api.RepoName, error) {
name, _, err := basestore.ScanFirstString(s.Query(ctx, sqlf.Sprintf(getFirstRepoNamesByCloneURLQueryFmtstr, cloneURL)))
if err != nil {
return "", err
}
return api.RepoName(name), nil
}
func parsePattern(p string, caseSensitive bool) ([]*sqlf.Query, error) {
exact, like, pattern, err := parseIncludePattern(p)
if err != nil {
return nil, err
}
var conds []*sqlf.Query
if exact != nil {
if len(exact) == 0 || (len(exact) == 1 && exact[0] == "") {
conds = append(conds, sqlf.Sprintf("TRUE"))
} else {
conds = append(conds, sqlf.Sprintf("name = ANY (%s)", pq.Array(exact)))
}
}
for _, v := range like {
if caseSensitive {
conds = append(conds, sqlf.Sprintf(`name::text LIKE %s`, v))
} else {
conds = append(conds, sqlf.Sprintf(`lower(name) LIKE %s`, strings.ToLower(v)))
}
}
if pattern != "" {
if caseSensitive {
conds = append(conds, sqlf.Sprintf("name::text ~ %s", pattern))
} else {
conds = append(conds, sqlf.Sprintf("lower(name) ~ lower(%s)", pattern))
}
}
return []*sqlf.Query{sqlf.Sprintf("(%s)", sqlf.Join(conds, "OR"))}, nil
}
// parseCursorConds returns the WHERE conditions for the given cursor
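//
// For example, a multi-cursor of {Column: "stars", Direction: "next", Value: "100"}
// and {Column: "id", Direction: "next", Value: "42"} yields a condition roughly
// equivalent to: (stars, id) >= (100, 42).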
func parseCursorConds(cs types.MultiCursor) (cond *sqlf.Query, err error) {
var (
direction string
operator string
columns = make([]string, 0, len(cs))
values = make([]*sqlf.Query, 0, len(cs))
)
for _, c := range cs {
if c == nil || c.Column == "" || c.Value == "" {
continue
}
if direction == "" {
switch direction = c.Direction; direction {
case "next":
operator = ">="
case "prev":
operator = "<="
default:
return nil, errors.Errorf("missing or invalid cursor direction: %q", c.Direction)
}
} else if direction != c.Direction {
return nil, errors.Errorf("multi-cursors must have the same direction")
}
switch RepoListColumn(c.Column) {
case RepoListName, RepoListStars, RepoListCreatedAt, RepoListID:
columns = append(columns, c.Column)
values = append(values, sqlf.Sprintf("%s", c.Value))
default:
return nil, errors.Errorf("missing or invalid cursor: %q %q", c.Column, c.Value)
}
}
if len(columns) == 0 {
return nil, nil
}
return sqlf.Sprintf(fmt.Sprintf("(%s) %s (%%s)", strings.Join(columns, ", "), operator), sqlf.Join(values, ", ")), nil
}
// parseIncludePattern either (1) parses the pattern into a list of exact possible
// string values and LIKE patterns if such a list can be determined from the pattern,
// or (2) returns the original regexp if those patterns are not equivalent to the
// regexp.
//
// It allows Repos.List to optimize for the common case where a pattern like
// `(^github.com/foo/bar$)|(^github.com/baz/qux$)` is provided. In that case,
// it's faster to query for "WHERE name IN (...)" the two possible exact values
// (because it can use an index) instead of using a "WHERE name ~*" regexp condition
// (which generally can't use an index).
//
// This optimization is necessary for good performance when there are many repos
// in the database. With this optimization, specifying a "repogroup:" in the query
// will be fast (even if there are many repos) because the query can be constrained
// efficiently to only the repos in the group.
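//
// A rough sketch of the expected results:
//
//	exact, like, re, _ := parseIncludePattern(`(^github\.com/foo/bar$)|(^github\.com/baz/qux$)`)
//	// exact: ["github.com/foo/bar", "github.com/baz/qux"], like: nil, re: ""
//
//	exact, like, re, _ = parseIncludePattern(`^github\.com/foo/`)
//	// exact: nil, like: ["github.com/foo/%"], re: ""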
func parseIncludePattern(pattern string) (exact, like []string, regexp string, err error) {
re, err := regexpsyntax.Parse(pattern, regexpsyntax.Perl)
if err != nil {
return nil, nil, "", err
}
exact, contains, prefix, suffix, err := allMatchingStrings(re.Simplify(), false)
if err != nil {
return nil, nil, "", err
}
for _, v := range contains {
like = append(like, "%"+v+"%")
}
for _, v := range prefix {
like = append(like, v+"%")
}
for _, v := range suffix {
like = append(like, "%"+v)
}
if exact != nil || like != nil {
return exact, like, "", nil
}
return nil, nil, pattern, nil
}
// allMatchingStrings returns a complete list of the strings that re
// matches, if it's possible to determine the list. The "last" argument
// indicates if this is the last part of the original regexp.
func allMatchingStrings(re *regexpsyntax.Regexp, last bool) (exact, contains, prefix, suffix []string, err error) {
switch re.Op {
case regexpsyntax.OpEmptyMatch:
return []string{""}, nil, nil, nil, nil
case regexpsyntax.OpLiteral:
prog, err := regexpsyntax.Compile(re)
if err != nil {
return nil, nil, nil, nil, err
}
prefix, complete := prog.Prefix()
if complete {
return nil, []string{prefix}, nil, nil, nil
}
return nil, nil, nil, nil, nil
case regexpsyntax.OpCharClass:
// Only handle simple case of one range.
if len(re.Rune) == 2 {
len := int(re.Rune[1] - re.Rune[0] + 1)
if len > 26 {
// Avoid large character ranges (which could blow up the number
// of possible matches).
return nil, nil, nil, nil, nil
}
chars := make([]string, len)
for r := re.Rune[0]; r <= re.Rune[1]; r++ {
chars[r-re.Rune[0]] = string(r)
}
return nil, chars, nil, nil, nil
}
return nil, nil, nil, nil, nil
case regexpsyntax.OpStar:
if len(re.Sub) == 1 && (re.Sub[0].Op == regexpsyntax.OpAnyCharNotNL || re.Sub[0].Op == regexpsyntax.OpAnyChar) {
if last {
return nil, []string{""}, nil, nil, nil
}
return nil, nil, nil, nil, nil
}
case regexpsyntax.OpBeginText:
return nil, nil, []string{""}, nil, nil
case regexpsyntax.OpEndText:
return nil, nil, nil, []string{""}, nil
case regexpsyntax.OpCapture:
return allMatchingStrings(re.Sub0[0], false)
case regexpsyntax.OpConcat:
var begin, end bool
for i, sub := range re.Sub {
if sub.Op == regexpsyntax.OpBeginText && i == 0 {
begin = true
continue
}
if sub.Op == regexpsyntax.OpEndText && i == len(re.Sub)-1 {
end = true
continue
}
subexact, subcontains, subprefix, subsuffix, err := allMatchingStrings(sub, i == len(re.Sub)-1)
if err != nil {
return nil, nil, nil, nil, err
}
if subexact == nil && subcontains == nil && subprefix == nil && subsuffix == nil {
return nil, nil, nil, nil, nil
}
// We only return subcontains for child literals. But because it
// is part of a concat pattern, we know it is exact when we
// append. This transformation has been running in production for
// many years, so while it isn't correct for all inputs
// theoretically, in practice this hasn't been a problem. However,
// a redesign of this function as a whole is needed. - keegan
if subcontains != nil {
subexact = append(subexact, subcontains...)
}
if exact == nil {
exact = subexact
} else {
size := len(exact) * len(subexact)
if len(subexact) > 4 || size > 30 {
// Avoid blowup in number of possible matches.
return nil, nil, nil, nil, nil
}
combined := make([]string, 0, size)
for _, match := range exact {
for _, submatch := range subexact {
combined = append(combined, match+submatch)
}
}
exact = combined
}
}
if exact == nil {
exact = []string{""}
}
if begin && end {
return exact, nil, nil, nil, nil
} else if begin {
return nil, nil, exact, nil, nil
} else if end {
return nil, nil, nil, exact, nil
}
return nil, exact, nil, nil, nil
case regexpsyntax.OpAlternate:
for _, sub := range re.Sub {
subexact, subcontains, subprefix, subsuffix, err := allMatchingStrings(sub, false)
if err != nil {
return nil, nil, nil, nil, err
}
exact = append(exact, subexact...)
contains = append(contains, subcontains...)
prefix = append(prefix, subprefix...)
suffix = append(suffix, subsuffix...)
}
return exact, contains, prefix, suffix, nil
}
return nil, nil, nil, nil, nil
}
| [
"\"SRC_DISABLE_LOG_PRIVATE_REPO_ACCESS\""
]
| []
| [
"SRC_DISABLE_LOG_PRIVATE_REPO_ACCESS"
]
| [] | ["SRC_DISABLE_LOG_PRIVATE_REPO_ACCESS"] | go | 1 | 0 | |
libgo/go/cmd/cgo/main.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Cgo; see gmp.go for an overview.
// TODO(rsc):
// Emit correct line number annotations.
// Make gc understand the annotations.
package main
import (
"crypto/md5"
"flag"
"fmt"
"go/ast"
"go/printer"
"go/token"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"runtime"
"sort"
"strings"
"cmd/internal/edit"
"cmd/internal/objabi"
)
// A Package collects information about the package we're going to write.
type Package struct {
PackageName string // name of package
PackagePath string
PtrSize int64
IntSize int64
GccOptions []string
GccIsClang bool
CgoFlags map[string][]string // #cgo flags (CFLAGS, LDFLAGS)
Written map[string]bool
Name map[string]*Name // accumulated Name from Files
ExpFunc []*ExpFunc // accumulated ExpFunc from Files
Decl []ast.Decl
GoFiles []string // list of Go files
GccFiles []string // list of gcc output files
Preamble string // collected preamble for _cgo_export.h
typedefs map[string]bool // type names that appear in the types of the objects we're interested in
typedefList []typedefInfo
}
// A typedefInfo is an element on Package.typedefList: a typedef name
// and the position where it was required.
type typedefInfo struct {
typedef string
pos token.Pos
}
// A File collects information about a single Go input file.
type File struct {
AST *ast.File // parsed AST
Comments []*ast.CommentGroup // comments from file
Package string // Package name
Preamble string // C preamble (doc comment on import "C")
Ref []*Ref // all references to C.xxx in AST
Calls []*Call // all calls to C.xxx in AST
ExpFunc []*ExpFunc // exported functions for this file
Name map[string]*Name // map from Go name to Name
NamePos map[*Name]token.Pos // map from Name to position of the first reference
Edit *edit.Buffer
}
// Untyped constants in the current package.
var consts = make(map[string]bool)
func (f *File) offset(p token.Pos) int {
return fset.Position(p).Offset
}
func nameKeys(m map[string]*Name) []string {
var ks []string
for k := range m {
ks = append(ks, k)
}
sort.Strings(ks)
return ks
}
// A Call refers to a call of a C.xxx function in the AST.
type Call struct {
Call *ast.CallExpr
Deferred bool
Done bool
}
// A Ref refers to an expression of the form C.xxx in the AST.
type Ref struct {
Name *Name
Expr *ast.Expr
Context astContext
Done bool
}
func (r *Ref) Pos() token.Pos {
return (*r.Expr).Pos()
}
var nameKinds = []string{"iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "macro", "not-type"}
// A Name collects information about C.xxx.
type Name struct {
Go string // name used in Go referring to package C
Mangle string // name used in generated Go
C string // name used in C
Define string // #define expansion
Kind string // one of the nameKinds
Type *Type // the type of xxx
FuncType *FuncType
AddError bool
Const string // constant definition
}
// IsVar reports whether Kind is either "var" or "fpvar"
func (n *Name) IsVar() bool {
return n.Kind == "var" || n.Kind == "fpvar"
}
// IsConst reports whether Kind is either "iconst", "fconst" or "sconst"
func (n *Name) IsConst() bool {
return strings.HasSuffix(n.Kind, "const")
}
// An ExpFunc is an exported function, callable from C.
// Such functions are identified in the Go input file
// by doc comments containing the line //export ExpName
type ExpFunc struct {
Func *ast.FuncDecl
ExpName string // name to use from C
Doc string
}
// A TypeRepr contains the string representation of a type.
type TypeRepr struct {
Repr string
FormatArgs []interface{}
}
// A Type collects information about a type in both the C and Go worlds.
type Type struct {
Size int64
Align int64
C *TypeRepr
Go ast.Expr
EnumValues map[string]int64
Typedef string
}
// A FuncType collects information about a function type in both the C and Go worlds.
type FuncType struct {
Params []*Type
Result *Type
Go *ast.FuncType
}
func usage() {
fmt.Fprint(os.Stderr, "usage: cgo -- [compiler options] file.go ...\n")
flag.PrintDefaults()
os.Exit(2)
}
var ptrSizeMap = map[string]int64{
"386": 4,
"alpha": 8,
"amd64": 8,
"arm": 4,
"arm64": 8,
"m68k": 4,
"mips": 4,
"mipsle": 4,
"mips64": 8,
"mips64le": 8,
"mips64p32": 4,
"mips64p32le": 4,
"nios2": 4,
"ppc": 4,
"ppc64": 8,
"ppc64le": 8,
"riscv64": 8,
"s390": 4,
"s390x": 8,
"sh": 4,
"shbe": 4,
"sparc": 4,
"sparc64": 8,
}
var intSizeMap = map[string]int64{
"386": 4,
"alpha": 8,
"amd64": 8,
"arm": 4,
"arm64": 8,
"m68k": 4,
"mips": 4,
"mipsle": 4,
"mips64": 8,
"mips64le": 8,
"mips64p32": 8,
"mips64p32le": 8,
"nios2": 4,
"ppc": 4,
"ppc64": 8,
"ppc64le": 8,
"riscv64": 8,
"s390": 4,
"s390x": 8,
"sh": 4,
"shbe": 4,
"sparc": 4,
"sparc64": 8,
}
var cPrefix string
var fset = token.NewFileSet()
var dynobj = flag.String("dynimport", "", "if non-empty, print dynamic import data for that file")
var dynout = flag.String("dynout", "", "write -dynimport output to this file")
var dynpackage = flag.String("dynpackage", "main", "set Go package for -dynimport output")
var dynlinker = flag.Bool("dynlinker", false, "record dynamic linker information in -dynimport mode")
// This flag is for bootstrapping a new Go implementation,
// to generate Go types that match the data layout and
// constant values used in the host's C libraries and system calls.
var godefs = flag.Bool("godefs", false, "for bootstrap: write Go definitions for C file to standard output")
var srcDir = flag.String("srcdir", "", "source directory")
var objDir = flag.String("objdir", "", "object directory")
var importPath = flag.String("importpath", "", "import path of package being built (for comments in generated files)")
var exportHeader = flag.String("exportheader", "", "where to write export header if any exported functions")
var gccgo = flag.Bool("gccgo", false, "generate files for use with gccgo")
var gccgoprefix = flag.String("gccgoprefix", "", "-fgo-prefix option used with gccgo")
var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo")
var gccgoMangleCheckDone bool
var gccgoNewmanglingInEffect bool
var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code")
var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code")
var goarch, goos string
func main() {
objabi.AddVersionFlag() // -V
flag.Usage = usage
flag.Parse()
if *dynobj != "" {
// cgo -dynimport is essentially a separate helper command
// built into the cgo binary. It scans a gcc-produced executable
// and dumps information about the imported symbols and the
// imported libraries. The 'go build' rules for cgo prepare an
// appropriate executable and then use its import information
// instead of needing to make the linkers duplicate all the
// specialized knowledge gcc has about where to look for imported
// symbols and which ones to use.
dynimport(*dynobj)
return
}
if *godefs {
// Generating definitions pulled from header files,
// to be checked into Go repositories.
// Line numbers are just noise.
conf.Mode &^= printer.SourcePos
}
args := flag.Args()
if len(args) < 1 {
usage()
}
// Find first arg that looks like a go file and assume everything before
// that consists of options to pass to gcc.
var i int
for i = len(args); i > 0; i-- {
if !strings.HasSuffix(args[i-1], ".go") {
break
}
}
if i == len(args) {
usage()
}
goFiles := args[i:]
for _, arg := range args[:i] {
if arg == "-fsanitize=thread" {
tsanProlog = yesTsanProlog
}
if arg == "-fsanitize=memory" {
msanProlog = yesMsanProlog
}
}
p := newPackage(args[:i])
// Record CGO_LDFLAGS from the environment for external linking.
if ldflags := os.Getenv("CGO_LDFLAGS"); ldflags != "" {
args, err := splitQuoted(ldflags)
if err != nil {
fatalf("bad CGO_LDFLAGS: %q (%s)", ldflags, err)
}
p.addToFlag("LDFLAGS", args)
}
// Need a unique prefix for the global C symbols that
// we use to coordinate between gcc and ourselves.
// We already put _cgo_ at the beginning, so the main
// concern is other cgo wrappers for the same functions.
// Use the beginning of the md5 of the input to disambiguate.
h := md5.New()
io.WriteString(h, *importPath)
fs := make([]*File, len(goFiles))
for i, input := range goFiles {
if *srcDir != "" {
input = filepath.Join(*srcDir, input)
}
b, err := ioutil.ReadFile(input)
if err != nil {
fatalf("%s", err)
}
if _, err = h.Write(b); err != nil {
fatalf("%s", err)
}
f := new(File)
f.Edit = edit.NewBuffer(b)
f.ParseGo(input, b)
f.DiscardCgoDirectives()
fs[i] = f
}
cPrefix = fmt.Sprintf("_%x", h.Sum(nil)[0:6])
if *objDir == "" {
// make sure that _obj directory exists, so that we can write
// all the output files there.
os.Mkdir("_obj", 0777)
*objDir = "_obj"
}
*objDir += string(filepath.Separator)
for i, input := range goFiles {
f := fs[i]
p.Translate(f)
for _, cref := range f.Ref {
switch cref.Context {
case ctxCall, ctxCall2:
if cref.Name.Kind != "type" {
break
}
old := *cref.Expr
*cref.Expr = cref.Name.Type.Go
f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), gofmt(cref.Name.Type.Go))
}
}
if nerrors > 0 {
os.Exit(2)
}
p.PackagePath = f.Package
p.Record(f)
if *godefs {
os.Stdout.WriteString(p.godefs(f, input))
} else {
p.writeOutput(f, input)
}
}
if !*godefs {
p.writeDefs()
}
if nerrors > 0 {
os.Exit(2)
}
}
// newPackage returns a new Package that will invoke
// gcc with the additional arguments specified in args.
func newPackage(args []string) *Package {
goarch = runtime.GOARCH
if s := os.Getenv("GOARCH"); s != "" {
goarch = s
}
goos = runtime.GOOS
if s := os.Getenv("GOOS"); s != "" {
goos = s
}
ptrSize := ptrSizeMap[goarch]
if ptrSize == 0 {
fatalf("unknown ptrSize for $GOARCH %q", goarch)
}
intSize := intSizeMap[goarch]
if intSize == 0 {
fatalf("unknown intSize for $GOARCH %q", goarch)
}
// Reset locale variables so gcc emits English errors [sic].
os.Setenv("LANG", "en_US.UTF-8")
os.Setenv("LC_ALL", "C")
p := &Package{
PtrSize: ptrSize,
IntSize: intSize,
CgoFlags: make(map[string][]string),
Written: make(map[string]bool),
}
p.addToFlag("CFLAGS", args)
return p
}
// Record what needs to be recorded about f.
func (p *Package) Record(f *File) {
if p.PackageName == "" {
p.PackageName = f.Package
} else if p.PackageName != f.Package {
error_(token.NoPos, "inconsistent package names: %s, %s", p.PackageName, f.Package)
}
if p.Name == nil {
p.Name = f.Name
} else {
for k, v := range f.Name {
if p.Name[k] == nil {
p.Name[k] = v
} else if p.incompleteTypedef(p.Name[k].Type) {
p.Name[k] = v
} else if p.incompleteTypedef(v.Type) {
// Nothing to do.
} else if _, ok := nameToC[k]; ok {
// Names we predefine may appear inconsistent
// if some files typedef them and some don't.
// Issue 26743.
} else if !reflect.DeepEqual(p.Name[k], v) {
error_(token.NoPos, "inconsistent definitions for C.%s", fixGo(k))
}
}
}
if f.ExpFunc != nil {
p.ExpFunc = append(p.ExpFunc, f.ExpFunc...)
p.Preamble += "\n" + f.Preamble
}
p.Decl = append(p.Decl, f.AST.Decls...)
}
// incompleteTypedef reports whether t appears to be an incomplete
// typedef definition.
func (p *Package) incompleteTypedef(t *Type) bool {
return t == nil || (t.Size == 0 && t.Align == -1)
}
| [
"\"CGO_LDFLAGS\"",
"\"GOARCH\"",
"\"GOOS\""
]
| []
| [
"CGO_LDFLAGS",
"GOARCH",
"GOOS"
]
| [] | ["CGO_LDFLAGS", "GOARCH", "GOOS"] | go | 3 | 0 | |
plugins/inputs/sysstat/sysstat_test.go | // +build linux
package sysstat
import (
"fmt"
"os"
"os/exec"
"path"
"testing"
"github.com/yevheniir/telegraf-fork/testutil"
)
var s = Sysstat{
Log: testutil.Logger{},
interval: 10,
Sadc: "/usr/lib/sa/sadc",
Sadf: "/usr/bin/sadf",
Group: false,
Activities: []string{"DISK", "SNMP"},
Options: map[string]string{
"C": "cpu",
"d": "disk",
},
DeviceTags: map[string][]map[string]string{
"sda": {
{
"vg": "rootvg",
},
},
},
}
func TestGather(t *testing.T) {
// overwriting exec commands with mock commands
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := acc.GatherError(s.Gather)
if err != nil {
t.Fatal(err)
}
cpuTags := map[string]string{"device": "all"}
diskTags := map[string]string{"device": "sda", "vg": "rootvg"}
tests := []struct {
measurement string
fields map[string]interface{}
tags map[string]string
}{
{
"cpu_pct_user",
map[string]interface{}{
"value": 0.65,
},
cpuTags,
},
{
"cpu_pct_nice",
map[string]interface{}{
"value": 0.0,
},
cpuTags,
},
{
"cpu_pct_system",
map[string]interface{}{
"value": 0.10,
},
cpuTags,
},
{
"cpu_pct_iowait",
map[string]interface{}{
"value": 0.15,
},
cpuTags,
},
{
"cpu_pct_steal",
map[string]interface{}{
"value": 0.0,
},
cpuTags,
},
{
"cpu_pct_idle",
map[string]interface{}{
"value": 99.1,
},
cpuTags,
},
{
"disk_tps",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
{
"disk_rd_sec_per_s",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
{
"disk_wr_sec_per_s",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
{
"disk_avgrq-sz",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
{
"disk_avgqu-sz",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
{
"disk_await",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
{
"disk_svctm",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
{
"disk_pct_util",
map[string]interface{}{
"value": 0.00,
},
diskTags,
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, test.measurement, test.fields, test.tags)
}
}
func TestGatherGrouped(t *testing.T) {
s.Group = true
// overwriting exec commands with mock commands
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := acc.GatherError(s.Gather)
if err != nil {
t.Fatal(err)
}
var tests = []struct {
measurement string
fields map[string]interface{}
tags map[string]string
}{
{
"cpu",
map[string]interface{}{
"pct_user": 0.65,
"pct_nice": 0.0,
"pct_system": 0.10,
"pct_iowait": 0.15,
"pct_steal": 0.0,
"pct_idle": 99.1,
},
map[string]string{"device": "all"},
},
{
"disk",
map[string]interface{}{
"tps": 0.00,
"rd_sec_per_s": 0.00,
"wr_sec_per_s": 0.00,
"avgrq-sz": 0.00,
"avgqu-sz": 0.00,
"await": 0.00,
"svctm": 0.00,
"pct_util": 0.00,
},
map[string]string{"device": "sda", "vg": "rootvg"},
},
{
"disk",
map[string]interface{}{
"tps": 2.01,
"rd_sec_per_s": 1.0,
"wr_sec_per_s": 0.00,
"avgrq-sz": 0.30,
"avgqu-sz": 0.60,
"await": 0.70,
"svctm": 0.20,
"pct_util": 0.30,
},
map[string]string{"device": "sdb"},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, test.measurement, test.fields, test.tags)
}
}
func TestEscape(t *testing.T) {
var tests = []struct {
input string
escaped string
}{
{
"%util",
"pct_util",
},
{
"%%util",
"pct_util",
},
{
"bread/s",
"bread_per_s",
},
{
"%nice",
"pct_nice",
},
}
for _, test := range tests {
if test.escaped != escape(test.input) {
t.Errorf("wrong escape, got %s, wanted %s", escape(test.input), test.escaped)
}
}
}
// Helper function that mocks the exec.Command call (and calls the test binary)
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
// TestHelperProcess isn't a real test. It's used to mock exec.Command
// For example, if you run:
// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- sadf -p -- -p -C tmpFile
// it returns mockData["C"] output.
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
mockData := map[string]string{
"C": `dell-xps 5 2016-03-25 16:18:10 UTC all %user 0.65
dell-xps 5 2016-03-25 16:18:10 UTC all %nice 0.00
dell-xps 5 2016-03-25 16:18:10 UTC all %system 0.10
dell-xps 5 2016-03-25 16:18:10 UTC all %iowait 0.15
dell-xps 5 2016-03-25 16:18:10 UTC all %steal 0.00
dell-xps 5 2016-03-25 16:18:10 UTC all %idle 99.10
`,
"d": `dell-xps 5 2016-03-25 16:18:10 UTC sda tps 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda rd_sec/s 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda wr_sec/s 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda avgrq-sz 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda avgqu-sz 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda await 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda svctm 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda %util 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sdb tps 2.01
dell-xps 5 2016-03-25 16:18:10 UTC sdb rd_sec/s 1.00
dell-xps 5 2016-03-25 16:18:10 UTC sdb wr_sec/s 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sdb avgrq-sz 0.30
dell-xps 5 2016-03-25 16:18:10 UTC sdb avgqu-sz 0.60
dell-xps 5 2016-03-25 16:18:10 UTC sdb await 0.70
dell-xps 5 2016-03-25 16:18:10 UTC sdb svctm 0.20
dell-xps 5 2016-03-25 16:18:10 UTC sdb %util 0.30
`,
}
args := os.Args
// The preceding arguments are test harness boilerplate, which looks like:
// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
cmd, args := args[3], args[4:]
// Handle the case where args[0] is dir:...
switch path.Base(cmd) {
case "sadf":
fmt.Fprint(os.Stdout, mockData[args[3]])
default:
}
// some code here to check arguments perhaps?
os.Exit(0)
}
| [
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
setup.py | #!/usr/bin/env python
# coding=utf-8
# Copyright [2017] [B2W Digital]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from setuptools.command.develop import develop as _develop
# Package basic info
PACKAGE_NAME = 'marvin_python_toolbox'
PACKAGE_DESCRIPTION = 'Marvin Python Toolbox'
URL = 'https://github.com/marvin-ai/marvin-python-toolbox'
AUTHOR_NAME = 'Daniel Takabayashi'
AUTHOR_EMAIL = '[email protected]'
PYTHON_2 = True
PYTHON_3 = False
# Project status
# (should be 'planning', 'pre-alpha', 'alpha', 'beta', 'stable', 'mature' or 'inactive').
STATUS = 'alpha'
# Project topic
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers for a list
TOPIC = 'Topic :: Software Development :: Libraries :: Python Modules'
# External dependencies
# More info https://pythonhosted.org/setuptools/setuptools.html#declaring-dependencies
REQUIREMENTS_EXTERNAL = [
'six>=1.10.0',
'bumpversion>=0.5.3',
'click>=3.3',
'jupyter>=1.0.0',
'pep8>=1.7.0',
'virtualenv>=15.0.1',
'pytest-cov>=1.8.1',
'mock>=2.0.0',
'tox==2.2.0',
'pytest-watch>=4.1.0',
'pytest-testmon>=0.8.2',
'jsonschema>=2.5.1',
'pytest==2.9.2',
'pytest-flask>=0.10.0',
'python-slugify==0.1.0',
'paramiko==2.1.2',
'PyHive==0.3.0',
'thrift==0.10.0',
'thrift-sasl==0.2.1',
'virtualenvwrapper>=4.7.1',
'requests==2.5.1',
'python-dateutil==2.4.2',
'python-slugify==0.1.0',
'path.py==7.2',
'httpretty==0.8.4',
'jsonschema>=2.5.1',
'gprof2dot',
'ujsonpath==0.0.2',
'simplejson>=3.10.0',
'configobj>=5.0.6',
'findspark==1.1.0',
'grpcio==1.6.0',
'grpcio-tools==1.6.0',
'joblib==0.11',
]
# Test dependencies
REQUIREMENTS_TESTS = []
# This is normally an empty list
DEPENDENCY_LINKS_EXTERNAL = []
# script to be used
SCRIPTS = ['bin/marvin']
def _get_version():
"""Return the project version from VERSION file."""
with open(os.path.join(os.path.dirname(__file__), PACKAGE_NAME, 'VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
return version
def _set_autocomplete(dir):
virtualenv = os.environ.get('VIRTUAL_ENV', None)
if virtualenv:
postactivate = os.path.join(virtualenv, 'bin', 'postactivate')
if os.path.exists(postactivate):
from pkg_resources import Requirement, resource_filename
bash_completion = resource_filename(
Requirement.parse('marvin_python_toolbox'),
os.path.join('marvin_python_toolbox', 'extras', 'marvin_bash_completion'))
command = 'source "{}"'.format(bash_completion)
with open(postactivate, 'r+') as fp:
lines = fp.readlines()
fp.seek(0)
configured = False
for line in lines:
if 'marvin_bash_completion' in line:
# Replacing old autocomplete configuration
fp.write(command)
configured = True
else:
fp.write(line)
if not configured:
fp.write(command)
# 'Autocomplete was successfully configured'
fp.write('\n')
fp.truncate()
class develop(_develop):
def run(self):
_develop.run(self)
self.execute(_set_autocomplete, (self.install_lib,), msg="Running autocomplete preparation task")
class Tox(TestCommand):
"""Run the test cases using TOX command."""
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Import here, because outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
else:
# Run all tests by default
args = ['-c', os.path.join(os.path.dirname(__file__), 'tox.ini'), 'tests']
errno = tox.cmdline(args=args)
sys.exit(errno)
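# Typical invocation (a sketch): `python setup.py test` runs the default tox
# environments, while `python setup.py test --tox-args="-e py27"` forwards the
# given arguments to tox via the --tox-args option declared above.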
DEVELOPMENT_STATUS = {
'planning': '1 - Planning',
'pre-alpha': '2 - Pre-Alpha',
'alpha': '3 - Alpha',
'beta': '4 - Beta',
'stable': '5 - Production/Stable',
'mature': '6 - Mature',
'inactive': '7 - Inactive',
}
CLASSIFIERS = ['Development Status :: {}'.format(DEVELOPMENT_STATUS[STATUS])]
if PYTHON_2:
CLASSIFIERS += [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
]
if PYTHON_3:
CLASSIFIERS += [
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
]
setup(
name=PACKAGE_NAME,
version=_get_version(),
url=URL,
description=PACKAGE_DESCRIPTION,
long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
author=AUTHOR_NAME,
maintainer=AUTHOR_NAME,
maintainer_email=AUTHOR_EMAIL,
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=REQUIREMENTS_EXTERNAL,
tests_require=REQUIREMENTS_TESTS,
dependency_links=DEPENDENCY_LINKS_EXTERNAL,
scripts=SCRIPTS,
cmdclass={'test': Tox, 'develop': develop},
) | []
| []
| [
"VIRTUAL_ENV"
]
| [] | ["VIRTUAL_ENV"] | python | 1 | 0 | |
venv/Lib/site-packages/tensorboard/uploader/proto/write_service_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from tensorboard.uploader.proto import write_service_pb2 as tensorboard_dot_uploader_dot_proto_dot_write__service__pb2
class TensorBoardWriterServiceStub(object):
"""Service for writing data to TensorBoard.dev.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateExperiment = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/CreateExperiment',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.CreateExperimentRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.CreateExperimentResponse.FromString,
)
self.UpdateExperiment = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/UpdateExperiment',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.UpdateExperimentRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.UpdateExperimentResponse.FromString,
)
self.DeleteExperiment = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/DeleteExperiment',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteExperimentRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteExperimentResponse.FromString,
)
self.PurgeData = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/PurgeData',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.PurgeDataRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.PurgeDataResponse.FromString,
)
self.WriteScalar = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/WriteScalar',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteScalarRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteScalarResponse.FromString,
)
self.WriteTensor = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/WriteTensor',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteTensorRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteTensorResponse.FromString,
)
self.GetOrCreateBlobSequence = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/GetOrCreateBlobSequence',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetOrCreateBlobSequenceRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetOrCreateBlobSequenceResponse.FromString,
)
self.GetBlobMetadata = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/GetBlobMetadata',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetBlobMetadataRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetBlobMetadataResponse.FromString,
)
self.WriteBlob = channel.stream_stream(
'/tensorboard.service.TensorBoardWriterService/WriteBlob',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteBlobRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteBlobResponse.FromString,
)
self.DeleteOwnUser = channel.unary_unary(
'/tensorboard.service.TensorBoardWriterService/DeleteOwnUser',
request_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteOwnUserRequest.SerializeToString,
response_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteOwnUserResponse.FromString,
)
class TensorBoardWriterServiceServicer(object):
"""Service for writing data to TensorBoard.dev.
"""
def CreateExperiment(self, request, context):
"""Request for a new location to write TensorBoard readable events.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateExperiment(self, request, context):
"""Request to mutate metadata associated with an experiment.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteExperiment(self, request, context):
"""Request that an experiment be deleted, along with all tags and scalars
that it contains. This call may only be made by the original owner of the
experiment.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PurgeData(self, request, context):
"""Request that unreachable data be purged. Used only for testing;
disabled in production.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WriteScalar(self, request, context):
"""Request additional scalar data be stored in TensorBoard.dev.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WriteTensor(self, request, context):
"""Request additional tensor data be stored in TensorBoard.dev.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOrCreateBlobSequence(self, request, context):
"""Request to obtain a specific BlobSequence entry, creating it if needed,
to be subsequently populated with blobs.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBlobMetadata(self, request, context):
"""Request the current status of blob data being stored in TensorBoard.dev,
to support resumable uploads.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WriteBlob(self, request_iterator, context):
"""Request additional blob data be stored in TensorBoard.dev.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteOwnUser(self, request, context):
"""Request that the calling user and all their data be permanently deleted.
Used for testing purposes.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TensorBoardWriterServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateExperiment': grpc.unary_unary_rpc_method_handler(
servicer.CreateExperiment,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.CreateExperimentRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.CreateExperimentResponse.SerializeToString,
),
'UpdateExperiment': grpc.unary_unary_rpc_method_handler(
servicer.UpdateExperiment,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.UpdateExperimentRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.UpdateExperimentResponse.SerializeToString,
),
'DeleteExperiment': grpc.unary_unary_rpc_method_handler(
servicer.DeleteExperiment,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteExperimentRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteExperimentResponse.SerializeToString,
),
'PurgeData': grpc.unary_unary_rpc_method_handler(
servicer.PurgeData,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.PurgeDataRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.PurgeDataResponse.SerializeToString,
),
'WriteScalar': grpc.unary_unary_rpc_method_handler(
servicer.WriteScalar,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteScalarRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteScalarResponse.SerializeToString,
),
'WriteTensor': grpc.unary_unary_rpc_method_handler(
servicer.WriteTensor,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteTensorRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteTensorResponse.SerializeToString,
),
'GetOrCreateBlobSequence': grpc.unary_unary_rpc_method_handler(
servicer.GetOrCreateBlobSequence,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetOrCreateBlobSequenceRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetOrCreateBlobSequenceResponse.SerializeToString,
),
'GetBlobMetadata': grpc.unary_unary_rpc_method_handler(
servicer.GetBlobMetadata,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetBlobMetadataRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.GetBlobMetadataResponse.SerializeToString,
),
'WriteBlob': grpc.stream_stream_rpc_method_handler(
servicer.WriteBlob,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteBlobRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.WriteBlobResponse.SerializeToString,
),
'DeleteOwnUser': grpc.unary_unary_rpc_method_handler(
servicer.DeleteOwnUser,
request_deserializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteOwnUserRequest.FromString,
response_serializer=tensorboard_dot_uploader_dot_proto_dot_write__service__pb2.DeleteOwnUserResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorboard.service.TensorBoardWriterService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| []
| []
| []
| [] | [] | python | null | null | null |
trompace/config.py | import configparser
import datetime
import logging
import os
import urllib
from typing import List, Dict
from urllib.parse import urlparse
import requests
import trompace
import jwt
class TrompaConfig:
config: configparser.ConfigParser = None
host: str = None
websocket_host: str = None
# Is authentication required to write to the CE?
server_auth_required: bool = True
# JWT identifier
jwt_id: str = None
# JWT key
jwt_key: str = None
# Allowed CE scopes
jwt_scopes: List[str] = []
# path to store a cache file containing the jwt token
jwt_key_cache: str = None
# jwt token
jwt_token_encoded: str = None
# decoded jwt token
jwt_token_decoded: Dict[str, str] = {}
def load(self, configfile: str = None):
if configfile is None:
configfile = os.getenv("TROMPACE_CLIENT_CONFIG")
if not configfile:
raise ValueError("called load() without a path and TROMPACE_CLIENT_CONFIG environment variable unset")
if not os.path.exists(configfile):
raise ValueError(f"No such config file '{configfile}'")
self.config = configparser.ConfigParser()
self.config.read(configfile)
self._set_logging()
self._set_server()
self._set_jwt()
def _set_logging(self):
section_logging = self.config["logging"]
level = section_logging.get("level").upper()
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
trompace.logger.addHandler(ch)
trompace.logger.setLevel(level)
def _set_server(self):
server = self.config["server"]
if "host" not in server:
raise ValueError("Cannot find 'server.host' option")
if "secure" in server:
raise ValueError("Config file has changed, add http:// or https:// to server.host")
host = server.get("host")
if not host.startswith("http") or "://" not in host:
raise ValueError("server.host option doesn't appear to be a url with scheme")
parsed = urlparse(host)
hostpath = parsed.netloc + parsed.path
if not parsed.scheme:
raise ValueError("No scheme set on host")
else:
scheme = parsed.scheme
self.host = f"{scheme}://{hostpath}"
wss_scheme = "wss" if scheme == "https" else "ws"
websocket_host = f"{wss_scheme}://{hostpath}"
self.websocket_host = urllib.parse.urljoin(websocket_host, "graphql")
def _set_jwt(self):
server = self.config["server"]
host = server.get("host")
auth = self.config["auth"]
self.server_auth_required = auth.getboolean("required", True)
if not self.server_auth_required:
trompace.logger.debug("Auth not required, skipping setup")
return
if "id" not in auth or "key" not in auth or "scopes" not in auth:
raise ValueError("Cannot find 'auth.id' or 'auth.key' or 'auth.scopes' option")
self.jwt_id = auth.get("id")
self.jwt_key = auth.get("key")
self.jwt_scopes = auth.get("scopes").split(",")
if "token_cache_dir" not in auth:
cache_dir = os.getcwd()
trompace.logger.debug(f"No cache directory set for storing jwt token, "
f"using current directory ({cache_dir})")
else:
cache_dir = auth.get("token_cache_dir")
jwt_cache_file = f".trompace-client-jwt-token-cache-{host.replace('/', '-').replace(':', '')}"
self.jwt_key_cache = os.path.join(cache_dir, jwt_cache_file)
if os.path.exists(self.jwt_key_cache):
trompace.logger.debug(f"found a cached token, reading from file {jwt_cache_file}")
with open(self.jwt_key_cache) as fp:
token = fp.read()
self._set_jwt_token(token)
def _set_jwt_token(self, token):
try:
decoded = jwt.decode(token, algorithms=["HS256"], options={"verify_signature": False})
self.jwt_token_encoded = token
self.jwt_token_decoded = decoded
except (jwt.DecodeError, jwt.ExpiredSignatureError):
trompace.logger.warning("Could not decode cached jwt token, ignoring")
def _save_jwt_token(self, token):
"""Save a JWT token to the cache file"""
with open(self.jwt_key_cache, "w") as fp:
fp.write(token)
@property
def jwt_token(self):
"""Get the token needed to authenticate to the CE. If no token is available, request one from the CE
using the id, key and scopes. If the token is going to expire within the next hour, re-request it.
Once requested, save it to ``self.jwt_key_cache``"""
if self.jwt_token_encoded is None:
trompace.logger.debug("no token, getting one")
# No token, refresh it
token = get_jwt(self.host, self.jwt_id, self.jwt_key, self.jwt_scopes)
self._set_jwt_token(token)
self._save_jwt_token(token)
elif self.jwt_token_encoded:
token = jwt.decode(self.jwt_token_encoded, algorithms=["HS256"], options={"verify_signature": False})
now = datetime.datetime.now(datetime.timezone.utc).timestamp()
# renew if the token has already expired or will expire within the next hour,
# matching the behaviour described in this property's docstring
expiring_soon = token.get('exp', 0) < now + 60 * 60
if expiring_soon:
trompace.logger.debug("token is expiring, renewing")
# TODO: Duplicate
token = get_jwt(self.host, self.jwt_id, self.jwt_key, self.jwt_scopes)
self._set_jwt_token(token)
self._save_jwt_token(token)
# Now we have a token, return it
# TODO: The issuing step could fail
return self.jwt_token_encoded
def get_jwt(host, jwt_id, jwt_key, jwt_scopes):
"""Request a JWT key from the CE"""
# TODO: Would be nice to put this in trompace.connection, but issues with circular import
url = urllib.parse.urljoin(host, "jwt")
data = {
"id": jwt_id,
"apiKey": jwt_key,
"scopes": jwt_scopes
}
r = requests.post(url, json=data)
j = r.json()
if j['success']:
return j['jwt']
else:
print("invalid response getting jwt", j)
return None
config = TrompaConfig()
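# Minimal usage sketch (illustrative only; "trompace.ini" is an assumed path and must
# contain the [logging], [server] and [auth] sections parsed above):
#
#   from trompace.config import config
#   config.load("trompace.ini")   # or set TROMPACE_CLIENT_CONFIG and call config.load()
#   token = config.jwt_token      # requests and caches a JWT when auth is required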
| []
| []
| [
"TROMPACE_CLIENT_CONFIG"
]
| [] | ["TROMPACE_CLIENT_CONFIG"] | python | 1 | 0 | |
mventory/wsgi.py | """
WSGI config for mventory project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mventory.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
venv3864/Lib/site-packages/PyInstaller/loader/rthooks/pyi_rth_gstreamer.py | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2019, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import sys
# Without this environment variable set to 'no' importing 'gst'
# causes 100% CPU load. (Tested on OSX.)
os.environ['GST_REGISTRY_FORK'] = 'no'
gst_plugin_paths = [sys._MEIPASS, os.path.join(sys._MEIPASS, 'gst-plugins')]
os.environ['GST_PLUGIN_PATH'] = os.pathsep.join(gst_plugin_paths)
# Prevent permission issues on Windows
os.environ['GST_REGISTRY'] = os.path.join(sys._MEIPASS, 'registry.bin')
# Only use packaged plugins to prevent GStreamer from crashing when it finds
# plugins from another version which are installed system wide.
os.environ['GST_PLUGIN_SYSTEM_PATH'] = ''
| []
| []
| [
"GST_REGISTRY_FORK",
"GST_REGISTRY",
"GST_PLUGIN_PATH",
"GST_PLUGIN_SYSTEM_PATH"
]
| [] | ["GST_REGISTRY_FORK", "GST_REGISTRY", "GST_PLUGIN_PATH", "GST_PLUGIN_SYSTEM_PATH"] | python | 4 | 0 | |
src/telegramBot.py | from dotenv import load_dotenv
import os
import requests
import json
load_dotenv()
def create_answer(message_text):
if message_text in ["oi", "olá", "eai"]:
return "Olá, tudo bem?"
else:
return "Não entendi!"
class TelegramBot:
def __init__(self):
token = os.getenv("API_KEY") # get token from .env file
self.url = f"https://api.telegram.org/bot{token}/"
def start(self):
update_id = None
while True:
update = self.get_messages(update_id)
messages = update['result']
if messages:
for message in messages:
try:
update_id = message['update_id']
chat_id = message['message']['from']['id']
message_text = message['message']['text']
answer_bot = create_answer(message_text)
self.send_answer(chat_id, answer_bot)
except:
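# skip updates that carry no text message (e.g. photos or stickers)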
pass
def get_messages(self, update_id):
link_request = f"{self.url}getUpdates?timeout=1000"
if update_id:
link_request = f"{self.url}getUpdates?timeout=1000&offset={update_id + 1}"
result = requests.get(link_request)
return json.loads(result.content)
def send_answer(self, chat_id, answer):
link_to_send = f"{self.url}sendMessage?chat_id={chat_id}&text={answer}"
requests.get(link_to_send)
return
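# Minimal entry-point sketch (an assumption, not part of the original module; it
# requires API_KEY to be available in the environment or a .env file, as read in
# __init__ above):
if __name__ == "__main__":
    bot = TelegramBot()
    bot.start()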
| []
| []
| [
"API_KEY"
]
| [] | ["API_KEY"] | python | 1 | 0 | |
code/pre_train_felix.py | from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import pandas as pd
import os
import pprint
import sys
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Dropout, Input
from keras.utils import to_categorical
from keras import regularizers, initializers
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.normalization import BatchNormalization
import pickle
import keras.backend.tensorflow_backend as KTF
pp = pprint.PrettyPrinter()
flags = tf.app.flags
flags.DEFINE_string("gpu", "0", "GPU(s) to use. [0]")
flags.DEFINE_float("learning_rate", 2.5e-3, "Learning rate [2.5e-4]")
flags.DEFINE_integer("batch_size", 200, "The number of batch images [4]")
flags.DEFINE_integer("save_step", 500, "The interval of saveing checkpoints[500]")
flags.DEFINE_string("checkpoint", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("log", "summary", "log [log]")
flags.DEFINE_integer("epoch", 100, "Epoch[10]")
flags.DEFINE_integer("pre_epoch", 10, "Epoch[10]")
FLAGS = flags.FLAGS
dim=22283
pre_train_dim=(8196,512,128)
project_path='../'
def readData(label):
x = np.load(project_path+'data/all_raw_data.npy')
print(x.shape)
label_df = pd.read_csv(project_path+'data/all_label.csv')
y = np.array(label_df[label])
return x,y
def init(X, Y):
#x=np.array([X[i] for i in Y if i >0])
#y=np.array([Y[i]-1 for i in Y if i >0])
num_all = int(X.shape[0])
num_train = int(0.8 * num_all)
num_test = num_all - num_train
# shuffle
mask = np.random.permutation(num_all)
X = X[mask]
Y = Y[mask]
# training data
mask_train = range(num_train)
X_train = X
Y_train = Y
#testing data
mask_test = range(num_train, num_all)
X_test = X[mask_test]
Y_test = Y[mask_test]
# Y_train, Y_test = np.expand_dims(Y_train, axis=1), np.expand_dims(Y_test, axis=1)
print('All data shape: ', X.shape)
return X_train, Y_train, X_test, Y_test
def add_regularizer(model, kernel_regularizer = regularizers.l2(), bias_regularizer = regularizers.l2()):
for layer in model.layers:
if hasattr(layer, "kernel_regularizer"):
layer.kernel_regularizer = kernel_regularizer
if hasattr(layer, "bias_regularizer"):
layer.bias_regularizer = bias_regularizer
def pre_model1(pre_train_dim):
inputs = Input(shape = (22283, ))
hidden1 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[0], activation = 'relu')(inputs)))
outputs = Dense(dim, activation = 'relu')(hidden1)
model = Model(inputs = inputs, outputs = outputs)
add_regularizer(model)
return model
def pre_model2(pre_train_dim,weight):
inputs = Input(shape = (22283, ))
hidden1 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[0], activation = 'relu',trainable=False)(inputs)))
hidden2 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[1], activation = 'relu')(hidden1)))
hidden3 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[0], activation = 'relu')(hidden2)))
outputs= Dense(dim, activation = 'relu',trainable=False)(hidden3)
model = Model(inputs = inputs, outputs = outputs)
add_regularizer(model)
return model
def pre_model3(pre_train_dim,weight):
inputs = Input(shape = (22283, ))
hidden1 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[0], activation = 'relu',trainable=False)(inputs)))
hidden2 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[1], activation = 'relu',trainable=False)(hidden1)))
hidden3 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[2], activation = 'relu')(hidden2)))
hidden4 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[1], activation = 'relu')(hidden3)))
hidden5 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[0], activation = 'relu',trainable=False)(hidden4)))
outputs= Dense(dim, activation = 'relu',trainable=False)(hidden5)
model = Model(inputs = inputs, outputs = outputs)
add_regularizer(model)
return model
'''
def pre_model4(pre_train_dim,weight):
inputs = Input(shape = (22283, ))
hidden1 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[0], activation = 'relu',trainable=False)(inputs)))
hidden2 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[1], activation = 'relu',trainable=False)(hidden1)))
hidden3 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[2], activation = 'relu',trainable=False)(hidden2)))
hidden4 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[3], activation = 'relu')(hidden3)))
hidden5 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[2], activation = 'relu')(hidden4)))
hidden6 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[1], activation = 'relu',trainable=False)(hidden5)))
hidden7 = Dropout(0.5)(BatchNormalization(axis = 1)(Dense(pre_train_dim[0], activation = 'relu',trainable=False)(hidden6)))
outputs= Dense(dim, activation = 'relu',trainable=False)(hidden5)
model = Model(inputs = inputs, outputs = outputs)
add_regularizer(model)
return model
'''
def pre_train(x_train , x_test):
batch_size_now=5
weights=[]
model_hidden1=pre_model1(pre_train_dim)
model_hidden1.compile(loss='mse', optimizer='adam')
model_hidden1.fit(x_train, x_train, epochs=FLAGS.pre_epoch, batch_size=batch_size_now, validation_data=(x_test, x_test))
weights.append(model_hidden1.layers[1].get_weights())
#model_hidden1.save(project_path+'model_hidden1.h5')
model_hidden2=pre_model2(pre_train_dim,weights)
model_hidden2.layers[1].set_weights(model_hidden1.layers[1].get_weights())
model_hidden2.layers[-1].set_weights(model_hidden1.layers[-1].get_weights())
model_hidden2.compile(loss='mse', optimizer='adam')
model_hidden2.fit(x_train, x_train, epochs=FLAGS.pre_epoch, batch_size=batch_size_now, validation_data=(x_test, x_test))
weights.append(model_hidden2.layers[2].get_weights())
model_hidden3=pre_model3(pre_train_dim,weights)
model_hidden3.layers[1].set_weights(model_hidden2.layers[1].get_weights())
model_hidden3.layers[2].set_weights(model_hidden2.layers[2].get_weights())
model_hidden3.layers[-2].set_weights(model_hidden2.layers[-2].get_weights())
model_hidden3.layers[-1].set_weights(model_hidden2.layers[-1].get_weights())
model_hidden3.compile(loss='mse', optimizer='adam')
model_hidden3.fit(x_train, x_train, epochs=FLAGS.pre_epoch, batch_size=batch_size_now, validation_data=(x_test, x_test))
weights.append(model_hidden3.layers[3].get_weights())
'''
model_hidden4=pre_model4(pre_train_dim,weights)
#model_hidden4.layers[1].set_weights(model_hidden3.layers[1].get_weights())
#model_hidden4.layers[2].set_weights(model_hidden3.layers[2].get_weights())
#model_hidden4.layers[3].set_weights(model_hidden3.layers[3].get_weights())
#model_hidden4.layers[-2].set_weights(model_hidden3.layers[-2].get_weights())
#model_hidden4.layers[-3].set_weights(model_hidden3.layers[-3].get_weights())
#model_hidden4.layers[-1].set_weights(model_hidden3.layers[-1].get_weights())
model_hidden4.compile(loss='mse', optimizer='sgd')
model_hidden4.fit(x_train, x_train, epochs=FLAGS.pre_epoch, batch_size=100, validation_data=(x_test, x_test))
weights.append([model_hidden4.layers[4].get_weights(),model_hidden4.layers[-4].get_weights()])
'''
#save the weights of hidden layer:
output = open(project_path+'model/pre_weights.pkl', 'wb')
pickle.dump(weights, output)
output.close()
return weights
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.system('echo $CUDA_VISIBLE_DEVICES')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
KTF.set_session(session)
label='MaterialType-2'
x, y = readData(label)
x_train, y_train, x_test, y_test = init(x, y)
print('----------------pre_train start---------------------')
weights=pre_train(x_train,x_test) | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
review/reviewapi/reviewapi.go | package reviewapi
import (
"context"
"github.com/ahmedaabouzied/tasarruf/entities"
"github.com/ahmedaabouzied/tasarruf/review"
"github.com/gin-gonic/gin"
"github.com/go-ozzo/ozzo-validation/v3"
"net/http"
"strconv"
)
// ReviewAPI is the API handler for review related API endpoint
type ReviewAPI struct {
ReviewUsecase review.Usecase
}
// newReviewRequest represents the review body
type newReviewRequest struct {
CustomerID uint `json:"customerID"`
PartnerID uint `json:"partnerID"`
Stars int `json:"stars"`
Content string `json:"content"`
}
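// An illustrative request body matching the JSON tags above (the values are made up):
// {"customerID": 12, "partnerID": 7, "stars": 4, "content": "Great service"}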
func (req *newReviewRequest) Validate() error {
return validation.ValidateStruct(req,
validation.Field(&req.CustomerID, validation.Required),
validation.Field(&req.PartnerID, validation.Required),
validation.Field(&req.Stars, validation.Required, validation.Max(5), validation.Min(1)),
)
}
// CreateReviewAPI returns a new review API instance
func CreateReviewAPI(u review.Usecase) ReviewAPI {
api := ReviewAPI{
ReviewUsecase: u,
}
return api
}
// CreateReview handles POST /review endpoint
func (h *ReviewAPI) CreateReview(c *gin.Context) {
ctx := context.Background()
userID := c.MustGet("userID").(uint)
ctx = context.WithValue(ctx, entities.UserIDKey, userID)
var req newReviewRequest
err := c.BindJSON(&req)
if err != nil {
entities.SendParsingError(c, "There has been an error while sending your information to the server, please try again", err)
return
}
err = req.Validate()
if err != nil {
entities.SendValidationError(c, err.Error(), err)
return
}
newReview := &entities.Review{
CustomerID: req.CustomerID,
PartnerID: req.PartnerID,
Stars: req.Stars,
Content: req.Content,
}
newReview, err = h.ReviewUsecase.Create(ctx, newReview)
if err != nil {
entities.SendValidationError(c, "only customer users can create reviews", err)
return
}
c.JSON(http.StatusOK, gin.H{
"success": "review created successfully",
"review": newReview,
})
return
}
// UpdateReview handles PUT /review/:id endpoint
func (h *ReviewAPI) UpdateReview(c *gin.Context) {
ctx := context.Background()
userID := c.MustGet("userID").(uint)
ctx = context.WithValue(ctx, entities.UserIDKey, userID)
reviewID, err := strconv.ParseInt(c.Param("id"), 10, 64)
if err != nil {
entities.SendParsingError(c, "There has been an error while sending your information to the server, please try again", err)
return
}
var req newReviewRequest
err = c.BindJSON(&req)
if err != nil {
entities.SendParsingError(c, "There has been an error while sending your information to the server, please try again", err)
return
}
err = req.Validate()
if err != nil {
entities.SendValidationError(c, err.Error(), err)
return
}
review, err := h.ReviewUsecase.GetByID(ctx, uint(reviewID))
if err != nil {
entities.SendNotFoundError(c, "Review not found", err)
return
}
review.Content = req.Content
review.Stars = req.Stars
review, err = h.ReviewUsecase.Update(ctx, review)
if err != nil {
entities.SendValidationError(c, "You are not the owner of this review", err)
return
}
c.JSON(http.StatusOK, gin.H{
"success": "review updated successfully",
"review": review,
})
return
}
// DeleteReview handles DELETE /review/:id endpoint
func (h *ReviewAPI) DeleteReview(c *gin.Context) {
ctx := context.Background()
userID := c.MustGet("userID").(uint)
ctx = context.WithValue(ctx, entities.UserIDKey, userID)
reviewID, err := strconv.ParseInt(c.Param("id"), 10, 64)
if err != nil {
entities.SendParsingError(c, "There has been an error while sending your information to the server, please try again", err)
return
}
review, err := h.ReviewUsecase.GetByID(ctx, uint(reviewID))
if err != nil {
entities.SendNotFoundError(c, "Review not found", err)
return
}
review, err = h.ReviewUsecase.Delete(ctx, review)
if err != nil {
entities.SendValidationError(c, "You are not the owner of this review", err)
return
}
c.JSON(http.StatusOK, gin.H{
"success": "review deleted successfully",
"review": review,
})
return
}
// GetByID handles GET /review/:id endpoint
func (h *ReviewAPI) GetByID(c *gin.Context) {
ctx := context.Background()
userID := c.MustGet("userID").(uint)
ctx = context.WithValue(ctx, entities.UserIDKey, userID)
reviewID, err := strconv.ParseInt(c.Param("id"), 10, 64)
if err != nil {
entities.SendParsingError(c, "There has been an error while sending your information to the server, please try again", err)
return
}
review, err := h.ReviewUsecase.GetByID(ctx, uint(reviewID))
if err != nil {
entities.SendNotFoundError(c, "Review not found", err)
return
}
c.JSON(http.StatusOK, gin.H{
"review": review,
})
}
// GetMyReviews handles GET /reviews/:id endpoint
func (h *ReviewAPI) GetMyReviews(c *gin.Context) {
ctx := context.Background()
userID := c.MustGet("userID").(uint)
ctx = context.WithValue(ctx, entities.UserIDKey, userID)
reviews, err := h.ReviewUsecase.GetByCustomerID(ctx, userID)
if err != nil {
entities.SendNotFoundError(c, "Reviews not found", err)
return
}
c.JSON(http.StatusOK, gin.H{
"reviews": reviews,
})
}
// GetPartnerReviews handles GET /reviews/:id endpoint
func (h *ReviewAPI) GetPartnerReviews(c *gin.Context) {
ctx := context.Background()
userID := c.MustGet("userID").(uint)
ctx = context.WithValue(ctx, entities.UserIDKey, userID)
partnerID, err := strconv.ParseInt(c.Param("id"), 10, 64)
if err != nil {
entities.SendParsingError(c, "There has been an error while sending your information to the server, please try again", err)
return
}
reviews, err := h.ReviewUsecase.GetByPartnerID(ctx, uint(partnerID))
if err != nil {
entities.SendNotFoundError(c, "Reviews not found", err)
return
}
average, err := h.ReviewUsecase.GetAverageRatings(ctx, uint(partnerID))
if err != nil {
entities.SendNotFoundError(c, "Reviews not found", err)
return
}
c.JSON(http.StatusOK, gin.H{
"reviews": reviews,
"average": average,
})
}
| []
| []
| []
| [] | [] | go | null | null | null |
modules/040-node-manager/hooks/chaos_monkey.go | /*
Copyright 2021 Flant JSC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hooks
import (
"fmt"
"math/rand"
"os"
"strconv"
"time"
"github.com/flant/addon-operator/pkg/module_manager/go_hook"
"github.com/flant/addon-operator/sdk"
"github.com/flant/shell-operator/pkg/kube/object_patch"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/utils/pointer"
"github.com/deckhouse/deckhouse/modules/040-node-manager/hooks/internal/mcm/v1alpha1"
v1 "github.com/deckhouse/deckhouse/modules/040-node-manager/hooks/internal/v1"
)
var _ = sdk.RegisterFunc(&go_hook.HookConfig{
Settings: &go_hook.HookConfigSettings{
ExecutionMinInterval: 5 * time.Second,
ExecutionBurst: 3,
},
Queue: "/modules/node-manager/chaos_monkey",
Kubernetes: []go_hook.KubernetesConfig{
{
Name: "ngs",
ApiVersion: "deckhouse.io/v1",
Kind: "NodeGroup",
WaitForSynchronization: pointer.BoolPtr(false),
ExecuteHookOnEvents: pointer.BoolPtr(false),
ExecuteHookOnSynchronization: pointer.BoolPtr(false),
FilterFunc: chaosFilterNodeGroup,
},
{
Name: "nodes",
ApiVersion: "v1",
Kind: "Node",
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "node.deckhouse.io/group",
Operator: metav1.LabelSelectorOpExists,
},
},
},
WaitForSynchronization: pointer.BoolPtr(false),
ExecuteHookOnEvents: pointer.BoolPtr(false),
ExecuteHookOnSynchronization: pointer.BoolPtr(false),
FilterFunc: chaosFilterNode,
},
{
Name: "machines",
ApiVersion: "machine.sapcloud.io/v1alpha1",
Kind: "Machine",
WaitForSynchronization: pointer.BoolPtr(false),
ExecuteHookOnEvents: pointer.BoolPtr(false),
ExecuteHookOnSynchronization: pointer.BoolPtr(false),
FilterFunc: chaosFilterMachine,
},
},
Schedule: []go_hook.ScheduleConfig{
{
Name: "monkey",
Crontab: "* * * * *",
},
},
}, handleChaosMonkey)
func handleChaosMonkey(input *go_hook.HookInput) error {
random := time.Now().Unix()
testRandomSeed := os.Getenv("D8_TEST_RANDOM_SEED")
if testRandomSeed != "" {
res, _ := strconv.ParseInt(testRandomSeed, 10, 64)
random = res
}
randomizer := rand.New(rand.NewSource(random))
nodeGroups, machines, nodes, err := prepareChaosData(input)
if err != nil {
input.LogEntry.Info(err.Error()) // just info message, already have a victim
return nil
}
// preparation complete, main hook logic goes here
for _, ng := range nodeGroups {
if ng.ChaosMode != "DrainAndDelete" {
continue
}
chaosPeriod, err := time.ParseDuration(ng.ChaosPeriod)
if err != nil {
input.LogEntry.Warnf("chaos period (%s) for NodeGroup:%s is invalid", ng.ChaosPeriod, ng.Name)
continue
}
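// The "* * * * *" schedule above fires once a minute, so taking a random Uint32 modulo
// the number of minutes in chaosPeriod picks a victim roughly once per period on
// average (e.g. about a 1/360 chance per run with the default 6h period).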
run := randomizer.Uint32() % uint32(chaosPeriod.Milliseconds()/1000/60)
if run != 0 {
continue
}
nodeGroupNodes := nodes[ng.Name]
if len(nodeGroupNodes) == 0 {
continue
}
victimNode := nodeGroupNodes[randomizer.Intn(len(nodeGroupNodes))]
victimMachine, ok := machines[victimNode.Name]
if !ok {
continue
}
input.PatchCollector.MergePatch(victimAnnotationPatch, "machine.sapcloud.io/v1alpha1", "Machine", "d8-cloud-instance-manager", victimMachine.Name)
input.PatchCollector.Delete("machine.sapcloud.io/v1alpha1", "Machine", "d8-cloud-instance-manager", victimMachine.Name, object_patch.InBackground())
}
return nil
}
func prepareChaosData(input *go_hook.HookInput) ([]chaosNodeGroup, map[string]chaosMachine, map[string][]chaosNode, error) {
snap := input.Snapshots["machines"]
machines := make(map[string]chaosMachine, len(snap)) // map by node name
for _, sn := range snap {
machine := sn.(chaosMachine)
if machine.IsAlreadyMonkeyVictim {
return nil, nil, nil, fmt.Errorf("machine %s is already marked as chaos monkey victim. Exiting", machine.Name) // If there are nodes in deleting state then do nothing
}
machines[machine.Node] = machine
}
// collect NodeGroup with Enabled chaos monkey
snap = input.Snapshots["ngs"]
nodeGroups := make([]chaosNodeGroup, 0)
for _, sn := range snap {
ng := sn.(chaosNodeGroup)
// if chaos mode is empty - it's disabled
if ng.ChaosMode == "" || !ng.IsReadyForChaos {
continue
}
nodeGroups = append(nodeGroups, ng)
}
// map nodes by NodeGroup
nodes := make(map[string][]chaosNode)
snap = input.Snapshots["nodes"]
for _, sn := range snap {
node := sn.(chaosNode)
if v, ok := nodes[node.NodeGroup]; ok {
v = append(v, node)
nodes[node.NodeGroup] = v
} else {
nodes[node.NodeGroup] = []chaosNode{node}
}
}
return nodeGroups, machines, nodes, nil
}
func chaosFilterMachine(obj *unstructured.Unstructured) (go_hook.FilterResult, error) {
var machine v1alpha1.Machine
err := sdk.FromUnstructured(obj, &machine)
if err != nil {
return nil, err
}
isMonkeyVictim := false
if _, ok := machine.Labels["node.deckhouse.io/chaos-monkey-victim"]; ok {
isMonkeyVictim = true
}
return chaosMachine{
Name: machine.Name,
Node: machine.Labels["node"],
IsAlreadyMonkeyVictim: isMonkeyVictim,
}, nil
}
func chaosFilterNode(obj *unstructured.Unstructured) (go_hook.FilterResult, error) {
var node corev1.Node
err := sdk.FromUnstructured(obj, &node)
if err != nil {
return nil, err
}
return chaosNode{
Name: node.Name,
NodeGroup: node.Labels["node.deckhouse.io/group"],
}, nil
}
func chaosFilterNodeGroup(obj *unstructured.Unstructured) (go_hook.FilterResult, error) {
var ng v1.NodeGroup
err := sdk.FromUnstructured(obj, &ng)
if err != nil {
return nil, err
}
isReadyForChaos := false
if ng.Spec.NodeType == v1.NodeTypeCloudEphemeral {
if ng.Status.Desired > 1 && ng.Status.Desired == ng.Status.Ready {
isReadyForChaos = true
}
} else {
if ng.Status.Nodes > 1 && ng.Status.Nodes == ng.Status.Ready {
isReadyForChaos = true
}
}
period := ng.Spec.Chaos.Period
if period == "" {
period = "6h"
}
return chaosNodeGroup{
Name: ng.Name,
ChaosMode: ng.Spec.Chaos.Mode,
ChaosPeriod: period,
IsReadyForChaos: isReadyForChaos,
}, nil
}
type chaosNodeGroup struct {
Name string
ChaosMode string
ChaosPeriod string // default 6h
IsReadyForChaos bool
}
type chaosMachine struct {
Name string
Node string
IsAlreadyMonkeyVictim bool
}
type chaosNode struct {
Name string
NodeGroup string
}
var (
victimAnnotationPatch = map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
"node.deckhouse.io/chaos-monkey-victim": "",
},
},
}
)
| [
"\"D8_TEST_RANDOM_SEED\""
]
| []
| [
"D8_TEST_RANDOM_SEED"
]
| [] | ["D8_TEST_RANDOM_SEED"] | go | 1 | 0 | |
st2client/tests/unit/test_shell.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import mock
import logging
from tests import base
from st2client import shell
from st2client.utils import httpclient
LOG = logging.getLogger(__name__)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG_FILE_PATH_FULL = os.path.join(BASE_DIR, '../fixtures/st2rc.full.ini')
CONFIG_FILE_PATH_PARTIAL = os.path.join(BASE_DIR, '../fixtures/st2rc.partial.ini')
class TestShell(base.BaseCLITestCase):
capture_output = True
def __init__(self, *args, **kwargs):
super(TestShell, self).__init__(*args, **kwargs)
self.shell = shell.Shell()
def test_endpoints_default(self):
base_url = 'http://localhost'
auth_url = 'http://localhost:9100'
api_url = 'http://localhost:9101/v1'
args = ['trigger', 'list']
parsed_args = self.shell.parser.parse_args(args)
client = self.shell.get_client(parsed_args)
self.assertEqual(client.endpoints['base'], base_url)
self.assertEqual(client.endpoints['auth'], auth_url)
self.assertEqual(client.endpoints['api'], api_url)
def test_endpoints_base_url_from_cli(self):
base_url = 'http://www.st2.com'
auth_url = 'http://www.st2.com:9100'
api_url = 'http://www.st2.com:9101/v1'
args = ['--url', base_url, 'trigger', 'list']
parsed_args = self.shell.parser.parse_args(args)
client = self.shell.get_client(parsed_args)
self.assertEqual(client.endpoints['base'], base_url)
self.assertEqual(client.endpoints['auth'], auth_url)
self.assertEqual(client.endpoints['api'], api_url)
def test_endpoints_base_url_from_env(self):
base_url = 'http://www.st2.com'
auth_url = 'http://www.st2.com:9100'
api_url = 'http://www.st2.com:9101/v1'
os.environ['ST2_BASE_URL'] = base_url
args = ['trigger', 'list']
parsed_args = self.shell.parser.parse_args(args)
client = self.shell.get_client(parsed_args)
self.assertEqual(client.endpoints['base'], base_url)
self.assertEqual(client.endpoints['auth'], auth_url)
self.assertEqual(client.endpoints['api'], api_url)
def test_endpoints_override_from_cli(self):
base_url = 'http://www.st2.com'
auth_url = 'http://www.st2.com:8888'
api_url = 'http://www.stackstorm1.com:9101/v1'
args = ['--url', base_url,
'--auth-url', auth_url,
'--api-url', api_url,
'trigger', 'list']
parsed_args = self.shell.parser.parse_args(args)
client = self.shell.get_client(parsed_args)
self.assertEqual(client.endpoints['base'], base_url)
self.assertEqual(client.endpoints['auth'], auth_url)
self.assertEqual(client.endpoints['api'], api_url)
def test_endpoints_override_from_env(self):
base_url = 'http://www.st2.com'
auth_url = 'http://www.st2.com:8888'
api_url = 'http://www.stackstorm1.com:9101/v1'
os.environ['ST2_BASE_URL'] = base_url
os.environ['ST2_AUTH_URL'] = auth_url
os.environ['ST2_API_URL'] = api_url
args = ['trigger', 'list']
parsed_args = self.shell.parser.parse_args(args)
client = self.shell.get_client(parsed_args)
self.assertEqual(client.endpoints['base'], base_url)
self.assertEqual(client.endpoints['auth'], auth_url)
self.assertEqual(client.endpoints['api'], api_url)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES), 200, 'OK')))
def test_exit_code_on_success(self):
argv = ['trigger', 'list']
self.assertEqual(self.shell.run(argv), 0)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(None, 500, 'INTERNAL SERVER ERROR')))
def test_exit_code_on_error(self):
argv = ['trigger', 'list']
self.assertEqual(self.shell.run(argv), 1)
def _validate_parser(self, args_list, is_subcommand=True):
for args in args_list:
ns = self.shell.parser.parse_args(args)
func = (self.shell.commands[args[0]].run_and_print
if not is_subcommand
else self.shell.commands[args[0]].commands[args[1]].run_and_print)
self.assertEqual(ns.func, func)
def test_action(self):
args_list = [
['action', 'list'],
['action', 'get', 'abc'],
['action', 'create', '/tmp/action.json'],
['action', 'update', '123', '/tmp/action.json'],
['action', 'delete', 'abc'],
['action', 'execute', '-h'],
['action', 'execute', 'remote', '-h'],
['action', 'execute', 'remote', 'hosts=192.168.1.1', 'user=st2', 'cmd="ls -l"'],
['action', 'execute', 'remote-fib', 'hosts=192.168.1.1', '3', '8']
]
self._validate_parser(args_list)
def test_action_execution(self):
args_list = [
['execution', 'list'],
['execution', 'get', '123'],
['execution', 'get', '123', '-d'],
['execution', 'get', '123', '-k', 'localhost.stdout']
]
self._validate_parser(args_list)
# Test mutually exclusive argument groups
self.assertRaises(SystemExit, self._validate_parser,
[['execution', 'get', '123', '-d', '-k', 'localhost.stdout']])
def test_key(self):
args_list = [
['key', 'list'],
['key', 'get', 'abc'],
['key', 'set', 'abc', '123'],
['key', 'delete', 'abc'],
['key', 'load', '/tmp/keys.json']
]
self._validate_parser(args_list)
def test_policy(self):
args_list = [
['policy', 'list'],
['policy', 'list', '-p', 'core'],
['policy', 'list', '--pack', 'core'],
['policy', 'list', '-r', 'core.local'],
['policy', 'list', '--resource-ref', 'core.local'],
['policy', 'list', '-pt', 'action.type1'],
['policy', 'list', '--policy-type', 'action.type1'],
['policy', 'list', '-r', 'core.local', '-pt', 'action.type1'],
['policy', 'list', '--resource-ref', 'core.local', '--policy-type', 'action.type1'],
['policy', 'get', 'abc'],
['policy', 'create', '/tmp/policy.json'],
['policy', 'update', '123', '/tmp/policy.json'],
['policy', 'delete', 'abc']
]
self._validate_parser(args_list)
def test_policy_type(self):
args_list = [
['policy-type', 'list'],
['policy-type', 'list', '-r', 'action'],
['policy-type', 'list', '--resource-type', 'action'],
['policy-type', 'get', 'abc']
]
self._validate_parser(args_list)
@mock.patch('st2client.shell.ST2_CONFIG_PATH', '/home/does/not/exist')
def test_print_config_default_config_no_config(self):
os.environ['ST2_CONFIG_FILE'] = '/home/does/not/exist'
argv = ['--print-config']
self.assertEqual(self.shell.run(argv), 3)
self.stdout.seek(0)
stdout = self.stdout.read()
self.assertTrue('username = None' in stdout)
self.assertTrue('cache_token = True' in stdout)
def test_print_config_custom_config_as_env_variable(self):
os.environ['ST2_CONFIG_FILE'] = CONFIG_FILE_PATH_FULL
argv = ['--print-config']
self.assertEqual(self.shell.run(argv), 3)
self.stdout.seek(0)
stdout = self.stdout.read()
self.assertTrue('username = test1' in stdout)
self.assertTrue('cache_token = False' in stdout)
def test_print_config_custom_config_as_command_line_argument(self):
argv = ['--print-config', '--config-file=%s' % (CONFIG_FILE_PATH_FULL)]
self.assertEqual(self.shell.run(argv), 3)
self.stdout.seek(0)
stdout = self.stdout.read()
self.assertTrue('username = test1' in stdout)
self.assertTrue('cache_token = False' in stdout)
def test_run(self):
args_list = [
['run', '-h'],
['run', 'abc', '-h'],
['run', 'remote', 'hosts=192.168.1.1', 'user=st2', 'cmd="ls -l"'],
['run', 'remote-fib', 'hosts=192.168.1.1', '3', '8']
]
self._validate_parser(args_list, is_subcommand=False)
def test_runner(self):
args_list = [
['runner', 'list'],
['runner', 'get', 'abc']
]
self._validate_parser(args_list)
def test_rule(self):
args_list = [
['rule', 'list'],
['rule', 'get', 'abc'],
['rule', 'create', '/tmp/rule.json'],
['rule', 'update', '123', '/tmp/rule.json'],
['rule', 'delete', 'abc']
]
self._validate_parser(args_list)
def test_trigger(self):
args_list = [
['trigger', 'list'],
['trigger', 'get', 'abc'],
['trigger', 'create', '/tmp/trigger.json'],
['trigger', 'update', '123', '/tmp/trigger.json'],
['trigger', 'delete', 'abc']
]
self._validate_parser(args_list)
| []
| []
| [
"ST2_CONFIG_FILE",
"ST2_AUTH_URL",
"ST2_API_URL",
"ST2_BASE_URL"
]
| [] | ["ST2_CONFIG_FILE", "ST2_AUTH_URL", "ST2_API_URL", "ST2_BASE_URL"] | python | 4 | 0 | |
flaskapp/main.py | import os
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
appname = os.environ['APP_NAME']
appversion = os.environ['APP_VERSION']
response = "%s - %s.%s\n" %('Fluxcd', appname, appversion)
return response | []
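# Minimal local entry-point sketch (an assumption, not part of the original module;
# APP_NAME and APP_VERSION must be set in the environment, and a real deployment
# would normally sit behind a WSGI server instead):
if __name__ == '__main__':
    app.run()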
| []
| [
"APP_VERSION",
"APP_NAME"
]
| [] | ["APP_VERSION", "APP_NAME"] | python | 2 | 0 | |
python/jimmy_plot/deprecated/front_end_preds/accuracy_vs_signaturelength.py | #ASSUMES DATA WITH THROTTLING, NO DECOR STALL
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
#PARAMETERS
HOME = os.environ['HOME']
# PREDICTORS = ['HarvardPowerPredictor','DecorOnly','IdealSensor','uArchEventPredictor']
PREDICTOR = 'HarvardPowerPredictor'
CLASS = 'LAPTOP'
TEST = 'dijkstra'
fig = plt.figure(figsize=(25,5))
ax = plt.axes()
SIG_LENGTH = [32,64,128,256,512]
for SIG_LEN in SIG_LENGTH:
path = HOME + '/output_9_28/gem5_out/' + CLASS + '_' + PREDICTOR + '_1_no_throttle_on_restore_nodecor_nothrottle_sig' + str(SIG_LEN) + '/' + TEST + '.txt'
print(path)
stats = open(path , 'r')
yvar = [0]
action = [False]
VE = [False]
action_count = 0
VE_count = 0
#read line by line
line = stats.readline()
while line:
if 'numCycles' in line:
linespl = line.split()
num_new_cycles = int(linespl[1])
for i in range(num_new_cycles):
yvar.append(None)
action.append(False)
VE.append(False)
elif 'system.cpu.powerPred.supply_voltage' in line and 'system.cpu.powerPred.supply_voltage_dv' not in line:
linespl = line.split()
yvar[-1] = float(linespl[1])
#if moved forward 2 cycles, middle cycle is average of adjacent cycles
if yvar[-2] == None:
yvar[-2] = (yvar[-1] + yvar[-3])/2
elif 'system.cpu.powerPred.total_action' in line:
linespl = line.split()
action_read = int(linespl[1])
if action_read > action_count:
action[-1] = True
action_count = action_read
#plt.axvspan(len(yvar), len(yvar)+1, color='red', alpha=0.3)
#elif 'system.cpu.powerPred.frequency' in line:
# linespl = line.split()
# freq = int(linespl[1])
# if freq == 1750000000:
# plt.axvspan(len(yvar), len(yvar)+1, color='red', alpha=0.1)
elif 'system.cpu.powerPred.num_voltage_emergency'in line:
linespl = line.split()
VE_read = int(linespl[1])
if VE_read > VE_count:
VE[-1] = True
VE_count +=1
#plt.axvspan(len(yvar), len(yvar)+1, color='blue', alpha=0.6)
line = stats.readline()
print(VE_count)
LEAD_TIME_CAP = 512
# bins[lead] counts voltage emergencies whose most recent mitigation action fired `lead` cycles earlier
bins = dict()
for i,ve in enumerate(VE):
if ve:
j = 0
while j < LEAD_TIME_CAP and i-j>=0:
if action[i-j]:
if j in bins.keys():
bins[j] += 1
else:
bins[j] = 1
break
j+=1
xvar = []
yvar = []
running_sum = 0
for i in range(LEAD_TIME_CAP):
if i in bins.keys():
running_sum += bins[i]
xvar.append(i)
yvar.append(100 * running_sum / VE_count)
plt.plot(xvar, yvar, linewidth=1.0, label = str(SIG_LEN))
fig.suptitle('Accuracy Over Lead Time' + '(' + PREDICTOR + ', ' + CLASS + ', ' + TEST + ' )', fontsize=14)
fig.set_size_inches(7.5, 5.5)
plt.xlabel('Lead Time', fontsize=14)
plt.ylabel('Accuracy (%)', fontsize=14)
plt.legend()
plt.savefig('9-29_sig_length_sweep' + '_' + PREDICTOR + '_' + CLASS + '_' + TEST +'.png')
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
pkg/service/jwt/jwt_test.go | package jwt
import (
"errors"
"os"
"testing"
"time"
"github.com/dgrijalva/jwt-go"
)
func TestSignedString(t *testing.T) {
var expectedId int64 = 65
tokenString, err := SignedString(expectedId)
if err != nil {
t.Fatalf("Unexpected error when signing string, error: %s\n", err.Error())
}
parsedToken, err := jwt.ParseWithClaims(tokenString, &authClaims{}, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, errors.New("invalid signing method")
}
return []byte(os.Getenv("TOKEN_SECRET")), err
})
if err != nil {
t.Fatalf("Unexpected error: %s\n", err.Error())
}
claims, ok := parsedToken.Claims.(*authClaims)
if !ok {
t.Fatalf("Invalid type assertion for authClaims\n")
}
id := claims.ID
if id != expectedId {
t.Errorf("Invalid id, expected: %d, got: %d\n", expectedId, id)
}
}
func TestParseToken(t *testing.T) {
cases := []struct {
name string
id int64
timeFrom time.Time
secret []byte
expectedId int64
errorPresent bool
}{
{
name: "With invalid secret method",
id: 65,
secret: []byte("invalid secret"),
timeFrom: time.Now(),
expectedId: 0,
errorPresent: true,
},
{
name: "With expired token",
id: 65,
secret: []byte(os.Getenv("TOKEN_SECRET")),
timeFrom: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
expectedId: 0,
errorPresent: true,
},
{
name: "With valid params",
id: 65,
secret: []byte(os.Getenv("TOKEN_SECRET")),
timeFrom: time.Now(),
expectedId: 65,
errorPresent: false,
},
}
for _, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
claims := authClaims{
ID: testCase.id,
StandardClaims: jwt.StandardClaims{
IssuedAt: testCase.timeFrom.Unix(),
ExpiresAt: testCase.timeFrom.Add(tokenTD).Unix(),
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenStr, err := token.SignedString(testCase.secret)
if err != nil {
t.Fatalf("Unexpected error: %s\n", err.Error())
}
id, err := ParseToken(tokenStr)
if err == nil && testCase.errorPresent {
t.Errorf("Should be error\n")
}
if id != testCase.expectedId {
t.Errorf("Invalid id, expected: %d, got: %d\n", testCase.expectedId, id)
}
})
}
}
| [
"\"TOKEN_SECRET\"",
"\"TOKEN_SECRET\"",
"\"TOKEN_SECRET\""
]
| []
| [
"TOKEN_SECRET"
]
| [] | ["TOKEN_SECRET"] | go | 1 | 0 | |
src/helloworldservice/app.py | import os
import time
from concurrent.futures import ThreadPoolExecutor
import grpc
from opencensus.trace.exporters.stackdriver_exporter import StackdriverExporter
from opencensus.trace.ext.grpc.server_interceptor import OpenCensusServerInterceptor
from opencensus.trace.samplers.always_on import AlwaysOnSampler
import demo_pb2
import demo_pb2_grpc
from grpc_health.v1.health_pb2 import HealthCheckResponse
from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
from logger import get_json_logger
logger = get_json_logger("helloworldservice")
class HelloWorldService(demo_pb2_grpc.HelloWorldServiceServicer):
def Greet(self, request, context):
response = demo_pb2.GreetResponse()
response.message = f"Hello, {request.name}"
return response
def Check(self, request, context):
return HealthCheckResponse(status=HealthCheckResponse.SERVING)
def serve():
logger.info("initializing frontend")
try:
sampler = AlwaysOnSampler()
exporter = StackdriverExporter()
tracer_interceptor = OpenCensusServerInterceptor(sampler, exporter)
except Exception:
# fall back to an interceptor without the Stackdriver exporter (e.g. when not running on GCP)
tracer_interceptor = OpenCensusServerInterceptor()
port = os.environ.get("PORT", "9090")
server = grpc.server(ThreadPoolExecutor(max_workers=3), interceptors=(tracer_interceptor,))
service = HelloWorldService()
demo_pb2_grpc.add_HelloWorldServiceServicer_to_server(service, server)
add_HealthServicer_to_server(service, server)
logger.info(f"listening on port: {port}")
server.add_insecure_port(f"[::]:{port}")
server.start()
try:
while True:
time.sleep(10000)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
serve()
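# Client-side sketch for manual testing (illustrative only; it assumes the server is
# reachable on localhost:9090 and that demo_pb2 defines a GreetRequest message with a
# `name` field, mirroring the handler above):
#
#   channel = grpc.insecure_channel("localhost:9090")
#   stub = demo_pb2_grpc.HelloWorldServiceStub(channel)
#   print(stub.Greet(demo_pb2.GreetRequest(name="world")).message)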
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
tests/func/experiments/conftest.py | from textwrap import dedent
import pytest
from tests.func.test_repro_multistage import COPY_SCRIPT
CHECKPOINT_SCRIPT_FORMAT = dedent(
"""\
import os
import sys
import shutil
from time import sleep
from dvc.api import make_checkpoint
checkpoint_file = {}
checkpoint_iterations = int({})
if os.path.exists(checkpoint_file):
with open(checkpoint_file) as fobj:
try:
value = int(fobj.read())
except ValueError:
value = 0
else:
with open(checkpoint_file, "w"):
pass
value = 0
shutil.copyfile({}, {})
if os.getenv("DVC_CHECKPOINT"):
for _ in range(checkpoint_iterations):
value += 1
with open(checkpoint_file, "w") as fobj:
fobj.write(str(value))
make_checkpoint()
"""
)
CHECKPOINT_SCRIPT = CHECKPOINT_SCRIPT_FORMAT.format(
"sys.argv[1]", "sys.argv[2]", "sys.argv[3]", "sys.argv[4]"
)
@pytest.fixture
def exp_stage(tmp_dir, scm, dvc):
tmp_dir.gen("copy.py", COPY_SCRIPT)
tmp_dir.gen("params.yaml", "foo: 1")
stage = dvc.run(
cmd="python copy.py params.yaml metrics.yaml",
metrics_no_cache=["metrics.yaml"],
params=["foo"],
name="copy-file",
deps=["copy.py"],
)
scm.add(
[
"dvc.yaml",
"dvc.lock",
"copy.py",
"params.yaml",
"metrics.yaml",
".gitignore",
]
)
scm.commit("init")
return stage
@pytest.fixture
def checkpoint_stage(tmp_dir, scm, dvc):
tmp_dir.gen("checkpoint.py", CHECKPOINT_SCRIPT)
tmp_dir.gen("params.yaml", "foo: 1")
stage = dvc.run(
cmd="python checkpoint.py foo 5 params.yaml metrics.yaml",
metrics_no_cache=["metrics.yaml"],
params=["foo"],
checkpoints=["foo"],
deps=["checkpoint.py"],
no_exec=True,
name="checkpoint-file",
)
scm.add(["dvc.yaml", "checkpoint.py", "params.yaml", ".gitignore"])
scm.commit("init")
return stage
| []
| []
| [
"DVC_CHECKPOINT"
]
| [] | ["DVC_CHECKPOINT"] | python | 1 | 0 | |
pkg/operator/ceph/object/objectstore.go | /*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"syscall"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
"github.com/rook/rook/pkg/operator/ceph/cluster/mgr"
"github.com/rook/rook/pkg/operator/ceph/config"
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/rook/rook/pkg/util"
"github.com/rook/rook/pkg/util/exec"
"golang.org/x/sync/errgroup"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
rootPool = ".rgw.root"
// AppName is the name Rook uses for the object store's application
AppName = "rook-ceph-rgw"
bucketProvisionerName = "ceph.rook.io/bucket"
AccessKeyName = "access-key"
SecretKeyName = "secret-key"
svcDNSSuffix = "svc"
)
var (
metadataPools = []string{
// .rgw.root (rootPool) is appended to this slice where needed
"rgw.control",
"rgw.meta",
"rgw.log",
"rgw.buckets.index",
"rgw.buckets.non-ec",
}
dataPoolName = "rgw.buckets.data"
// An user with system privileges for dashboard service
DashboardUser = "dashboard-admin"
)
type idType struct {
ID string `json:"id"`
}
type zoneGroupType struct {
MasterZoneID string `json:"master_zone"`
IsMaster string `json:"is_master"`
Zones []zoneType `json:"zones"`
}
type zoneType struct {
Name string `json:"name"`
Endpoints []string `json:"endpoints"`
}
type realmType struct {
Realms []string `json:"realms"`
}
// allow commitConfigChanges to be overridden for unit testing
var commitConfigChanges = CommitConfigChanges
func deleteRealmAndPools(objContext *Context, spec cephv1.ObjectStoreSpec) error {
if spec.IsMultisite() {
// since pools for object store are created by the zone, the object store only needs to be removed from the zone
err := removeObjectStoreFromMultisite(objContext, spec)
if err != nil {
return err
}
return nil
}
return deleteSingleSiteRealmAndPools(objContext, spec)
}
func removeObjectStoreFromMultisite(objContext *Context, spec cephv1.ObjectStoreSpec) error {
// get list of endpoints not including the endpoint of the object-store for the zone
zoneEndpointsList, err := getZoneEndpoints(objContext, objContext.Endpoint)
if err != nil {
return err
}
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
zoneEndpoints := strings.Join(zoneEndpointsList, ",")
endpointArg := fmt.Sprintf("--endpoints=%s", zoneEndpoints)
zoneIsMaster, err := checkZoneIsMaster(objContext)
if err != nil {
return errors.Wrap(err, "failed to find out zone in Master")
}
zoneGroupIsMaster := false
if zoneIsMaster {
_, err = RunAdminCommandNoMultisite(objContext, false, "zonegroup", "modify", realmArg, zoneGroupArg, endpointArg)
if err != nil {
if kerrors.IsNotFound(err) {
return err
}
return errors.Wrapf(err, "failed to remove object store %q endpoint from rgw zone group %q", objContext.Name, objContext.ZoneGroup)
}
logger.Debugf("endpoint %q was removed from zone group %q. the remaining endpoints in the zone group are %q", objContext.Endpoint, objContext.ZoneGroup, zoneEndpoints)
// check if zone group is master only if zone is master for creating the system user
zoneGroupIsMaster, err = checkZoneGroupIsMaster(objContext)
if err != nil {
return errors.Wrapf(err, "failed to find out whether zone group %q in is the master zone group", objContext.ZoneGroup)
}
}
_, err = runAdminCommand(objContext, false, "zone", "modify", endpointArg)
if err != nil {
return errors.Wrapf(err, "failed to remove object store %q endpoint from rgw zone %q", objContext.Name, spec.Zone.Name)
}
logger.Debugf("endpoint %q was removed from zone %q. the remaining endpoints in the zone are %q", objContext.Endpoint, objContext.Zone, zoneEndpoints)
if zoneIsMaster && zoneGroupIsMaster && zoneEndpoints == "" {
logger.Infof("WARNING: No other zone in realm %q can commit to the period or pull the realm until you create another object-store in zone %q", objContext.Realm, objContext.Zone)
}
// this will notify other zones of changes if there are multi-zones
if err := commitConfigChanges(objContext); err != nil {
nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name)
return errors.Wrapf(err, "failed to commit config changes after removing CephObjectStore %q from multi-site", nsName)
}
return nil
}
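// deleteSingleSiteRealmAndPools deletes the realm for a single-site object store and, unless
// PreservePoolsOnDelete is set, its pools (including the root pool when this is the last store).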
func deleteSingleSiteRealmAndPools(objContext *Context, spec cephv1.ObjectStoreSpec) error {
stores, err := getObjectStores(objContext)
if err != nil {
return errors.Wrap(err, "failed to detect object stores during deletion")
}
if len(stores) == 0 {
logger.Infof("did not find object store %q, nothing to delete", objContext.Name)
return nil
}
logger.Infof("Found stores %v when deleting store %s", stores, objContext.Name)
err = deleteRealm(objContext)
if err != nil {
return errors.Wrap(err, "failed to delete realm")
}
lastStore := false
if len(stores) == 1 && stores[0] == objContext.Name {
lastStore = true
}
if !spec.PreservePoolsOnDelete {
err = deletePools(objContext, spec, lastStore)
if err != nil {
return errors.Wrap(err, "failed to delete object store pools")
}
} else {
logger.Infof("PreservePoolsOnDelete is set in object store %s. Pools not deleted", objContext.Name)
}
return nil
}
// This is used for quickly getting the name of the realm, zone group, and zone for an object-store to pass into a Context
func getMultisiteForObjectStore(clusterdContext *clusterd.Context, spec *cephv1.ObjectStoreSpec, namespace, name string) (string, string, string, error) {
ctx := context.TODO()
if spec.IsMultisite() {
zone, err := clusterdContext.RookClientset.CephV1().CephObjectZones(namespace).Get(ctx, spec.Zone.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", errors.Wrapf(err, "failed to find zone for object-store %q", name)
}
zonegroup, err := clusterdContext.RookClientset.CephV1().CephObjectZoneGroups(namespace).Get(ctx, zone.Spec.ZoneGroup, metav1.GetOptions{})
if err != nil {
return "", "", "", errors.Wrapf(err, "failed to find zone group for object-store %q", name)
}
realm, err := clusterdContext.RookClientset.CephV1().CephObjectRealms(namespace).Get(ctx, zonegroup.Spec.Realm, metav1.GetOptions{})
if err != nil {
return "", "", "", errors.Wrapf(err, "failed to find realm for object-store %q", name)
}
return realm.Name, zonegroup.Name, zone.Name, nil
}
return name, name, name, nil
}
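// checkZoneIsMaster returns true when the zone's ID matches the master zone ID of its zone group.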
func checkZoneIsMaster(objContext *Context) (bool, error) {
logger.Debugf("checking if zone %v is the master zone", objContext.Zone)
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
zoneGroupJson, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg)
if err != nil {
// This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet
// The caller can nicely handle the error and not overflow the op logs with misleading error messages
if kerrors.IsNotFound(err) {
return false, err
}
return false, errors.Wrap(err, "failed to get rgw zone group")
}
zoneGroupOutput, err := DecodeZoneGroupConfig(zoneGroupJson)
if err != nil {
return false, errors.Wrap(err, "failed to parse zonegroup get json")
}
logger.Debugf("got master zone ID for zone group %v", objContext.ZoneGroup)
zoneOutput, err := RunAdminCommandNoMultisite(objContext, true, "zone", "get", realmArg, zoneGroupArg, zoneArg)
if err != nil {
// This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet
// The caller can nicely handle the error and not overflow the op logs with misleading error messages
if kerrors.IsNotFound(err) {
return false, err
}
return false, errors.Wrap(err, "failed to get rgw zone")
}
zoneID, err := decodeID(zoneOutput)
if err != nil {
return false, errors.Wrap(err, "failed to parse zone id")
}
logger.Debugf("got zone ID for zone %v", objContext.Zone)
if zoneID == zoneGroupOutput.MasterZoneID {
logger.Debugf("zone is master")
return true, nil
}
logger.Debugf("zone is not master")
return false, nil
}
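// checkZoneGroupIsMaster returns true when the zone group reports itself as the master zone group of the realm.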
func checkZoneGroupIsMaster(objContext *Context) (bool, error) {
logger.Debugf("checking if zone group %v is the master zone group", objContext.ZoneGroup)
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
zoneGroupOutput, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg)
if err != nil {
// This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet
// The caller can nicely handle the error and not overflow the op logs with misleading error messages
if kerrors.IsNotFound(err) {
return false, err
}
return false, errors.Wrap(err, "failed to get rgw zone group")
}
zoneGroupJson, err := DecodeZoneGroupConfig(zoneGroupOutput)
if err != nil {
return false, errors.Wrap(err, "failed to parse master zone id")
}
zoneGroupIsMaster, err := strconv.ParseBool(zoneGroupJson.IsMaster)
if err != nil {
return false, errors.Wrap(err, "failed to parse is_master from zone group json into bool")
}
return zoneGroupIsMaster, nil
}
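// DecodeSecret returns the value stored under keyName in the secret's data, or an error if the key is missing.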
func DecodeSecret(secret *v1.Secret, keyName string) (string, error) {
realmKey, ok := secret.Data[keyName]
if !ok {
return "", errors.New(fmt.Sprintf("failed to find key %q in secret %q data. ", keyName, secret.Name) +
"user likely created or modified the secret manually and should add the missing key back into the secret")
}
return string(realmKey), nil
}
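// GetRealmKeySecret fetches the "<realm>-keys" secret holding the access and secret keys for a CephObjectRealm.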
func GetRealmKeySecret(clusterdContext *clusterd.Context, realmName types.NamespacedName) (*v1.Secret, error) {
ctx := context.TODO()
realmSecretName := realmName.Name + "-keys"
realmSecret, err := clusterdContext.Clientset.CoreV1().Secrets(realmName.Namespace).Get(ctx, realmSecretName, metav1.GetOptions{})
if err != nil {
return nil, errors.Wrapf(err, "failed to get CephObjectRealm %q keys secret", realmName.String())
}
logger.Debugf("found keys secret for CephObjectRealm %q", realmName.String())
return realmSecret, nil
}
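// GetRealmKeyArgsFromSecret decodes the realm access and secret keys from the secret and returns
// them formatted as radosgw-admin CLI arguments.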
func GetRealmKeyArgsFromSecret(realmSecret *v1.Secret, realmName types.NamespacedName) (string, string, error) {
accessKey, err := DecodeSecret(realmSecret, AccessKeyName)
if err != nil {
return "", "", errors.Wrapf(err, "failed to decode CephObjectRealm %q access key from secret %q", realmName.String(), realmSecret.Name)
}
secretKey, err := DecodeSecret(realmSecret, SecretKeyName)
if err != nil {
return "", "", errors.Wrapf(err, "failed to decode CephObjectRealm %q secret key from secret %q", realmName.String(), realmSecret.Name)
}
logger.Debugf("decoded keys for realm %q", realmName.String())
accessKeyArg := fmt.Sprintf("--access-key=%s", accessKey)
secretKeyArg := fmt.Sprintf("--secret-key=%s", secretKey)
return accessKeyArg, secretKeyArg, nil
}
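// GetRealmKeyArgs looks up the realm keys secret by realm name and namespace and returns the
// access-key and secret-key CLI arguments.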
func GetRealmKeyArgs(clusterdContext *clusterd.Context, realmName, namespace string) (string, string, error) {
realmNsName := types.NamespacedName{Namespace: namespace, Name: realmName}
logger.Debugf("getting keys for realm %q", realmNsName.String())
secret, err := GetRealmKeySecret(clusterdContext, realmNsName)
if err != nil {
return "", "", err
}
return GetRealmKeyArgsFromSecret(secret, realmNsName)
}
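// getZoneEndpoints returns the endpoints currently registered for the zone, excluding
// serviceEndpoint so it is not duplicated when it gets re-added.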
func getZoneEndpoints(objContext *Context, serviceEndpoint string) ([]string, error) {
logger.Debugf("getting current endpoints for zone %v", objContext.Zone)
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
zoneGroupOutput, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg)
if err != nil {
// This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet
// The caller can nicely handle the error and not overflow the op logs with misleading error messages
return []string{}, errorOrIsNotFound(err, "failed to get rgw zone group %q", objContext.Name)
}
zoneGroupJson, err := DecodeZoneGroupConfig(zoneGroupOutput)
if err != nil {
return []string{}, errors.Wrap(err, "failed to parse zones list")
}
zoneEndpointsList := []string{}
for _, zone := range zoneGroupJson.Zones {
if zone.Name == objContext.Zone {
for _, endpoint := range zone.Endpoints {
// in case the object-store operator code is re-reconciled, zone modify could run again with serviceEndpoint added again
if endpoint != serviceEndpoint {
zoneEndpointsList = append(zoneEndpointsList, endpoint)
}
}
break
}
}
return zoneEndpointsList, nil
}
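// createMultisite creates the realm, zone group, and zone for the object store if they do not
// exist yet, then commits the config changes.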
func createMultisite(objContext *Context, endpointArg string) error {
logger.Debugf("creating realm, zone group, zone for object-store %v", objContext.Name)
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
// create the realm if it doesn't exist yet
output, err := RunAdminCommandNoMultisite(objContext, true, "realm", "get", realmArg)
if err != nil {
// ENOENT means "No such file or directory"
if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) {
output, err = RunAdminCommandNoMultisite(objContext, false, "realm", "create", realmArg)
if err != nil {
return errorOrIsNotFound(err, "failed to create ceph realm %q, for reason %q", objContext.ZoneGroup, output)
}
logger.Debugf("created realm %q", objContext.Realm)
} else {
return errorOrIsNotFound(err, "'radosgw-admin realm get' failed with code %d, for reason %q. %v", strconv.Itoa(code), output, string(kerrors.ReasonForError(err)))
}
}
// create the zonegroup if it doesn't exist yet
output, err = RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg)
if err != nil {
// ENOENT means "No such file or directory"
if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) {
output, err = RunAdminCommandNoMultisite(objContext, false, "zonegroup", "create", "--master", realmArg, zoneGroupArg, endpointArg)
if err != nil {
return errorOrIsNotFound(err, "failed to create ceph zone group %q, for reason %q", objContext.ZoneGroup, output)
}
logger.Debugf("created zone group %q", objContext.ZoneGroup)
} else {
return errorOrIsNotFound(err, "'radosgw-admin zonegroup get' failed with code %d, for reason %q", strconv.Itoa(code), output)
}
}
// create the zone if it doesn't exist yet
output, err = runAdminCommand(objContext, true, "zone", "get")
if err != nil {
// ENOENT means "No such file or directory"
if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) {
output, err = runAdminCommand(objContext, false, "zone", "create", "--master", endpointArg)
if err != nil {
return errorOrIsNotFound(err, "failed to create ceph zone %q, for reason %q", objContext.Zone, output)
}
logger.Debugf("created zone %q", objContext.Zone)
} else {
return errorOrIsNotFound(err, "'radosgw-admin zone get' failed with code %d, for reason %q", strconv.Itoa(code), output)
}
}
if err := commitConfigChanges(objContext); err != nil {
nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name)
return errors.Wrapf(err, "failed to commit config changes after creating multisite config for CephObjectStore %q", nsName)
}
logger.Infof("Multisite for object-store: realm=%s, zonegroup=%s, zone=%s", objContext.Realm, objContext.ZoneGroup, objContext.Zone)
return nil
}
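// joinMultisite adds this object store's endpoint to its zone (and zone group when the zone is the
// master), commits the changes, and creates the realm system user when running in the master zone
// of the master zone group.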
func joinMultisite(objContext *Context, endpointArg, zoneEndpoints, namespace string) error {
logger.Debugf("joining zone %v", objContext.Zone)
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
zoneIsMaster, err := checkZoneIsMaster(objContext)
if err != nil {
return err
}
zoneGroupIsMaster := false
if zoneIsMaster {
// endpoints that are part of a master zone are supposed to be the endpoints for a zone group
_, err := RunAdminCommandNoMultisite(objContext, false, "zonegroup", "modify", realmArg, zoneGroupArg, endpointArg)
if err != nil {
return errorOrIsNotFound(err, "failed to add object store %q in rgw zone group %q", objContext.Name, objContext.ZoneGroup)
}
logger.Debugf("endpoints for zonegroup %q are now %q", objContext.ZoneGroup, zoneEndpoints)
// check if zone group is master only if zone is master for creating the system user
zoneGroupIsMaster, err = checkZoneGroupIsMaster(objContext)
if err != nil {
return errors.Wrapf(err, "failed to find out whether zone group %q in is the master zone group", objContext.ZoneGroup)
}
}
_, err = RunAdminCommandNoMultisite(objContext, false, "zone", "modify", realmArg, zoneGroupArg, zoneArg, endpointArg)
if err != nil {
return errorOrIsNotFound(err, "failed to add object store %q in rgw zone %q", objContext.Name, objContext.Zone)
}
logger.Debugf("endpoints for zone %q are now %q", objContext.Zone, zoneEndpoints)
if err := commitConfigChanges(objContext); err != nil {
nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name)
return errors.Wrapf(err, "failed to commit config changes for CephObjectStore %q when joining multisite ", nsName)
}
logger.Infof("added object store %q to realm %q, zonegroup %q, zone %q", objContext.Name, objContext.Realm, objContext.ZoneGroup, objContext.Zone)
// create the system user for the realm, for the master zone in the master zonegroup, in the multisite scenario
if zoneIsMaster && zoneGroupIsMaster {
err = createSystemUser(objContext, namespace)
if err != nil {
return err
}
}
return nil
}
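// createSystemUser creates the realm's system user with the keys stored in the realm secret,
// skipping creation if the user already exists.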
func createSystemUser(objContext *Context, namespace string) error {
uid := objContext.Realm + "-system-user"
uidArg := fmt.Sprintf("--uid=%s", uid)
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
output, err := RunAdminCommandNoMultisite(objContext, false, "user", "info", uidArg, realmArg, zoneGroupArg, zoneArg)
if err == nil {
logger.Debugf("realm system user %q has already been created", uid)
return nil
}
if code, ok := exec.ExitStatus(err); ok && code == int(syscall.EINVAL) {
logger.Debugf("realm system user %q not found, running `radosgw-admin user create`", uid)
accessKeyArg, secretKeyArg, err := GetRealmKeyArgs(objContext.Context, objContext.Realm, namespace)
if err != nil {
return errors.Wrap(err, "failed to get keys for realm")
}
logger.Debugf("found keys to create realm system user %v", uid)
systemArg := "--system"
displayNameArg := fmt.Sprintf("--display-name=%s.user", objContext.Realm)
output, err = RunAdminCommandNoMultisite(objContext, false, "user", "create", realmArg, zoneGroupArg, zoneArg, uidArg, displayNameArg, accessKeyArg, secretKeyArg, systemArg)
if err != nil {
return errorOrIsNotFound(err, "failed to create realm system user %q for reason: %q", uid, output)
}
logger.Debugf("created realm system user %v", uid)
} else {
return errorOrIsNotFound(err, "radosgw-admin user info for system user failed with code %d and output %q", strconv.Itoa(code), output)
}
return nil
}
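// setMultisite configures multisite for the object store: multisite stores join their configured
// zone, while standalone stores get a realm, zone group, and zone of their own.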
func setMultisite(objContext *Context, store *cephv1.CephObjectStore, serviceIP string) error {
logger.Debugf("setting multisite configuration for object-store %v", store.Name)
serviceEndpoint := fmt.Sprintf("http://%s:%d", serviceIP, store.Spec.Gateway.Port)
if store.Spec.Gateway.SecurePort != 0 {
serviceEndpoint = fmt.Sprintf("https://%s:%d", serviceIP, store.Spec.Gateway.SecurePort)
}
if store.Spec.IsMultisite() {
zoneEndpointsList, err := getZoneEndpoints(objContext, serviceEndpoint)
if err != nil {
return err
}
zoneEndpointsList = append(zoneEndpointsList, serviceEndpoint)
zoneEndpoints := strings.Join(zoneEndpointsList, ",")
logger.Debugf("Endpoints for zone %q are: %q", objContext.Zone, zoneEndpoints)
endpointArg := fmt.Sprintf("--endpoints=%s", zoneEndpoints)
err = joinMultisite(objContext, endpointArg, zoneEndpoints, store.Namespace)
if err != nil {
return errors.Wrapf(err, "failed join ceph multisite in zone %q", objContext.Zone)
}
} else {
endpointArg := fmt.Sprintf("--endpoints=%s", serviceEndpoint)
err := createMultisite(objContext, endpointArg)
if err != nil {
return errorOrIsNotFound(err, "failed create ceph multisite for object-store %q", objContext.Name)
}
}
logger.Infof("multisite configuration for object-store %v is complete", store.Name)
return nil
}
func deleteRealm(context *Context) error {
// <name>
realmArg := fmt.Sprintf("--rgw-realm=%s", context.Name)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", context.Name)
_, err := RunAdminCommandNoMultisite(context, false, "realm", "delete", realmArg)
if err != nil {
logger.Warningf("failed to delete rgw realm %q. %v", context.Name, err)
}
_, err = RunAdminCommandNoMultisite(context, false, "zonegroup", "delete", realmArg, zoneGroupArg)
if err != nil {
logger.Warningf("failed to delete rgw zonegroup %q. %v", context.Name, err)
}
_, err = runAdminCommand(context, false, "zone", "delete")
if err != nil {
logger.Warningf("failed to delete rgw zone %q. %v", context.Name, err)
}
return nil
}
func decodeID(data string) (string, error) {
var id idType
err := json.Unmarshal([]byte(data), &id)
if err != nil {
return "", errors.Wrap(err, "failed to unmarshal json")
}
return id.ID, err
}
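// DecodeZoneGroupConfig unmarshals the JSON output of 'radosgw-admin zonegroup get'.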
func DecodeZoneGroupConfig(data string) (zoneGroupType, error) {
var config zoneGroupType
err := json.Unmarshal([]byte(data), &config)
if err != nil {
return config, errors.Wrap(err, "failed to unmarshal json")
}
return config, err
}
func getObjectStores(context *Context) ([]string, error) {
output, err := RunAdminCommandNoMultisite(context, true, "realm", "list")
if err != nil {
// This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet
// The caller can nicely handle the error and not overflow the op logs with misleading error messages
if kerrors.IsNotFound(err) {
return []string{}, err
}
// exit status 2 indicates the object store does not exist, so return nothing
if strings.Index(err.Error(), "exit status 2") == 0 {
return []string{}, nil
}
return nil, err
}
var r realmType
err = json.Unmarshal([]byte(output), &r)
if err != nil {
return nil, errors.Wrap(err, "Failed to unmarshal realms")
}
return r.Realms, nil
}
func deletePools(ctx *Context, spec cephv1.ObjectStoreSpec, lastStore bool) error {
if emptyPool(spec.DataPool) && emptyPool(spec.MetadataPool) {
logger.Info("skipping removal of pools since not specified in the object store")
return nil
}
pools := append(metadataPools, dataPoolName)
if lastStore {
pools = append(pools, rootPool)
}
if configurePoolsConcurrently() {
waitGroup, _ := errgroup.WithContext(context.TODO())
for _, pool := range pools {
name := poolName(ctx.Name, pool)
waitGroup.Go(func() error {
if err := cephclient.DeletePool(ctx.Context, ctx.clusterInfo, name); err != nil {
return errors.Wrapf(err, "failed to delete pool %q. ", name)
}
return nil
},
)
}
// Wait for all the pools to be deleted
if err := waitGroup.Wait(); err != nil {
logger.Warning(err)
}
} else {
for _, pool := range pools {
name := poolName(ctx.Name, pool)
if err := cephclient.DeletePool(ctx.Context, ctx.clusterInfo, name); err != nil {
logger.Warningf("failed to delete pool %q. %v", name, err)
}
}
}
// Delete erasure code profile if any
erasureCodes, err := cephclient.ListErasureCodeProfiles(ctx.Context, ctx.clusterInfo)
if err != nil {
return errors.Wrapf(err, "failed to list erasure code profiles for cluster %s", ctx.clusterInfo.Namespace)
}
// cleans up the EC profile for the data pool only. Metadata pools don't support EC (only replication is supported).
ecProfileName := cephclient.GetErasureCodeProfileForPool(ctx.Name)
for i := range erasureCodes {
if erasureCodes[i] == ecProfileName {
if err := cephclient.DeleteErasureCodeProfile(ctx.Context, ctx.clusterInfo, ecProfileName); err != nil {
return errors.Wrapf(err, "failed to delete erasure code profile %s for object store %s", ecProfileName, ctx.Name)
}
break
}
}
return nil
}
func allObjectPools(storeName string) []string {
baseObjPools := append(metadataPools, dataPoolName, rootPool)
poolsForThisStore := make([]string, 0, len(baseObjPools))
for _, p := range baseObjPools {
poolsForThisStore = append(poolsForThisStore, poolName(storeName, p))
}
return poolsForThisStore
}
func missingPools(context *Context) ([]string, error) {
// list pools instead of querying each pool individually. querying each individually makes it
// hard to determine if an error is because the pool does not exist or because of a connection
// issue with ceph mons (or some other underlying issue). if listing pools fails, we can be sure
// it is a connection issue and return an error.
existingPoolSummaries, err := cephclient.ListPoolSummaries(context.Context, context.clusterInfo)
if err != nil {
return []string{}, errors.Wrapf(err, "failed to determine if pools are missing. failed to list pools")
}
existingPools := sets.NewString()
for _, summary := range existingPoolSummaries {
existingPools.Insert(summary.Name)
}
missingPools := []string{}
for _, objPool := range allObjectPools(context.Name) {
if !existingPools.Has(objPool) {
missingPools = append(missingPools, objPool)
}
}
return missingPools, nil
}
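// CreatePools creates the metadata and data pools for the object store, or verifies that the
// expected pools already exist when no pool specs are given in the CR.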
func CreatePools(context *Context, clusterSpec *cephv1.ClusterSpec, metadataPool, dataPool cephv1.PoolSpec) error {
if emptyPool(dataPool) && emptyPool(metadataPool) {
logger.Info("no pools specified for the CR, checking for their existence...")
missingPools, err := missingPools(context)
if err != nil {
return err
}
if len(missingPools) > 0 {
return fmt.Errorf("CR store pools are missing: %v", missingPools)
}
}
// get the default PG count for rgw metadata pools
metadataPoolPGs, err := config.GetMonStore(context.Context, context.clusterInfo).Get("mon.", "rgw_rados_pool_pg_num_min")
if err != nil {
logger.Warningf("failed to adjust the PG count for rgw metadata pools. using the general default. %v", err)
metadataPoolPGs = cephclient.DefaultPGCount
}
if err := createSimilarPools(context, append(metadataPools, rootPool), clusterSpec, metadataPool, metadataPoolPGs, ""); err != nil {
return errors.Wrap(err, "failed to create metadata pools")
}
ecProfileName := ""
if dataPool.IsErasureCoded() {
ecProfileName = cephclient.GetErasureCodeProfileForPool(context.Name)
// create a new erasure code profile for the data pool
if err := cephclient.CreateErasureCodeProfile(context.Context, context.clusterInfo, ecProfileName, dataPool); err != nil {
return errors.Wrap(err, "failed to create erasure code profile")
}
}
if err := createSimilarPools(context, []string{dataPoolName}, clusterSpec, dataPool, cephclient.DefaultPGCount, ecProfileName); err != nil {
return errors.Wrap(err, "failed to create data pool")
}
return nil
}
// configurePoolsConcurrently checks if operator pod resources are set or not
func configurePoolsConcurrently() bool {
// if operator resources are specified return false as it will lead to operator pod killed due to resource limit
// nolint #S1008, we can safely suppress this
if os.Getenv("OPERATOR_RESOURCES_SPECIFIED") == "true" {
return false
}
return true
}
func createSimilarPools(ctx *Context, pools []string, clusterSpec *cephv1.ClusterSpec, poolSpec cephv1.PoolSpec, pgCount, ecProfileName string) error {
// We have concurrency
if configurePoolsConcurrently() {
waitGroup, _ := errgroup.WithContext(context.TODO())
for _, pool := range pools {
// Avoid the loop re-using the same value with a closure
pool := pool
waitGroup.Go(func() error { return createRGWPool(ctx, clusterSpec, poolSpec, pgCount, ecProfileName, pool) })
}
return waitGroup.Wait()
}
// No concurrency!
for _, pool := range pools {
err := createRGWPool(ctx, clusterSpec, poolSpec, pgCount, ecProfileName, pool)
if err != nil {
return err
}
}
return nil
}
func createRGWPool(ctx *Context, clusterSpec *cephv1.ClusterSpec, poolSpec cephv1.PoolSpec, pgCount, ecProfileName, requestedName string) error {
// create the pool if it doesn't exist yet
pool := cephv1.NamedPoolSpec{
Name: poolName(ctx.Name, requestedName),
PoolSpec: poolSpec,
}
if poolDetails, err := cephclient.GetPoolDetails(ctx.Context, ctx.clusterInfo, pool.Name); err != nil {
// If the ceph config has an EC profile, an EC pool must be created. Otherwise, it's necessary
// to create a replicated pool.
var err error
if poolSpec.IsErasureCoded() {
// An EC pool backing an object store does not need to enable EC overwrites, so the pool is
// created with that property disabled to avoid unnecessary performance impact.
err = cephclient.CreateECPoolForApp(ctx.Context, ctx.clusterInfo, ecProfileName, pool, pgCount, AppName, false /* enableECOverwrite */)
} else {
err = cephclient.CreateReplicatedPoolForApp(ctx.Context, ctx.clusterInfo, clusterSpec, pool, pgCount, AppName)
}
if err != nil {
return errors.Wrapf(err, "failed to create pool %s for object store %s.", pool.Name, ctx.Name)
}
} else {
// pools already exist
if poolSpec.IsReplicated() {
// detect if the replication is different from the pool details
if poolDetails.Size != poolSpec.Replicated.Size {
logger.Infof("pool size is changed from %d to %d", poolDetails.Size, poolSpec.Replicated.Size)
if err := cephclient.SetPoolReplicatedSizeProperty(ctx.Context, ctx.clusterInfo, poolDetails.Name, strconv.FormatUint(uint64(poolSpec.Replicated.Size), 10)); err != nil {
return errors.Wrapf(err, "failed to set size property to replicated pool %q to %d", poolDetails.Name, poolSpec.Replicated.Size)
}
}
}
}
// Set the pg_num_min if not the default so the autoscaler won't immediately increase the pg count
if pgCount != cephclient.DefaultPGCount {
if err := cephclient.SetPoolProperty(ctx.Context, ctx.clusterInfo, pool.Name, "pg_num_min", pgCount); err != nil {
return errors.Wrapf(err, "failed to set pg_num_min on pool %q to %q", pool.Name, pgCount)
}
}
return nil
}
func poolName(storeName, poolName string) string {
if strings.HasPrefix(poolName, ".") {
return poolName
}
// the name of the pool is <instance>.<name>, except for the pool ".rgw.root" that spans object stores
return fmt.Sprintf("%s.%s", storeName, poolName)
}
// GetObjectBucketProvisioner returns the bucket provisioner name appended with operator namespace if OBC is watching on it
func GetObjectBucketProvisioner(data map[string]string, namespace string) string {
provName := bucketProvisionerName
obcWatchOnNamespace := k8sutil.GetValue(data, "ROOK_OBC_WATCH_OPERATOR_NAMESPACE", "false")
if strings.EqualFold(obcWatchOnNamespace, "true") {
provName = fmt.Sprintf("%s.%s", namespace, bucketProvisionerName)
}
return provName
}
// checkDashboardUser returns true if the dashboard user is already configured, otherwise false
func checkDashboardUser(context *Context) (bool, error) {
args := []string{"dashboard", "get-rgw-api-access-key"}
cephCmd := cephclient.NewCephCommand(context.Context, context.clusterInfo, args)
out, err := cephCmd.Run()
if string(out) != "" {
return true, err
}
return false, err
}
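// enableRGWDashboard creates the dashboard system user (if needed) and configures the Ceph
// dashboard with its access and secret keys.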
func enableRGWDashboard(context *Context) error {
logger.Info("enabling rgw dashboard")
checkDashboard, err := checkDashboardUser(context)
if err != nil {
logger.Debug("Unable to fetch dashboard user key for RGW, hence skipping")
return nil
}
if checkDashboard {
logger.Debug("RGW Dashboard is already enabled")
return nil
}
user := ObjectUser{
UserID: DashboardUser,
DisplayName: &DashboardUser,
SystemUser: true,
}
// TODO:
// Use admin ops user instead!
// It's safe to create the user with the force flag regardless if the cluster's dashboard is
// configured as a secondary rgw site. The creation will return the user already exists and we
// will just fetch it (it has been created by the primary cluster)
u, errCode, err := CreateUser(context, user, true)
if err != nil || errCode != 0 {
return errors.Wrapf(err, "failed to create user %q", DashboardUser)
}
var accessArgs, secretArgs []string
var secretFile *os.File
// for latest Ceph versions
if mgr.FileBasedPasswordSupported(context.clusterInfo) {
accessFile, err := util.CreateTempFile(*u.AccessKey)
if err != nil {
return errors.Wrap(err, "failed to create a temporary dashboard access-key file")
}
accessArgs = []string{"dashboard", "set-rgw-api-access-key", "-i", accessFile.Name()}
defer func() {
if err := os.Remove(accessFile.Name()); err != nil {
logger.Errorf("failed to clean up dashboard access-key file. %v", err)
}
}()
secretFile, err = util.CreateTempFile(*u.SecretKey)
if err != nil {
return errors.Wrap(err, "failed to create a temporary dashboard secret-key file")
}
secretArgs = []string{"dashboard", "set-rgw-api-secret-key", "-i", secretFile.Name()}
} else {
// for older Ceph versions
accessArgs = []string{"dashboard", "set-rgw-api-access-key", *u.AccessKey}
secretArgs = []string{"dashboard", "set-rgw-api-secret-key", *u.SecretKey}
}
cephCmd := cephclient.NewCephCommand(context.Context, context.clusterInfo, accessArgs)
_, err = cephCmd.Run()
if err != nil {
return errors.Wrapf(err, "failed to set user %q accesskey", DashboardUser)
}
cephCmd = cephclient.NewCephCommand(context.Context, context.clusterInfo, secretArgs)
go func() {
// Setting the dashboard api secret started hanging in some clusters
// starting in ceph v15.2.8. We run it in a goroutine until the fix
// is found. We expect the ceph command to timeout so at least the goroutine exits.
logger.Info("setting the dashboard api secret key")
_, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout)
if err != nil {
logger.Errorf("failed to set user %q secretkey. %v", DashboardUser, err)
}
if mgr.FileBasedPasswordSupported(context.clusterInfo) {
if err := os.Remove(secretFile.Name()); err != nil {
logger.Errorf("failed to clean up dashboard secret-key file. %v", err)
}
}
logger.Info("done setting the dashboard api secret key")
}()
return nil
}
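// disableRGWDashboard deletes the dashboard user and resets the dashboard's RGW API access and secret keys.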
func disableRGWDashboard(context *Context) {
logger.Info("disabling the dashboard api user and secret key")
_, _, err := GetUser(context, DashboardUser)
if err != nil {
logger.Infof("unable to fetch the user %q details from this objectstore %q", DashboardUser, context.Name)
} else {
logger.Info("deleting rgw dashboard user")
_, err = DeleteUser(context, DashboardUser)
if err != nil {
logger.Warningf("failed to delete ceph user %q. %v", DashboardUser, err)
}
}
args := []string{"dashboard", "reset-rgw-api-access-key"}
cephCmd := cephclient.NewCephCommand(context.Context, context.clusterInfo, args)
_, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout)
if err != nil {
logger.Warningf("failed to reset user accesskey for user %q. %v", DashboardUser, err)
}
args = []string{"dashboard", "reset-rgw-api-secret-key"}
cephCmd = cephclient.NewCephCommand(context.Context, context.clusterInfo, args)
_, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout)
if err != nil {
logger.Warningf("failed to reset user secretkey for user %q. %v", DashboardUser, err)
}
logger.Info("done disabling the dashboard api secret key")
}
func errorOrIsNotFound(err error, msg string, args ...interface{}) error {
// This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet
// The caller can nicely handle the error and not overflow the op logs with misleading error messages
if kerrors.IsNotFound(err) {
return err
}
// expand args into the format verbs; passing the slice as a single value would garble the message
return errors.Wrapf(err, msg, args...)
}
| [
"\"OPERATOR_RESOURCES_SPECIFIED\""
]
| []
| [
"OPERATOR_RESOURCES_SPECIFIED"
]
| [] | ["OPERATOR_RESOURCES_SPECIFIED"] | go | 1 | 0 | |
app/modules/core/status_page.py | """
Status Page Module
"""
# Standard Library
import os
import json
from datetime import datetime
from datetime import timedelta
# Third Party Library
from dateutil.parser import parse
from django.utils import timezone
from pyumetric import Datetime_Utils
from pyumetric import NewRelic_Provider
from django.forms.fields import DateTimeField
from dateutil.relativedelta import relativedelta
from django.utils.translation import gettext as _
# Local Library
from app.modules.entity.option_entity import OptionEntity
from app.modules.entity.metric_entity import MetricEntity
from app.modules.entity.incident_entity import IncidentEntity
from app.modules.entity.component_entity import ComponentEntity
from app.modules.entity.component_group_entity import ComponentGroupEntity
from app.modules.entity.incident_update_entity import IncidentUpdateEntity
from app.modules.entity.incident_update_component_entity import IncidentUpdateComponentEntity
class StatusPage():
__option_entity = None
__incident_entity = None
__incident_update_entity = None
__incident_update_component_entity = None
__component_group_entity = None
__component_entity = None
__metric_entity = None
def __init__(self):
self.__option_entity = OptionEntity()
self.__incident_entity = IncidentEntity()
self.__incident_update_entity = IncidentUpdateEntity()
self.__incident_update_component_entity = IncidentUpdateComponentEntity()
self.__component_group_entity = ComponentGroupEntity()
self.__component_entity = ComponentEntity()
self.__metric_entity = MetricEntity()
def get_system_status(self):
# Get Open Incidents
# if it has no resolved update
# Check the last incident updates
# Check it has any component is affected
return "operational"
def get_about_site(self):
option = self.__option_entity.get_one_by_key("builder_about")
return option.value if option else ""
def get_logo_url(self):
option = self.__option_entity.get_one_by_key("builder_logo_url")
return option.value if option else ""
def get_favicon_url(self):
option = self.__option_entity.get_one_by_key("builder_favicon_url")
return option.value if option else ""
def get_incident_by_uri(self, uri):
incident = self.__incident_entity.get_one_by_uri(uri)
app_name = self.__option_entity.get_one_by_key("app_name")
if incident:
incident_data = {
"headline": incident.name,
"headline_class": "text-danger",
"status": incident.status,
"sub_headline": _("Incident Report for %s") % (app_name.value),
"affected_components": [],
"updates": []
}
updates = self.__incident_update_entity.get_all(incident.id)
for update in updates:
incident_data["updates"].append({
"type": update.status.title(),
"body": update.message,
"date": "%(date)s %(tz)s" % {
"date": update.datetime.strftime("%B %d, %H:%M"),
"tz": os.getenv("APP_TIMEZONE", "UTC")
}
})
components = self.__incident_update_component_entity.get_all(update.id)
for component in components:
if component.component.name not in incident_data["affected_components"]:
incident_data["affected_components"].append(component.component.name)
incident_data["affected_components"] = ", ".join(incident_data["affected_components"])
return incident_data
return False
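# Group incidents by month over a three-month window selected by `period` (1 = the most recent quarter).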
def get_incidents_for_period(self, period):
today = timezone.now()
datem = datetime(today.year, today.month, 1)
from_date = datem - relativedelta(months=+(period - 1) * 3)
to_date = datem - relativedelta(months=+(period * 3))
period = "%(from)s - %(to)s" % {
"from": from_date.strftime("%B %Y"),
"to": (to_date + relativedelta(months=+1)).strftime("%B %Y")
}
from_date = datetime(from_date.year, from_date.month, 1)
to_date = datetime(to_date.year, to_date.month, 1)
incidents = []
while from_date > to_date:
current_incidents = []
incidents_list = self.__incident_entity.get_incident_on_month(DateTimeField().clean(from_date))
for incident in incidents_list:
current_incidents.append({
"uri": incident.uri,
"subject": incident.name,
"class": "text-danger",
"status": incident.status,
"final_update": _("This incident has been resolved.") if incident.status == "closed" else _("This incident is still open."),
"period": self.__get_incident_period(incident)
})
current_date = from_date.strftime("%B %Y")
incidents.append({
"date": current_date,
"incidents": current_incidents
})
from_date -= relativedelta(months=+1)
return {
"period": period,
"incidents": incidents
}
def __get_incident_period(self, incident):
updates = self.__get_incident_updates(incident.id)
if len(updates):
return "%(from)s %(tz)s - %(to)s" % {
"from": incident.datetime.strftime("%B %d, %H:%M"),
"tz": os.getenv("APP_TIMEZONE", "UTC"),
"to": updates[len(updates)-1]["date"]
}
return "%(from)s %(tz)s" % {
"from": incident.datetime.strftime("%B %d, %H:%M"),
"tz": os.getenv("APP_TIMEZONE", "UTC")
}
def get_past_incidents(self, days=7):
i = 0
past_incidents = []
while days > i:
date = (datetime.now() - timedelta(days=i))
incidents_result = []
incidents = self.__incident_entity.get_incident_from_days(i)
for incident in incidents:
incidents_result.append({
"uri": incident.uri,
"subject": incident.name,
"class": "text-danger",
"status": incident.status,
"updates": self.__get_incident_updates(incident.id)
})
past_incidents.append({
"date": date.strftime("%B %d, %Y"),
"incidents": incidents_result
})
i += 1
return past_incidents
def __get_incident_updates(self, incident_id):
updates_result = []
updates = self.__incident_update_entity.get_all(incident_id)
for update in updates:
updates_result.append({
"type": update.status.title(),
"date": "%(date)s %(tz)s" % {
"date": update.datetime.strftime("%B %d, %H:%M"),
"tz": os.getenv("APP_TIMEZONE", "UTC")
},
"body": update.message
})
return updates_result
def get_system_metrics(self):
metrics = []
option = self.__option_entity.get_one_by_key("builder_metrics")
if option:
items = json.loads(option.value)
for item in items:
if "m-" in item:
item = int(item.replace("m-", ""))
if item:
metric = self.__metric_entity.get_one_by_id(item)
if metric:
metrics.append({
"id": "metric_container_%d" % (metric.id),
"title": metric.title,
"xtitle": metric.x_axis,
"ytitle": metric.y_axis,
"day_data": self.__get_metrics(metric, -1),
"week_data": self.__get_metrics(metric, -7),
"month_data": self.__get_metrics(metric, -30)
})
return metrics
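# Fetch datapoints for a metric from NewRelic over the given period (a negative number of days back from now).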
def __get_metrics(self, metric, period):
metric_values = []
option = self.__option_entity.get_one_by_key("newrelic_api_key")
if not option:
raise Exception("Unable to find option with key newrelic_api_key")
new_relic_client = NewRelic_Provider(option.value)
if metric.source == "newrelic":
data = json.loads(metric.data)
if data["metric"] == "response_time":
response = new_relic_client.get_metric(
data["application"],
["WebTransaction"],
["average_response_time"],
Datetime_Utils("UTC", period).iso(),
Datetime_Utils("UTC").iso(),
False
)
if len(response) > 0:
response = json.loads(response)
if "metric_data" not in response:
raise Exception(_("Error: Unable to find metric_data on NewRelic response!"))
if "WebTransaction" not in response["metric_data"]["metrics_found"]:
raise Exception(_("Error: Unable to find metric WebTransaction on NewRelic response!"))
if "metrics" not in response["metric_data"] or len(response["metric_data"]["metrics"]) < 1:
raise Exception(_("Error: Unable to find metric metrics on NewRelic response!"))
for item in response["metric_data"]["metrics"][0]["timeslices"]:
metric_values.append({
"timestamp": datetime.timestamp(parse(item["from"])),
"value": item["values"]["average_response_time"]
})
elif data["metric"] == "apdex":
raise Exception(_("Error: NewRelic apdex metric not implemented yet!"))
elif data["metric"] == "error_rate":
raise Exception(_("Error: NewRelic error_rate metric not implemented yet!"))
elif data["metric"] == "throughput":
raise Exception(_("Error: NewRelic throughput metric not implemented yet!"))
elif data["metric"] == "errors":
raise Exception(_("Error: NewRelic errors metric not implemented yet!"))
elif data["metric"] == "real_user_response_time":
raise Exception(_("Error: NewRelic real_user_response_time metric not implemented yet!"))
elif data["metric"] == "real_user_apdex":
raise Exception(_("Error: NewRelic real_user_apdex metric not implemented yet!"))
return metric_values
def get_services(self):
services = []
option = self.__option_entity.get_one_by_key("builder_components")
if option:
items = json.loads(option.value)
for item in items:
if "c-" in item:
component = self.__component_entity.get_one_by_id(item.replace("c-", ""))
if component:
services.append({
"name": component.name,
"description": component.description,
"current_status": self.get_status(component.id, "component"),
"current_status_class": "bg-green",
"uptime_chart": self.get_uptime_chart(component.id, "component"),
"sub_services": []
})
elif "g-" in item:
group = self.__component_group_entity.get_one_by_id(item.replace("g-", ""))
services.append({
"name": group.name,
"description": group.description,
"current_status": self.get_status(group.id, "group"),
"current_status_class": "bg-green",
"uptime_chart": self.get_uptime_chart(group.id, "group"),
"sub_services": self.get_sub_services(group.id)
})
return services
def get_sub_services(self, group_id):
services = []
items = self.__component_entity.get_all_components_by_group(group_id)
for item in items:
services.append({
"name": item.name,
"description": item.description,
"current_status": self.get_status(item.id, "component"),
"current_status_class": "bg-green",
"uptime_chart": self.get_uptime_chart(item.id, "component"),
"sub_services": []
})
return services
def get_status(self, id, type):
# Get Open Incidents
# if it has no resolved update
# Check the last incident updates
# Check if the component is affected
if type == "component":
return "Operational"
# Get Group Components
# Get Open Incidents
# if it has no resolved update
# Check the last incident updates
# Check if one of the group components is affected
elif type == "group":
return "Operational"
def __get_affected_components(self):
# Get Open Incidents
# if it has no resolved update
# Check the last incident updates
# Create a list of affected components
return {}
def get_uptime_chart(self, id, type, period=90):
return []
| []
| []
| [
"APP_TIMEZONE"
]
| [] | ["APP_TIMEZONE"] | python | 1 | 0 | |
tests/env.go | package tests
import (
"os"
"path/filepath"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
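// GetKubeconfig returns the kubeconfig path from INTEGRATION_KUBECONFIG, falling back to ~/.kube/config when it exists.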
func GetKubeconfig() string {
kubeconfPath := os.Getenv("INTEGRATION_KUBECONFIG")
if kubeconfPath != "" {
return kubeconfPath
}
homeDir, err := os.UserHomeDir()
if err != nil {
Fail("INTEGRATION_KUBECONFIG not provided, failed to use default: " + err.Error())
}
kubeconfPath = filepath.Join(homeDir, ".kube", "config")
_, err = os.Stat(kubeconfPath)
if os.IsNotExist(err) {
return ""
}
Expect(err).NotTo(HaveOccurred())
return kubeconfPath
}
func GetEiriniDockerHubPassword() string {
return lookupOptionalEnv("EIRINIUSER_PASSWORD")
}
func GetEiriniSystemNamespace() string {
return lookupOptionalEnv("EIRINI_SYSTEM_NS")
}
func GetEiriniWorkloadsNamespace() string {
return lookupOptionalEnv("EIRINI_WORKLOADS_NS")
}
func getEiriniTLSSecretName() string {
return lookupOptionalEnv("EIRINI_TLS_SECRET")
}
func GetEiriniAddress() string {
return lookupOptionalEnv("EIRINI_ADDRESS")
}
func lookupOptionalEnv(key string) string {
value, set := os.LookupEnv(key)
if !set || value == "" {
Skip("Please export optional environment variable " + key + " to run this test")
}
return value
}
func GetTelepresenceServiceName() string {
serviceName := os.Getenv("TELEPRESENCE_SERVICE_NAME")
Expect(serviceName).ToNot(BeEmpty())
return serviceName
}
func GetTelepresencePort() int {
startPort := os.Getenv("TELEPRESENCE_EXPOSE_PORT_START")
Expect(startPort).ToNot(BeEmpty())
portNo, err := strconv.Atoi(startPort)
Expect(err).NotTo(HaveOccurred())
return portNo + GinkgoParallelNode() - 1
}
| [
"\"INTEGRATION_KUBECONFIG\"",
"\"TELEPRESENCE_SERVICE_NAME\"",
"\"TELEPRESENCE_EXPOSE_PORT_START\""
]
| []
| [
"TELEPRESENCE_EXPOSE_PORT_START",
"INTEGRATION_KUBECONFIG",
"TELEPRESENCE_SERVICE_NAME"
]
| [] | ["TELEPRESENCE_EXPOSE_PORT_START", "INTEGRATION_KUBECONFIG", "TELEPRESENCE_SERVICE_NAME"] | go | 3 | 0 | |
restaurant/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "restaurant.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 |